// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
)
type tracesGroup struct {
traces ptrace.Traces
resourceHashes [][16]byte
}
func newTracesGroup() *tracesGroup {
return &tracesGroup{traces: ptrace.NewTraces()}
}
// findOrCreateResourceSpans searches for a ResourceSpans entry whose Resource has matching attributes and returns it. If none is found, a new one is created.
func (tg *tracesGroup) findOrCreateResourceSpans(originResource pcommon.Resource, requiredAttributes pcommon.Map) ptrace.ResourceSpans {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rss := tg.traces.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
if tg.resourceHashes[i] == referenceResourceHash {
return rss.At(i)
}
}
rs := tg.traces.ResourceSpans().AppendEmpty()
referenceResource.MoveTo(rs.Resource())
tg.resourceHashes = append(tg.resourceHashes, referenceResourceHash)
return rs
}
type metricsGroup struct {
metrics pmetric.Metrics
resourceHashes [][16]byte
}
func newMetricsGroup() *metricsGroup {
return &metricsGroup{metrics: pmetric.NewMetrics()}
}
// findOrCreateResourceMetrics searches for a ResourceMetrics entry whose Resource has matching attributes and returns it. If none is found, a new one is created.
func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resource, requiredAttributes pcommon.Map) pmetric.ResourceMetrics {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rms := mg.metrics.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
if mg.resourceHashes[i] == referenceResourceHash {
return rms.At(i)
}
}
rm := mg.metrics.ResourceMetrics().AppendEmpty()
referenceResource.MoveTo(rm.Resource())
mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash)
return rm
}
type logsGroup struct {
logs plog.Logs
resourceHashes [][16]byte
}
// newLogsGroup returns a new logsGroup
func newLogsGroup() *logsGroup {
return &logsGroup{logs: plog.NewLogs()}
}
// findOrCreateResourceLogs searches for a ResourceLogs entry whose Resource has matching attributes and returns it. If none is found, a new one is created.
func (lg *logsGroup) findOrCreateResourceLogs(originResource pcommon.Resource, requiredAttributes pcommon.Map) plog.ResourceLogs {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rls := lg.logs.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
if lg.resourceHashes[i] == referenceResourceHash {
return rls.At(i)
}
}
rl := lg.logs.ResourceLogs().AppendEmpty()
referenceResource.MoveTo(rl.Resource())
lg.resourceHashes = append(lg.resourceHashes, referenceResourceHash)
return rl
}
func instrumentationLibrariesEqual(il1, il2 pcommon.InstrumentationScope) bool {
return il1.Name() == il2.Name() && il1.Version() == il2.Version()
}
// matchingScopeSpans searches for a ptrace.ScopeSpans instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one
func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.InstrumentationScope) ptrace.ScopeSpans {
ilss := rl.ScopeSpans()
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
if instrumentationLibrariesEqual(ils.Scope(), library) {
return ils
}
}
ils := ilss.AppendEmpty()
library.CopyTo(ils.Scope())
return ils
}
// matchingScopeLogs searches for a plog.ScopeLogs instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one
func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScope) plog.ScopeLogs {
ills := rl.ScopeLogs()
for i := 0; i < ills.Len(); i++ {
sl := ills.At(i)
if instrumentationLibrariesEqual(sl.Scope(), library) {
return sl
}
}
sl := ills.AppendEmpty()
library.CopyTo(sl.Scope())
return sl
}
// matchingScopeMetrics searches for a pmetric.ScopeMetrics instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one
func matchingScopeMetrics(rm pmetric.ResourceMetrics, library pcommon.InstrumentationScope) pmetric.ScopeMetrics {
ilms := rm.ScopeMetrics()
for i := 0; i < ilms.Len(); i++ {
ilm := ilms.At(i)
if instrumentationLibrariesEqual(ilm.Scope(), library) {
return ilm
}
}
ilm := ilms.AppendEmpty()
library.CopyTo(ilm.Scope())
return ilm
}
// buildReferenceResource returns a new Resource to look up among the existing Resources,
// built by merging the Attributes of the original Resource with the requested Attributes.
func buildReferenceResource(originResource pcommon.Resource, requiredAttributes pcommon.Map) pcommon.Resource {
referenceResource := pcommon.NewResource()
originResource.Attributes().CopyTo(referenceResource.Attributes())
requiredAttributes.Range(func(k string, v pcommon.Value) bool {
v.CopyTo(referenceResource.Attributes().PutEmpty(k))
return true
})
return referenceResource
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)
var consumerCapabilities = consumer.Capabilities{MutatesData: true}
// NewFactory returns a new factory for the Group by Attributes processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability))
}
// createDefaultConfig creates the default configuration for the processor.
func createDefaultConfig() component.Config {
return &Config{
GroupByKeys: []string{},
}
}
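// A minimal collector configuration sketch for this processor, not part of
// the original file; it assumes the conventional component id "groupbyattrs"
// and that GroupByKeys is mapped to the "keys" field:
//
//	processors:
//	  groupbyattrs:
//	    keys:
//	      - host.name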
func createGroupByAttrsProcessor(set processor.Settings, attributes []string) (*groupByAttrsProcessor, error) {
var nonEmptyAttributes []string
presentAttributes := make(map[string]struct{})
for _, str := range attributes {
if str != "" {
_, isPresent := presentAttributes[str]
if isPresent {
set.Logger.Warn("A grouping key is already present", zap.String("key", str))
} else {
nonEmptyAttributes = append(nonEmptyAttributes, str)
presentAttributes[str] = struct{}{}
}
}
}
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
return &groupByAttrsProcessor{logger: set.Logger, groupByKeys: nonEmptyAttributes, telemetryBuilder: telemetryBuilder}, nil
}
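// Illustrative sketch, not part of the original file: empty keys are dropped
// and duplicates are logged and ignored, so this hypothetical input collapses
// to a single grouping key. set must carry a valid Logger and
// TelemetrySettings.
func exampleKeyDeduplication(set processor.Settings) []string {
	gap, err := createGroupByAttrsProcessor(set, []string{"host.name", "", "host.name"})
	if err != nil {
		return nil
	}
	return gap.groupByKeys // ["host.name"]
}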
// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
gap.processTraces,
processorhelper.WithCapabilities(consumerCapabilities))
}
// createLogsProcessor creates a logs processor based on this config.
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
gap.processLogs,
processorhelper.WithCapabilities(consumerCapabilities))
}
// createMetricsProcessor creates a metrics processor based on this config.
func createMetricsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Metrics,
) (processor.Metrics, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewMetrics(
ctx,
set,
cfg,
nextConsumer,
gap.processMetrics,
processorhelper.WithCapabilities(consumerCapabilities))
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorGroupbyattrsLogGroups metric.Int64Histogram
ProcessorGroupbyattrsMetricGroups metric.Int64Histogram
ProcessorGroupbyattrsNumGroupedLogs metric.Int64Counter
ProcessorGroupbyattrsNumGroupedMetrics metric.Int64Counter
ProcessorGroupbyattrsNumGroupedSpans metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedLogs metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedMetrics metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedSpans metric.Int64Counter
ProcessorGroupbyattrsSpanGroups metric.Int64Histogram
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorGroupbyattrsLogGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_log_groups",
metric.WithDescription("Distribution of groups extracted for logs"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsMetricGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_metric_groups",
metric.WithDescription("Distribution of groups extracted for metrics"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedLogs, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_logs",
metric.WithDescription("Number of logs that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedMetrics, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_metrics",
metric.WithDescription("Number of metrics that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedSpans, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_spans",
metric.WithDescription("Number of spans that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedLogs, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_logs",
metric.WithDescription("Number of logs that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedMetrics, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_metrics",
metric.WithDescription("Number of metrics that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedSpans, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_spans",
metric.WithDescription("Number of spans that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsSpanGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_span_groups",
metric.WithDescription("Distribution of groups extracted for spans"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Code generated by mdatagen. DO NOT EDIT.
package metadatatest
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processortest"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
)
// Deprecated: [v0.119.0] Use componenttest.Telemetry
type Telemetry struct {
*componenttest.Telemetry
}
// Deprecated: [v0.119.0] Use componenttest.NewTelemetry
func SetupTelemetry(opts ...componenttest.TelemetryOption) Telemetry {
return Telemetry{Telemetry: componenttest.NewTelemetry(opts...)}
}
// Deprecated: [v0.119.0] Use metadatatest.NewSettings
func (tt *Telemetry) NewSettings() processor.Settings {
return NewSettings(tt.Telemetry)
}
func NewSettings(tt *componenttest.Telemetry) processor.Settings {
set := processortest.NewNopSettings()
set.ID = component.NewID(component.MustNewType("groupbyattrs"))
set.TelemetrySettings = tt.NewTelemetrySettings()
return set
}
// Deprecated: [v0.119.0] Use metadatatest.AssertEqual*
func (tt *Telemetry) AssertMetrics(t *testing.T, expected []metricdata.Metrics, opts ...metricdatatest.Option) {
var md metricdata.ResourceMetrics
require.NoError(t, tt.Reader.Collect(context.Background(), &md))
// ensure all required metrics are present
for _, want := range expected {
got := getMetricFromResource(want.Name, md)
metricdatatest.AssertEqual(t, want, got, opts...)
}
// ensure no additional metrics are emitted
require.Equal(t, len(expected), lenMetrics(md))
}
func AssertEqualProcessorGroupbyattrsLogGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_log_groups",
Description: "Distribution of groups extracted for logs",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_log_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsMetricGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_metric_groups",
Description: "Distribution of groups extracted for metrics",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_metric_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedLogs(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_logs",
Description: "Number of logs that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_logs")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedMetrics(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_metrics",
Description: "Number of metrics that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_metrics")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedSpans(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_spans",
Description: "Number of spans that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_spans")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedLogs(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_logs",
Description: "Number of logs that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_logs")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedMetrics(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_metrics",
Description: "Number of metrics that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_metrics")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedSpans(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_spans",
Description: "Number of spans that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_spans")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsSpanGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_span_groups",
Description: "Distribution of groups extracted for spans",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_span_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func getMetricFromResource(name string, got metricdata.ResourceMetrics) metricdata.Metrics {
for _, sm := range got.ScopeMetrics {
for _, m := range sm.Metrics {
if m.Name == name {
return m
}
}
}
return metricdata.Metrics{}
}
func lenMetrics(got metricdata.ResourceMetrics) int {
metricsCount := 0
for _, sm := range got.ScopeMetrics {
metricsCount += len(sm.Metrics)
}
return metricsCount
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)
type groupByAttrsProcessor struct {
logger *zap.Logger
groupByKeys []string
telemetryBuilder *metadata.TelemetryBuilder
}
// processTraces processes traces and groups spans by attribute.
func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
rss := td.ResourceSpans()
tg := newTracesGroup()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ilss := rs.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
ils := ilss.At(j)
for k := 0; k < ils.Spans().Len(); k++ {
span := ils.Spans().At(k)
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(span.Attributes())
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedSpans.Add(ctx, 1)
// Some attributes are going to be moved from span to resource level,
// so we can delete those on the record level
deleteAttributes(requiredAttributes, span.Attributes())
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedSpans.Add(ctx, 1)
}
// Let's combine the base resource attributes + the extracted (grouped) attributes
// and keep them in the grouping entry
groupedResourceSpans := tg.findOrCreateResourceSpans(rs.Resource(), requiredAttributes)
sp := matchingScopeSpans(groupedResourceSpans, ils.Scope()).Spans().AppendEmpty()
span.CopyTo(sp)
}
}
}
// Record the number of span groups and return the grouped data
gap.telemetryBuilder.ProcessorGroupbyattrsSpanGroups.Record(ctx, int64(tg.traces.ResourceSpans().Len()))
return tg.traces, nil
}
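// Illustrative sketch, not part of the original file: assuming gap was built
// by createGroupByAttrsProcessor with the single key "host.name", one
// incoming ResourceSpans whose spans carry different host.name values is
// split into two ResourceSpans, with the attribute promoted to the resource
// and removed from each span.
func exampleProcessTracesSplit(ctx context.Context, gap *groupByAttrsProcessor) (ptrace.Traces, error) {
	td := ptrace.NewTraces()
	ils := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
	ils.Spans().AppendEmpty().Attributes().PutStr("host.name", "a")
	ils.Spans().AppendEmpty().Attributes().PutStr("host.name", "b")
	return gap.processTraces(ctx, td) // two ResourceSpans in the result
}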
func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
rl := ld.ResourceLogs()
lg := newLogsGroup()
for i := 0; i < rl.Len(); i++ {
ls := rl.At(i)
ills := ls.ScopeLogs()
for j := 0; j < ills.Len(); j++ {
sl := ills.At(j)
for k := 0; k < sl.LogRecords().Len(); k++ {
log := sl.LogRecords().At(k)
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(log.Attributes())
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedLogs.Add(ctx, 1)
// Some attributes are going to be moved from log record to resource level,
// so we can delete those on the record level
deleteAttributes(requiredAttributes, log.Attributes())
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedLogs.Add(ctx, 1)
}
// Let's combine the base resource attributes + the extracted (grouped) attributes
// and keep them in the grouping entry
groupedResourceLogs := lg.findOrCreateResourceLogs(ls.Resource(), requiredAttributes)
lr := matchingScopeLogs(groupedResourceLogs, sl.Scope()).LogRecords().AppendEmpty()
log.CopyTo(lr)
}
}
}
// Record the number of log groups and return the grouped data
gap.telemetryBuilder.ProcessorGroupbyattrsLogGroups.Record(ctx, int64(lg.logs.ResourceLogs().Len()))
return lg.logs, nil
}
func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
rms := md.ResourceMetrics()
mg := newMetricsGroup()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
for k := 0; k < ilm.Metrics().Len(); k++ {
metric := ilm.Metrics().At(k)
//exhaustive:enforce
switch metric.Type() {
case pmetric.MetricTypeGauge:
for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Gauge().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Gauge().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeSum:
for pointIndex := 0; pointIndex < metric.Sum().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Sum().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Sum().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeSummary:
for pointIndex := 0; pointIndex < metric.Summary().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Summary().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Summary().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeHistogram:
for pointIndex := 0; pointIndex < metric.Histogram().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Histogram().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Histogram().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeExponentialHistogram:
for pointIndex := 0; pointIndex < metric.ExponentialHistogram().DataPoints().Len(); pointIndex++ {
dataPoint := metric.ExponentialHistogram().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.ExponentialHistogram().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeEmpty:
}
}
}
}
gap.telemetryBuilder.ProcessorGroupbyattrsMetricGroups.Record(ctx, int64(mg.metrics.ResourceMetrics().Len()))
return mg.metrics, nil
}
func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) {
attrsForRemoval.Range(func(key string, _ pcommon.Value) bool {
targetAttrs.Remove(key)
return true
})
}
// extractGroupingAttributes extracts the keys and values of the specified Attributes
// that match the attribute keys used for grouping
// Returns:
// - whether any attribute matched (true) or none (false)
// - the extracted AttributeMap of matching keys and their corresponding values
func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) {
groupingAttributes := pcommon.NewMap()
foundMatch := false
for _, attrKey := range gap.groupByKeys {
attrVal, found := attrMap.Get(attrKey)
if found {
attrVal.CopyTo(groupingAttributes.PutEmpty(attrKey))
foundMatch = true
}
}
return foundMatch, groupingAttributes
}
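// Illustrative sketch, not part of the original file: only the configured
// keys are copied out of the attribute map; everything else stays behind.
// The attribute names are hypothetical.
func exampleExtractGroupingAttributes(gap *groupByAttrsProcessor) {
	attrs := pcommon.NewMap()
	attrs.PutStr("host.name", "node-1")
	attrs.PutInt("http.status_code", 200)
	// With groupByKeys = ["host.name"]: matched == true and extracted holds
	// only {"host.name": "node-1"}.
	matched, extracted := gap.extractGroupingAttributes(attrs)
	_, _ = matched, extracted
}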
// getMetricInInstrumentationLibrary searches for a metric with the same name and type in the given ScopeMetrics and returns it. If nothing is found, it is created.
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric {
// Loop through all metrics and try to find one that matches the searched
// metric (by name and type)
for i := 0; i < ilm.Metrics().Len(); i++ {
metric := ilm.Metrics().At(i)
if metric.Name() == searchedMetric.Name() && metric.Type() == searchedMetric.Type() {
return metric
}
}
// The metric was not found, so create a new one with the same name and type
metric := ilm.Metrics().AppendEmpty()
metric.SetDescription(searchedMetric.Description())
metric.SetName(searchedMetric.Name())
metric.SetUnit(searchedMetric.Unit())
searchedMetric.Metadata().CopyTo(metric.Metadata())
// Move other special type specific values
//exhaustive:enforce
switch searchedMetric.Type() {
case pmetric.MetricTypeHistogram:
metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality())
case pmetric.MetricTypeExponentialHistogram:
metric.SetEmptyExponentialHistogram().SetAggregationTemporality(searchedMetric.ExponentialHistogram().AggregationTemporality())
case pmetric.MetricTypeSum:
metric.SetEmptySum().SetAggregationTemporality(searchedMetric.Sum().AggregationTemporality())
metric.Sum().SetIsMonotonic(searchedMetric.Sum().IsMonotonic())
case pmetric.MetricTypeGauge:
metric.SetEmptyGauge()
case pmetric.MetricTypeSummary:
metric.SetEmptySummary()
case pmetric.MetricTypeEmpty:
}
return metric
}
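// Illustrative sketch, not part of the original file: metric identity here is
// name plus type, so a gauge and a sum sharing the name "requests" end up as
// two distinct metrics in the target scope.
func exampleMetricIdentity(ilm pmetric.ScopeMetrics) {
	g := pmetric.NewMetric()
	g.SetName("requests")
	g.SetEmptyGauge()
	s := pmetric.NewMetric()
	s.SetName("requests")
	s.SetEmptySum()
	_ = getMetricInInstrumentationLibrary(ilm, g) // appends a gauge "requests"
	_ = getMetricInInstrumentationLibrary(ilm, s) // appends a separate sum "requests"
}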
// Returns the Metric in the appropriate Resource matching with the specified Attributes
func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes(
ctx context.Context,
mg *metricsGroup,
originResourceMetrics pmetric.ResourceMetrics,
ilm pmetric.ScopeMetrics,
metric pmetric.Metric,
attributes pcommon.Map,
) pmetric.Metric {
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes)
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1)
// These attributes are going to be moved from datapoint to resource level,
// so we can delete those on the datapoint
deleteAttributes(requiredAttributes, attributes)
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedMetrics.Add(ctx, 1)
}
// Get the ResourceMetrics matching with these attributes
groupedResourceMetrics := mg.findOrCreateResourceMetrics(originResourceMetrics.Resource(), requiredAttributes)
// Get the corresponding instrumentation library
groupedInstrumentationLibrary := matchingScopeMetrics(groupedResourceMetrics, ilm.Scope())
// Return the metric in this resource
return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package logdedupprocessor provides a processor that deduplicates logs, emitting one aggregated record with a count per duplicate group.
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"errors"
"fmt"
"strings"
"time"
"go.opentelemetry.io/collector/component"
)
// Config defaults
const (
// defaultInterval is the default export interval.
defaultInterval = 10 * time.Second
// defaultLogCountAttribute is the default log count attribute
defaultLogCountAttribute = "log_count"
// defaultTimezone is the default timezone
defaultTimezone = "UTC"
// bodyField is the name of the body field
bodyField = "body"
// attributeField is the name of the attribute field
attributeField = "attributes"
)
// Config errors
var (
errInvalidLogCountAttribute = errors.New("log_count_attribute must be set")
errInvalidInterval = errors.New("interval must be greater than 0")
errCannotExcludeBody = errors.New("cannot exclude the entire body")
errCannotIncludeBody = errors.New("cannot include the entire body")
)
// Config is the config of the processor.
type Config struct {
LogCountAttribute string `mapstructure:"log_count_attribute"`
Interval time.Duration `mapstructure:"interval"`
Timezone string `mapstructure:"timezone"`
ExcludeFields []string `mapstructure:"exclude_fields"`
IncludeFields []string `mapstructure:"include_fields"`
Conditions []string `mapstructure:"conditions"`
}
// createDefaultConfig returns the default config for the processor.
func createDefaultConfig() component.Config {
return &Config{
LogCountAttribute: defaultLogCountAttribute,
Interval: defaultInterval,
Timezone: defaultTimezone,
ExcludeFields: []string{},
IncludeFields: []string{},
Conditions: []string{},
}
}
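// A minimal collector configuration sketch for this processor, not part of
// the original file; it assumes the conventional component id "logdedup" and
// the mapstructure tags declared on Config above:
//
//	processors:
//	  logdedup:
//	    interval: 10s
//	    log_count_attribute: log_count
//	    timezone: UTC
//	    exclude_fields:
//	      - attributes.request.id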
// Validate validates the configuration
func (c Config) Validate() error {
if c.Interval <= 0 {
return errInvalidInterval
}
if c.LogCountAttribute == "" {
return errInvalidLogCountAttribute
}
_, err := time.LoadLocation(c.Timezone)
if err != nil {
return fmt.Errorf("timezone is invalid: %w", err)
}
if len(c.ExcludeFields) > 0 && len(c.IncludeFields) > 0 {
return errors.New("cannot define both exclude_fields and include_fields")
}
if err = c.validateExcludeFields(); err != nil {
return err
}
if err = c.validateIncludeFields(); err != nil {
return err
}
return nil
}
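// Illustrative sketch, not part of the original file: excluding the entire
// body is rejected, while a scoped sub-field passes validation. The field
// paths are hypothetical.
func exampleValidateExcludeFields() {
	cfg := *createDefaultConfig().(*Config)
	cfg.ExcludeFields = []string{"body"}
	_ = cfg.Validate() // errCannotExcludeBody
	cfg.ExcludeFields = []string{"attributes.env"}
	_ = cfg.Validate() // nil: a scoped attribute sub-field is allowed
}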
// validateExcludeFields validates the exclude fields
func (c Config) validateExcludeFields() error {
knownExcludeFields := make(map[string]struct{})
for _, field := range c.ExcludeFields {
// Special check to make sure the entire body is not excluded
if field == bodyField {
return errCannotExcludeBody
}
// Split and ensure the field starts with `body` or `attributes`
parts := strings.Split(field, fieldDelimiter)
if parts[0] != bodyField && parts[0] != attributeField {
return fmt.Errorf("an excludefield must start with %s or %s", bodyField, attributeField)
}
// If a field is valid make sure we haven't already seen it
if _, ok := knownExcludeFields[field]; ok {
return fmt.Errorf("duplicate exclude_field %s", field)
}
knownExcludeFields[field] = struct{}{}
}
return nil
}
// validateIncludeFields validates the include fields
func (c Config) validateIncludeFields() error {
knownFields := make(map[string]struct{})
for _, field := range c.IncludeFields {
// Special check to make sure the entire body is not included
if field == bodyField {
return errCannotIncludeBody
}
// Split and ensure the field starts with `body` or `attributes`
parts := strings.Split(field, fieldDelimiter)
if parts[0] != bodyField && parts[0] != attributeField {
return fmt.Errorf("an include_fields must start with %s or %s", bodyField, attributeField)
}
// If a field is valid make sure we haven't already seen it
if _, ok := knownFields[field]; ok {
return fmt.Errorf("duplicate include_fields %s", field)
}
knownFields[field] = struct{}{}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// Attribute names for first and last observed timestamps
const (
firstObservedTSAttr = "first_observed_timestamp"
lastObservedTSAttr = "last_observed_timestamp"
)
// timeNow can be reassigned for testing
var timeNow = time.Now
// logAggregator tracks the number of times a specific logRecord has been seen.
type logAggregator struct {
resources map[uint64]*resourceAggregator
logCountAttribute string
timezone *time.Location
telemetryBuilder *metadata.TelemetryBuilder
dedupFields []string
}
// newLogAggregator creates a new logAggregator.
func newLogAggregator(logCountAttribute string, timezone *time.Location, telemetryBuilder *metadata.TelemetryBuilder, dedupFields []string) *logAggregator {
return &logAggregator{
resources: make(map[uint64]*resourceAggregator),
logCountAttribute: logCountAttribute,
timezone: timezone,
telemetryBuilder: telemetryBuilder,
dedupFields: dedupFields,
}
}
// Export exports the aggregated log records as plog.Logs
func (l *logAggregator) Export(ctx context.Context) plog.Logs {
logs := plog.NewLogs()
for _, resourceAggregator := range l.resources {
rl := logs.ResourceLogs().AppendEmpty()
resourceAggregator.resource.CopyTo(rl.Resource())
for _, scopeAggregator := range resourceAggregator.scopeCounters {
sl := rl.ScopeLogs().AppendEmpty()
scopeAggregator.scope.CopyTo(sl.Scope())
for _, logAggregator := range scopeAggregator.logCounters {
// Record the number of aggregated log records
l.telemetryBuilder.DedupProcessorAggregatedLogs.Record(ctx, logAggregator.count)
lr := sl.LogRecords().AppendEmpty()
logAggregator.logRecord.CopyTo(lr)
// Set log record timestamps
lr.SetTimestamp(pcommon.NewTimestampFromTime(timeNow()))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(logAggregator.firstObservedTimestamp))
// Add attributes for log count and first/last observed timestamps
lr.Attributes().EnsureCapacity(lr.Attributes().Len() + 3)
lr.Attributes().PutInt(l.logCountAttribute, logAggregator.count)
firstTimestampStr := logAggregator.firstObservedTimestamp.In(l.timezone).Format(time.RFC3339)
lr.Attributes().PutStr(firstObservedTSAttr, firstTimestampStr)
lastTimestampStr := logAggregator.lastObservedTimestamp.In(l.timezone).Format(time.RFC3339)
lr.Attributes().PutStr(lastObservedTSAttr, lastTimestampStr)
}
}
}
return logs
}
// Add adds the logRecord to the resource aggregator that is identified by the resource attributes
func (l *logAggregator) Add(resource pcommon.Resource, scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
key := getResourceKey(resource)
resourceAggregator, ok := l.resources[key]
if !ok {
resourceAggregator = newResourceAggregator(resource, l.dedupFields)
l.resources[key] = resourceAggregator
}
resourceAggregator.Add(scope, logRecord)
}
// Reset resets the aggregator.
func (l *logAggregator) Reset() {
l.resources = make(map[uint64]*resourceAggregator)
}
// resourceAggregator dimensions the counter by resource.
type resourceAggregator struct {
resource pcommon.Resource
scopeCounters map[uint64]*scopeAggregator
dedupFields []string
}
// newResourceAggregator creates a new resourceAggregator.
func newResourceAggregator(resource pcommon.Resource, dedupFields []string) *resourceAggregator {
return &resourceAggregator{
resource: resource,
scopeCounters: make(map[uint64]*scopeAggregator),
dedupFields: dedupFields,
}
}
// Add increments the counter that the logRecord matches.
func (r *resourceAggregator) Add(scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
key := getScopeKey(scope)
scopeAggregator, ok := r.scopeCounters[key]
if !ok {
scopeAggregator = newScopeAggregator(scope, r.dedupFields)
r.scopeCounters[key] = scopeAggregator
}
scopeAggregator.Add(logRecord)
}
// scopeAggregator dimensions the counter by scope.
type scopeAggregator struct {
scope pcommon.InstrumentationScope
logCounters map[uint64]*logCounter
dedupFields []string
}
// newScopeAggregator creates a new scopeAggregator.
func newScopeAggregator(scope pcommon.InstrumentationScope, dedupFields []string) *scopeAggregator {
return &scopeAggregator{
scope: scope,
logCounters: make(map[uint64]*logCounter),
dedupFields: dedupFields,
}
}
// Add increments the counter that the logRecord matches.
func (s *scopeAggregator) Add(logRecord plog.LogRecord) {
key := getLogKey(logRecord, s.dedupFields)
lc, ok := s.logCounters[key]
if !ok {
lc = newLogCounter(logRecord)
s.logCounters[key] = lc
}
lc.Increment()
}
// logCounter is a counter for a log record.
type logCounter struct {
logRecord plog.LogRecord
firstObservedTimestamp time.Time
lastObservedTimestamp time.Time
count int64
}
// newLogCounter creates a new logCounter.
func newLogCounter(logRecord plog.LogRecord) *logCounter {
return &logCounter{
logRecord: logRecord,
count: 0,
firstObservedTimestamp: timeNow().UTC(),
lastObservedTimestamp: timeNow().UTC(),
}
}
// Increment increments the counter.
func (a *logCounter) Increment() {
a.lastObservedTimestamp = timeNow().UTC()
a.count++
}
// getResourceKey creates a unique hash for the resource to use as a map key
func getResourceKey(resource pcommon.Resource) uint64 {
return pdatautil.Hash64(
pdatautil.WithMap(resource.Attributes()),
)
}
// getScopeKey creates a unique hash for the scope to use as a map key
func getScopeKey(scope pcommon.InstrumentationScope) uint64 {
return pdatautil.Hash64(
pdatautil.WithMap(scope.Attributes()),
pdatautil.WithString(scope.Name()),
pdatautil.WithString(scope.Version()),
)
}
// getLogKey creates a unique hash for the log record to use as a map key.
// If dedupFields is non-empty, it is used to determine the fields whose values are hashed.
func getLogKey(logRecord plog.LogRecord, dedupFields []string) uint64 {
if len(dedupFields) > 0 {
var opts []pdatautil.HashOption
for _, field := range dedupFields {
parts := splitField(field)
var m pcommon.Map
switch parts[0] {
case bodyField:
if logRecord.Body().Type() == pcommon.ValueTypeMap {
m = logRecord.Body().Map()
}
case attributeField:
m = logRecord.Attributes()
}
value, ok := getKeyValue(m, parts[1:])
if ok {
opts = append(opts, pdatautil.WithString(value.AsString()))
}
}
if len(opts) > 0 {
return pdatautil.Hash64(opts...)
}
}
return pdatautil.Hash64(
pdatautil.WithMap(logRecord.Attributes()),
pdatautil.WithValue(logRecord.Body()),
pdatautil.WithString(logRecord.SeverityNumber().String()),
pdatautil.WithString(logRecord.SeverityText()),
)
}
func getKeyValue(valueMap pcommon.Map, keyParts []string) (pcommon.Value, bool) {
nextKeyPart, remainingParts := keyParts[0], keyParts[1:]
// Look for the value associated with the next key part.
// If we don't find it then return
value, ok := valueMap.Get(nextKeyPart)
if !ok {
return pcommon.NewValueEmpty(), false
}
// No more key parts, which means we have found the value
if len(remainingParts) == 0 {
return valueMap.Get(nextKeyPart)
}
// If the value is a map then recurse through with the remaining parts
if value.Type() == pcommon.ValueTypeMap {
return getKeyValue(value.Map(), remainingParts)
}
return pcommon.NewValueEmpty(), false
}
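// Illustrative sketch, not part of the original file: with the hypothetical
// dedup field "attributes.env", two records that differ only in body text
// hash to the same key and are therefore counted together.
func exampleLogKey() bool {
	a := plog.NewLogRecord()
	a.Attributes().PutStr("env", "prod")
	a.Body().SetStr("first message")
	b := plog.NewLogRecord()
	b.Attributes().PutStr("env", "prod")
	b.Body().SetStr("second message")
	fields := []string{"attributes.env"}
	return getLogKey(a, fields) == getLogKey(b, fields) // true
}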
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// NewFactory creates a new factory for the processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
)
}
// createLogsProcessor creates a log processor.
func createLogsProcessor(_ context.Context, settings processor.Settings, cfg component.Config, consumer consumer.Logs) (processor.Logs, error) {
processorCfg, ok := cfg.(*Config)
if !ok {
return nil, fmt.Errorf("invalid config type: %+v", cfg)
}
if err := processorCfg.Validate(); err != nil {
return nil, err
}
	p, err := newProcessor(processorCfg, consumer, settings)
	if err != nil {
		return nil, fmt.Errorf("error creating processor: %w", err)
	}
	if len(processorCfg.Conditions) == 0 {
		p.conditions = nil
	} else {
		conditions, err := filterottl.NewBoolExprForLog(
			processorCfg.Conditions,
			filterottl.StandardLogFuncs(),
			ottl.PropagateError,
			settings.TelemetrySettings,
		)
		if err != nil {
			return nil, fmt.Errorf("invalid condition: %w", err)
		}
		p.conditions = conditions
	}
	return p, nil
}
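// An illustrative configuration sketch, not part of the original file: OTTL
// conditions restrict which records are deduplicated, while non-matching
// records pass straight through to the next consumer. The attribute value is
// hypothetical:
//
//	processors:
//	  logdedup:
//	    conditions:
//	      - attributes["service.name"] == "noisy-service"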
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"fmt"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
)
const (
// fieldDelimiter is the delimiter used to split a field key into its parts.
fieldDelimiter = "."
// fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key.
fieldEscapeKeyReplacement = "{TEMP_REPLACE}"
)
// fieldRemover handles removing excluded fields from log records
type fieldRemover struct {
fields []*field
}
// field represents a field and its compound key to match on
type field struct {
keyParts []string
}
// newFieldRemover creates a new field remover based on the passed in field keys
func newFieldRemover(fieldKeys []string) *fieldRemover {
fe := &fieldRemover{
fields: make([]*field, 0, len(fieldKeys)),
}
for _, f := range fieldKeys {
fe.fields = append(fe.fields, &field{
keyParts: splitField(f),
})
}
return fe
}
// RemoveFields removes any body or attribute fields that match in the log record
func (fe *fieldRemover) RemoveFields(logRecord plog.LogRecord) {
for _, field := range fe.fields {
field.removeField(logRecord)
}
}
// removeField removes the field from the log record if it exists
func (f *field) removeField(logRecord plog.LogRecord) {
firstPart, remainingParts := f.keyParts[0], f.keyParts[1:]
switch firstPart {
case bodyField:
// If body is a map then recurse through to remove the field
if logRecord.Body().Type() == pcommon.ValueTypeMap {
removeFieldFromMap(logRecord.Body().Map(), remainingParts)
}
case attributeField:
// Remove all attributes
if len(remainingParts) == 0 {
logRecord.Attributes().Clear()
return
}
// Recurse through map and remove fields
removeFieldFromMap(logRecord.Attributes(), remainingParts)
}
}
// removeFieldFromMap recurses through the map and removes the field if it's found.
func removeFieldFromMap(valueMap pcommon.Map, keyParts []string) {
nextKeyPart, remainingParts := keyParts[0], keyParts[1:]
// Look for the value associated with the next key part.
// If we don't find it then return
value, ok := valueMap.Get(nextKeyPart)
if !ok {
return
}
// No more key parts, which means we have found the value, so remove it
if len(remainingParts) == 0 {
valueMap.Remove(nextKeyPart)
return
}
// If the value is a map then recurse through with the remaining parts
if value.Type() == pcommon.ValueTypeMap {
removeFieldFromMap(value.Map(), remainingParts)
}
}
// splitField splits a field key into its parts.
// It replaces escaped delimiters with the full delimiter after splitting.
func splitField(fieldKey string) []string {
escapedKey := strings.ReplaceAll(fieldKey, fmt.Sprintf("\\%s", fieldDelimiter), fieldEscapeKeyReplacement)
keyParts := strings.Split(escapedKey, fieldDelimiter)
// Replace the temporarily escaped delimiters with the actual delimiter.
for i := range keyParts {
keyParts[i] = strings.ReplaceAll(keyParts[i], fieldEscapeKeyReplacement, fieldDelimiter)
}
return keyParts
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
"go.opentelemetry.io/otel/trace"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
DedupProcessorAggregatedLogs metric.Int64Histogram
level configtelemetry.Level
}
// telemetryBuilderOption applies changes to default builder.
type telemetryBuilderOption func(*TelemetryBuilder)
// WithLevel sets the current telemetry level for the component.
func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption {
return func(builder *TelemetryBuilder) {
builder.level = lvl
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{level: configtelemetry.LevelBasic}
for _, op := range options {
op(&builder)
}
var err, errs error
if builder.level >= configtelemetry.LevelBasic {
builder.meter = Meter(settings)
} else {
builder.meter = noop.Meter{}
}
builder.DedupProcessorAggregatedLogs, err = builder.meter.Int64Histogram(
"otelcol_dedup_processor_aggregated_logs",
metric.WithDescription("Number of log records that were aggregated together."),
metric.WithUnit("{records}"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"fmt"
"sync"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/processor"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// logDedupProcessor is a processor that counts duplicate instances of logs.
type logDedupProcessor struct {
emitInterval time.Duration
conditions *ottl.ConditionSequence[ottllog.TransformContext]
aggregator *logAggregator
remover *fieldRemover
nextConsumer consumer.Logs
logger *zap.Logger
cancel context.CancelFunc
wg sync.WaitGroup
mux sync.Mutex
}
func newProcessor(cfg *Config, nextConsumer consumer.Logs, settings processor.Settings) (*logDedupProcessor, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(settings.TelemetrySettings)
if err != nil {
return nil, fmt.Errorf("failed to create telemetry builder: %w", err)
}
// This should not happen due to config validation, but we check anyway.
timezone, err := time.LoadLocation(cfg.Timezone)
if err != nil {
return nil, fmt.Errorf("invalid timezone: %w", err)
}
return &logDedupProcessor{
emitInterval: cfg.Interval,
aggregator: newLogAggregator(cfg.LogCountAttribute, timezone, telemetryBuilder, cfg.IncludeFields),
remover: newFieldRemover(cfg.ExcludeFields),
nextConsumer: nextConsumer,
logger: settings.Logger,
}, nil
}
// Start starts the processor.
func (p *logDedupProcessor) Start(ctx context.Context, _ component.Host) error {
ctx, cancel := context.WithCancel(ctx)
p.cancel = cancel
p.wg.Add(1)
go p.handleExportInterval(ctx)
return nil
}
// Capabilities returns the consumer's capabilities.
func (p *logDedupProcessor) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: true}
}
// Shutdown stops the processor.
func (p *logDedupProcessor) Shutdown(_ context.Context) error {
if p.cancel != nil {
// Call cancel to stop the export interval goroutine and wait for it to finish.
p.cancel()
p.wg.Wait()
}
return nil
}
// ConsumeLogs processes the logs.
func (p *logDedupProcessor) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
p.mux.Lock()
defer p.mux.Unlock()
for i := 0; i < pl.ResourceLogs().Len(); i++ {
rl := pl.ResourceLogs().At(i)
resource := rl.Resource()
for j := 0; j < rl.ScopeLogs().Len(); j++ {
sl := rl.ScopeLogs().At(j)
scope := sl.Scope()
logs := sl.LogRecords()
logs.RemoveIf(func(logRecord plog.LogRecord) bool {
if p.conditions == nil {
p.aggregateLog(logRecord, scope, resource)
return true
}
logCtx := ottllog.NewTransformContext(logRecord, scope, resource, sl, rl)
logMatch, err := p.conditions.Eval(ctx, logCtx)
if err != nil {
p.logger.Error("error matching conditions", zap.Error(err))
return false
}
if logMatch {
p.aggregateLog(logRecord, scope, resource)
}
return logMatch
})
}
}
// Immediately consume any logs that didn't match any condition
if pl.LogRecordCount() > 0 {
err := p.nextConsumer.ConsumeLogs(ctx, pl)
if err != nil {
p.logger.Error("failed to consume logs", zap.Error(err))
}
}
return nil
}
func (p *logDedupProcessor) aggregateLog(logRecord plog.LogRecord, scope pcommon.InstrumentationScope, resource pcommon.Resource) {
p.remover.RemoveFields(logRecord)
p.aggregator.Add(resource, scope, logRecord)
}
// handleExportInterval exports aggregated logs at the configured interval.
func (p *logDedupProcessor) handleExportInterval(ctx context.Context) {
defer p.wg.Done()
ticker := time.NewTicker(p.emitInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
// Export any remaining logs
p.exportLogs(ctx)
if err := ctx.Err(); err != context.Canceled {
p.logger.Error("context error", zap.Error(err))
}
return
case <-ticker.C:
p.exportLogs(ctx)
}
}
}
// exportLogs exports the logs to the next consumer.
func (p *logDedupProcessor) exportLogs(ctx context.Context) {
p.mux.Lock()
defer p.mux.Unlock()
logs := p.aggregator.Export(ctx)
// Only send logs if we have some
if logs.LogRecordCount() > 0 {
err := p.nextConsumer.ConsumeLogs(ctx, logs)
if err != nil {
p.logger.Error("failed to consume logs", zap.Error(err))
}
}
p.aggregator.Reset()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"fmt"
"math"
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)
type AttributeSource string
const (
traceIDAttributeSource = AttributeSource("traceID")
recordAttributeSource = AttributeSource("record")
defaultAttributeSource = traceIDAttributeSource
)
var validAttributeSource = map[AttributeSource]bool{
traceIDAttributeSource: true,
recordAttributeSource: true,
}
// Config has the configuration guiding the sampler processor.
type Config struct {
// SamplingPercentage is the percentage rate at which traces or logs are
// sampled. Defaults to zero, i.e., no sampling. Values greater than or equal
// to 100 are treated as "sample all traces/logs".
SamplingPercentage float32 `mapstructure:"sampling_percentage"`
// HashSeed allows one to configure the hashing seed. This is important in scenarios where multiple layers of collectors
// have different sampling rates: if they all use the same seed, data that passes one layer may deterministically pass
// the others despite the differing rates. Configuring different seeds avoids that correlation.
HashSeed uint32 `mapstructure:"hash_seed"`
// Mode selects the sampling behavior. Supported values:
//
// - "hash_seed": the legacy behavior of this processor.
// Using an FNV hash combined with the HashSeed value, this
// sampler performs a non-consistent probabilistic
// downsampling. The number of spans output is expected to
// equal SamplingPercentage (as a ratio) times the number of
// spans input, assuming good behavior from FNV and good
// entropy in the hashed attributes or TraceID.
//
// - "equalizing": Using an OTel-specified consistent sampling
// mechanism, this sampler selectively reduces the effective
// sampling probability of arriving spans. This can be
// useful to select a small fraction of complete traces from
// a stream with mixed sampling rates. The rate of spans
// passing through depends on how much sampling has already
// been applied. If an arriving span was head sampled at
// the same probability it passes through. If the span
// arrives with lower probability, a warning is logged
// because it means this sampler is configured with too
// large a sampling probability to ensure complete traces.
//
// - "proportional": Using an OTel-specified consistent sampling
// mechanism, this sampler reduces the effective sampling
// probability of each span by `SamplingProbability`.
Mode SamplerMode `mapstructure:"mode"`
// FailClosed indicates to not sample data (the processor will
// fail "closed") in case of error, such as failure to parse
// the tracestate field or missing the randomness attribute.
//
// By default, failure cases are sampled (the processor
// fails "open"). Sampling priority-based decisions are made after
// FailClosed is processed, making it possible to sample
// despite errors using priority.
FailClosed bool `mapstructure:"fail_closed"`
// SamplingPrecision is how many hex digits of sampling
// threshold will be encoded, from 1 up to 14. Default is 4.
// 0 is treated as full precision.
SamplingPrecision int `mapstructure:"sampling_precision"`
///////
// Logs only fields below.
// AttributeSource (logs only) defines where to look for the attribute referenced by from_attribute. The allowed values are
// `traceID` or `record`. Default is `traceID`.
AttributeSource `mapstructure:"attribute_source"`
// FromAttribute (logs only) The optional name of a log record attribute used for sampling purposes, such as a
// unique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`.
FromAttribute string `mapstructure:"from_attribute"`
// SamplingPriority (logs only) enables using a log record attribute as the sampling priority of the log record.
SamplingPriority string `mapstructure:"sampling_priority"`
}
var _ component.Config = (*Config)(nil)
// Validate checks if the processor configuration is valid
func (cfg *Config) Validate() error {
pct := float64(cfg.SamplingPercentage)
if math.IsInf(pct, 0) || math.IsNaN(pct) {
return fmt.Errorf("sampling rate is invalid: %f%%", cfg.SamplingPercentage)
}
ratio := pct / 100.0
switch {
case ratio < 0:
return fmt.Errorf("sampling rate is negative: %f%%", cfg.SamplingPercentage)
case ratio == 0:
// Special case
case ratio < sampling.MinSamplingProbability:
// Too-small case
return fmt.Errorf("sampling rate is too small: %g%%", cfg.SamplingPercentage)
default:
// Note that ratio > 1 is specifically allowed by the README, taken to mean 100%
}
if cfg.AttributeSource != "" && !validAttributeSource[cfg.AttributeSource] {
return fmt.Errorf("invalid attribute source: %v. Expected: %v or %v", cfg.AttributeSource, traceIDAttributeSource, recordAttributeSource)
}
if cfg.SamplingPrecision == 0 {
return fmt.Errorf("invalid sampling precision: 0")
} else if cfg.SamplingPrecision > sampling.NumHexDigits {
return fmt.Errorf("sampling precision is too great, should be <= 14: %d", cfg.SamplingPrecision)
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate mdatagen metadata.yaml
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
// The default precision is 4 hex digits, slightly more than the
// original component logic's 14 bits of precision.
const defaultPrecision = 4
// NewFactory returns a new factory for the Probabilistic sampler processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability))
}
func createDefaultConfig() component.Config {
return &Config{
AttributeSource: defaultAttributeSource,
FailClosed: true,
Mode: modeUnset,
SamplingPrecision: defaultPrecision,
}
}
// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
return newTracesProcessor(ctx, set, cfg.(*Config), nextConsumer)
}
// createLogsProcessor creates a log processor based on this config.
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
return newLogsProcessor(ctx, set, nextConsumer, cfg.(*Config))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"encoding/binary"
"hash/fnv"
)
// computeHash creates a hash using the FNV-1a algorithm
func computeHash(b []byte, seed uint32) uint32 {
hash := fnv.New32a()
// the implementation of fnv.Write() never returns an error; see hash/fnv/fnv.go
_, _ = hash.Write(i32tob(seed))
_, _ = hash.Write(b)
return hash.Sum32()
}
// i32tob converts a seed to a byte array to be used as part of fnv.Write()
func i32tob(val uint32) []byte {
r := make([]byte, 4)
binary.LittleEndian.PutUint32(r, val)
return r
}
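// exampleHashBucket is an illustrative sketch, not part of the original
// source: it shows how hash_seed mode reduces the 32-bit FNV-1a hash to
// the 14-bit bucket compared against the scaled sampling rate. The input
// bytes and seed are arbitrary; bitMaskHashBuckets is defined in this
// package's sampler code.
func exampleHashBucket() uint32 {
	h := computeHash([]byte("record-id-123"), 22)
	return h & bitMaskHashBuckets // bucket in [0, 16383]
}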
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorProbabilisticSamplerCountLogsSampled metric.Int64Counter
ProcessorProbabilisticSamplerCountTracesSampled metric.Int64Counter
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorProbabilisticSamplerCountLogsSampled, err = builder.meter.Int64Counter(
"otelcol_processor_probabilistic_sampler_count_logs_sampled",
metric.WithDescription("Count of logs that were sampled or not"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorProbabilisticSamplerCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_probabilistic_sampler_count_traces_sampled",
metric.WithDescription("Count of traces that were sampled or not"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"errors"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
type logsProcessor struct {
sampler dataSampler
samplingPriority string
precision int
failClosed bool
logger *zap.Logger
telemetryBuilder *metadata.TelemetryBuilder
}
type recordCarrier struct {
record plog.LogRecord
parsed struct {
tvalue string
threshold sampling.Threshold
rvalue string
randomness sampling.Randomness
}
}
var _ samplingCarrier = &recordCarrier{}
func (rc *recordCarrier) get(key string) string {
val, ok := rc.record.Attributes().Get(key)
if !ok || val.Type() != pcommon.ValueTypeStr {
return ""
}
return val.Str()
}
func newLogRecordCarrier(l plog.LogRecord) (samplingCarrier, error) {
var ret error
carrier := &recordCarrier{
record: l,
}
if tvalue := carrier.get("sampling.threshold"); len(tvalue) != 0 {
th, err := sampling.TValueToThreshold(tvalue)
if err != nil {
ret = errors.Join(err, ret)
} else {
carrier.parsed.tvalue = tvalue
carrier.parsed.threshold = th
}
}
if rvalue := carrier.get("sampling.randomness"); len(rvalue) != 0 {
rnd, err := sampling.RValueToRandomness(rvalue)
if err != nil {
ret = errors.Join(err, ret)
} else {
carrier.parsed.rvalue = rvalue
carrier.parsed.randomness = rnd
}
}
return carrier, ret
}
func (rc *recordCarrier) threshold() (sampling.Threshold, bool) {
return rc.parsed.threshold, len(rc.parsed.tvalue) != 0
}
func (rc *recordCarrier) explicitRandomness() (randomnessNamer, bool) {
if len(rc.parsed.rvalue) == 0 {
return newMissingRandomnessMethod(), false
}
return newSamplingRandomnessMethod(rc.parsed.randomness), true
}
func (rc *recordCarrier) updateThreshold(th sampling.Threshold) error {
exist, has := rc.threshold()
if has && sampling.ThresholdLessThan(th, exist) {
return sampling.ErrInconsistentSampling
}
rc.record.Attributes().PutStr("sampling.threshold", th.TValue())
return nil
}
func (rc *recordCarrier) setExplicitRandomness(rnd randomnessNamer) {
rc.parsed.randomness = rnd.randomness()
rc.parsed.rvalue = rnd.randomness().RValue()
rc.record.Attributes().PutStr("sampling.randomness", rnd.randomness().RValue())
}
func (rc *recordCarrier) clearThreshold() {
rc.parsed.threshold = sampling.NeverSampleThreshold
rc.parsed.tvalue = ""
rc.record.Attributes().Remove("sampling.threshold")
}
func (rc *recordCarrier) reserialize() error {
return nil
}
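// Example (illustrative, not from the original source): a log record
// carrying the attributes
//
//	sampling.threshold: "8"               (t-value: 50% sampling probability)
//	sampling.randomness: "1a2b3c4d5e6f70" (explicit 14-hex-digit r-value)
//
// parses into a recordCarrier whose threshold() and explicitRandomness()
// both report true; a parse failure in either attribute is joined into
// the error returned by newLogRecordCarrier.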
func (*neverSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
// We return a fake randomness value, since it will not be used.
// This avoids a consistency check error for missing randomness.
lrc, err := newLogRecordCarrier(logRec)
return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), lrc, err
}
// randomnessFromLogRecord (hashingSampler) uses a hash function over
// the TraceID or logs attribute source.
func (th *hashingSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
rnd := newMissingRandomnessMethod()
lrc, err := newLogRecordCarrier(logRec)
if th.logsTraceIDEnabled {
value := logRec.TraceID()
if !value.IsEmpty() {
rnd = newTraceIDHashingMethod(randomnessFromBytes(value[:], th.hashSeed))
}
}
if isMissing(rnd) && th.logsRandomnessSourceAttribute != "" {
if value, ok := logRec.Attributes().Get(th.logsRandomnessSourceAttribute); ok {
by := getBytesFromValue(value)
if len(by) > 0 {
rnd = newAttributeHashingMethod(
th.logsRandomnessSourceAttribute,
randomnessFromBytes(by, th.hashSeed),
)
}
}
}
if err != nil {
// In this case, the sampling.randomness or sampling.threshold
// attributes had a parse error.
lrc = nil
} else if _, hasRnd := lrc.explicitRandomness(); hasRnd {
// If the log record contains a randomness value, do not update.
err = ErrRandomnessInUse
lrc = nil
} else if _, hasTh := lrc.threshold(); hasTh {
// If the log record contains a threshold value, do not update.
err = ErrThresholdInUse
lrc = nil
} else if !isMissing(rnd) {
// When no sampling information is already present and we have
// calculated new randomness, add it to the record.
lrc.setExplicitRandomness(rnd)
}
return rnd, lrc, err
}
// randomnessFromLogRecord (consistentTracestateCommon) uses OTEP 235 semantic
// conventions basing its decision only on the TraceID.
func (ctc *consistentTracestateCommon) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
lrc, err := newLogRecordCarrier(logRec)
rnd := newMissingRandomnessMethod()
if err != nil {
// Parse error in sampling.randomness or sampling.threshold
lrc = nil
} else if rv, hasRnd := lrc.explicitRandomness(); hasRnd {
rnd = rv
} else if tid := logRec.TraceID(); !tid.IsEmpty() {
rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(tid))
}
return rnd, lrc, err
}
// newLogsProcessor returns a processor.Logs that will perform head sampling according to the given
// configuration.
func newLogsProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Logs, cfg *Config) (processor.Logs, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
lsp := &logsProcessor{
sampler: makeSampler(cfg, true),
samplingPriority: cfg.SamplingPriority,
precision: cfg.SamplingPrecision,
failClosed: cfg.FailClosed,
logger: set.Logger,
telemetryBuilder: telemetryBuilder,
}
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
lsp.processLogs,
processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}
func (lsp *logsProcessor) processLogs(ctx context.Context, logsData plog.Logs) (plog.Logs, error) {
logsData.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool {
rl.ScopeLogs().RemoveIf(func(ill plog.ScopeLogs) bool {
ill.LogRecords().RemoveIf(func(l plog.LogRecord) bool {
return !commonShouldSampleLogic(
ctx,
l,
lsp.sampler,
lsp.failClosed,
lsp.sampler.randomnessFromLogRecord,
lsp.priorityFunc,
"logs sampler",
lsp.logger,
lsp.telemetryBuilder.ProcessorProbabilisticSamplerCountLogsSampled,
)
})
// Filter out empty ScopeLogs
return ill.LogRecords().Len() == 0
})
// Filter out empty ResourceLogs
return rl.ScopeLogs().Len() == 0
})
if logsData.ResourceLogs().Len() == 0 {
return logsData, processorhelper.ErrSkipProcessingData
}
return logsData, nil
}
func (lsp *logsProcessor) priorityFunc(logRec plog.LogRecord, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) {
// Note: in logs, unlike traces, the sampling priority
// attribute is interpreted as a request to be sampled.
if lsp.samplingPriority != "" {
priorityThreshold := lsp.logRecordToPriorityThreshold(logRec)
if priorityThreshold == sampling.NeverSampleThreshold {
threshold = priorityThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
} else if sampling.ThresholdLessThan(priorityThreshold, threshold) {
threshold = priorityThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
}
}
return rnd, threshold
}
func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sampling.Threshold {
if localPriority, ok := logRec.Attributes().Get(lsp.samplingPriority); ok {
// Potentially raise the sampling probability to minProb
minProb := 0.0
switch localPriority.Type() {
case pcommon.ValueTypeDouble:
minProb = localPriority.Double() / 100.0
case pcommon.ValueTypeInt:
minProb = float64(localPriority.Int()) / 100.0
}
if minProb != 0 {
if th, err := sampling.ProbabilityToThresholdWithPrecision(minProb, lsp.precision); err == nil {
// The record has supplied a valid alternative sampling probability
return th
}
}
}
return sampling.NeverSampleThreshold
}
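// Worked example (illustrative): with SamplingPriority set to "prio", a
// log record carrying the integer attribute prio=10 yields
// minProb = 10/100 = 0.10, raising that record's effective sampling
// probability to at least 10%. A record whose priority attribute is
// missing or zero maps to NeverSampleThreshold and is dropped.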
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"errors"
"fmt"
"strconv"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)
const (
// These four can happen at runtime and be returned by
// randomnessFromXXX()
ErrInconsistentArrivingTValue samplerError = "inconsistent arriving threshold: item should not have been sampled"
ErrMissingRandomness samplerError = "missing randomness"
ErrRandomnessInUse samplerError = "item has sampling randomness, equalizing or proportional mode recommended"
ErrThresholdInUse samplerError = "item has sampling threshold, equalizing or proportional mode recommended"
)
const (
// Hashing method: The constants below help translate user-friendly percentages
// to numbers directly used in sampling.
numHashBucketsLg2 = 14
numHashBuckets = 0x4000 // Using a power of 2 to avoid division.
bitMaskHashBuckets = numHashBuckets - 1
percentageScaleFactor = numHashBuckets / 100.0
)
// samplerErrors are conditions reported by the sampler that are somewhat
// ordinary; they are logged at debug level, while other errors log at info.
type samplerError string
var _ error = samplerError("")
func (s samplerError) Error() string {
return string(s)
}
// SamplerMode determines which of several modes is used for the
// sampling decision.
type SamplerMode string
const (
// HashSeed applies the hash/fnv hash function originally used in this component.
HashSeed SamplerMode = "hash_seed"
// Equalizing uses OpenTelemetry consistent probability
// sampling information (OTEP 235), applies an absolute
// threshold to equalize incoming sampling probabilities.
Equalizing SamplerMode = "equalizing"
// Proportional uses OpenTelemetry consistent probability
// sampling information (OTEP 235), multiplies incoming
// sampling probabilities.
Proportional SamplerMode = "proportional"
// defaultMode is applied when the mode is unset.
defaultMode SamplerMode = HashSeed
// modeUnset indicates the user has not configured the mode.
modeUnset SamplerMode = ""
)
type randomnessNamer interface {
randomness() sampling.Randomness
policyName() string
}
type randomnessMethod sampling.Randomness
func (rm randomnessMethod) randomness() sampling.Randomness {
return sampling.Randomness(rm)
}
type (
traceIDHashingMethod struct{ randomnessMethod }
traceIDW3CSpecMethod struct{ randomnessMethod }
samplingRandomnessMethod struct{ randomnessMethod }
samplingPriorityMethod struct{ randomnessMethod }
)
type missingRandomnessMethod struct{}
func (rm missingRandomnessMethod) randomness() sampling.Randomness {
return sampling.AllProbabilitiesRandomness
}
func (missingRandomnessMethod) policyName() string {
return "missing_randomness"
}
type attributeHashingMethod struct {
randomnessMethod
attribute string
}
func (am attributeHashingMethod) policyName() string {
return am.attribute
}
func (traceIDHashingMethod) policyName() string {
return "trace_id_hash"
}
func (samplingRandomnessMethod) policyName() string {
return "sampling_randomness"
}
func (traceIDW3CSpecMethod) policyName() string {
return "trace_id_w3c"
}
func (samplingPriorityMethod) policyName() string {
return "sampling_priority"
}
var (
_ randomnessNamer = missingRandomnessMethod{}
_ randomnessNamer = traceIDHashingMethod{}
_ randomnessNamer = traceIDW3CSpecMethod{}
_ randomnessNamer = samplingRandomnessMethod{}
_ randomnessNamer = samplingPriorityMethod{}
)
func newMissingRandomnessMethod() randomnessNamer {
return missingRandomnessMethod{}
}
func isMissing(rnd randomnessNamer) bool {
_, ok := rnd.(missingRandomnessMethod)
return ok
}
func newSamplingRandomnessMethod(rnd sampling.Randomness) randomnessNamer {
return samplingRandomnessMethod{randomnessMethod(rnd)}
}
func newTraceIDW3CSpecMethod(rnd sampling.Randomness) randomnessNamer {
return traceIDW3CSpecMethod{randomnessMethod(rnd)}
}
func newTraceIDHashingMethod(rnd sampling.Randomness) randomnessNamer {
return traceIDHashingMethod{randomnessMethod(rnd)}
}
func newSamplingPriorityMethod(rnd sampling.Randomness) randomnessNamer {
return samplingPriorityMethod{randomnessMethod(rnd)}
}
func newAttributeHashingMethod(attribute string, rnd sampling.Randomness) randomnessNamer {
return attributeHashingMethod{
randomnessMethod: randomnessMethod(rnd),
attribute: attribute,
}
}
// samplingCarrier conveys information about the underlying data item
// (whether span or log record) through the sampling decision.
type samplingCarrier interface {
// explicitRandomness returns a randomness value and a boolean
// indicating whether the item had sampling randomness
// explicitly set.
explicitRandomness() (randomnessNamer, bool)
// setExplicitRandomness updates the item with the signal-specific
// encoding for an explicit randomness value.
setExplicitRandomness(randomnessNamer)
// clearThreshold unsets a sampling threshold, which is used to
// clear information that breaks the expected sampling invariants
// described in OTEP 235.
clearThreshold()
// threshold returns a sampling threshold and a boolean
// indicating whether the item had sampling threshold
// explicitly set.
threshold() (sampling.Threshold, bool)
// updateThreshold modifies the sampling threshold. This
// returns an error if the updated sampling threshold has a
// lower adjusted count; the only permissible updates raise
// adjusted count (i.e., reduce sampling probability).
updateThreshold(sampling.Threshold) error
// reserialize re-encodes the updated sampling information
// into the item, if necessary. For Spans, this re-encodes
// the tracestate. This is a no-op for logs records.
reserialize() error
}
// dataSampler implements the logic of a sampling mode.
type dataSampler interface {
// decide reports the result based on a probabilistic decision.
decide(carrier samplingCarrier) sampling.Threshold
// randomnessFromSpan extracts randomness and returns a carrier specific to traces data.
randomnessFromSpan(s ptrace.Span) (randomness randomnessNamer, carrier samplingCarrier, err error)
// randomnessFromLogRecord extracts randomness and returns a carrier specific to logs data.
randomnessFromLogRecord(s plog.LogRecord) (randomness randomnessNamer, carrier samplingCarrier, err error)
}
func (sm *SamplerMode) UnmarshalText(in []byte) error {
switch mode := SamplerMode(in); mode {
case HashSeed,
Equalizing,
Proportional,
modeUnset:
*sm = mode
return nil
default:
return fmt.Errorf("unsupported sampler mode %q", mode)
}
}
// hashingSampler is the original hash-based calculation. It is an
// equalizing sampler with randomness calculation that matches the
// original implementation. This hash-based implementation is limited
// to 14 bits of precision.
type hashingSampler struct {
hashSeed uint32
tvalueThreshold sampling.Threshold
// Logs only: name of attribute to obtain randomness
logsRandomnessSourceAttribute string
// Logs only: whether the TraceID is used as the randomness source
logsTraceIDEnabled bool
}
func (th *hashingSampler) decide(_ samplingCarrier) sampling.Threshold {
return th.tvalueThreshold
}
// consistentTracestateCommon contains the common aspects of the
// Proportional and Equalizing sampler modes. These samplers sample
// using the TraceID and do not support the use of a logs source attribute.
type consistentTracestateCommon struct{}
// neverSampler always decides not to sample.
type neverSampler struct{}
func (*neverSampler) decide(_ samplingCarrier) sampling.Threshold {
return sampling.NeverSampleThreshold
}
// equalizingSampler raises thresholds up to a fixed value.
type equalizingSampler struct {
// TraceID-randomness-based calculation
tvalueThreshold sampling.Threshold
consistentTracestateCommon
}
func (te *equalizingSampler) decide(carrier samplingCarrier) sampling.Threshold {
if tv, has := carrier.threshold(); has && sampling.ThresholdLessThan(te.tvalueThreshold, tv) {
return tv
}
return te.tvalueThreshold
}
// proportionalSampler raises thresholds relative to incoming value.
type proportionalSampler struct {
// ratio in the range [2**-56, 1]
ratio float64
// precision is the precision in number of hex digits
precision int
consistentTracestateCommon
}
func (tp *proportionalSampler) decide(carrier samplingCarrier) sampling.Threshold {
incoming := 1.0
if tv, has := carrier.threshold(); has {
incoming = tv.Probability()
}
// There is a potential for the product probability to
// underflow, which is checked below.
threshold, err := sampling.ProbabilityToThresholdWithPrecision(incoming*tp.ratio, tp.precision)
// Check the only known error condition.
if errors.Is(err, sampling.ErrProbabilityRange) {
// Considered valid, a case where the sampling probability
// has fallen below the minimum supported value and simply
// becomes unsampled.
return sampling.NeverSampleThreshold
}
return threshold
}
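// Worked example (illustrative): a span arriving with t-value "8" (50%
// sampling probability) through a proportional sampler configured at 10%
// (ratio = 0.1) leaves with probability 0.5 * 0.1 = 0.05, i.e. a
// threshold encoding 5%.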
func getBytesFromValue(value pcommon.Value) []byte {
if value.Type() == pcommon.ValueTypeBytes {
return value.Bytes().AsRaw()
}
return []byte(value.AsString())
}
func randomnessFromBytes(b []byte, hashSeed uint32) sampling.Randomness {
hashed32 := computeHash(b, hashSeed)
hashed := uint64(hashed32 & bitMaskHashBuckets)
// Ordinarily, hashed is compared against an acceptance
// threshold i.e., sampled when hashed < scaledSamplerate,
// which has the form R < T with T in [1, 2^14] and
// R in [0, 2^14-1].
//
// Here, modify R to R' and T to T', so that the sampling
// equation has identical form to the specification, i.e., T'
// <= R', using:
//
// T' = numHashBuckets-T
// R' = numHashBuckets-1-R
//
// As a result, R' has the correct most-significant 14 bits to
// use in an R-value.
rprime14 := numHashBuckets - 1 - hashed
// There are 18 unused bits from the FNV hash function.
unused18 := uint64(hashed32 >> (32 - numHashBucketsLg2))
mixed28 := unused18 ^ (unused18 << 10)
// The 56 bit quantity here consists of, most- to least-significant:
// - 14 bits: R' = numHashBuckets - 1 - hashed
// - 28 bits: mixture of unused 18 bits
// - 14 bits: original `hashed`.
rnd56 := (rprime14 << 42) | (mixed28 << 14) | hashed
// Note: by construction:
// - OTel samplers make the same probabilistic decision with this r-value,
// - only 14 out of 56 bits are used in the sampling decision,
// - there are only 32 actual random bits.
rnd, _ := sampling.UnsignedToRandomness(rnd56)
return rnd
}
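// For example (illustrative): an item hashing to bucket 0 (hashed = 0)
// yields rprime14 = 0x3FFF, the maximum 14-bit prefix, so its r-value
// satisfies every threshold this mode can produce; this mirrors the
// legacy acceptance test hashed < scaledSamplerate, which bucket 0
// always passes.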
func consistencyCheck(rnd randomnessNamer, carrier samplingCarrier) error {
// Without randomness, do not check the threshold.
if isMissing(rnd) {
return ErrMissingRandomness
}
// When the carrier is nil, it means there was trouble parsing the
// tracestate or trace-related attributes. In this case, skip the
// consistency check.
if carrier == nil {
return nil
}
// Consistency check: if the TraceID is out of range, the
// TValue is a lie. If inconsistent, clear it and return an error.
if tv, has := carrier.threshold(); has {
if !tv.ShouldSample(rnd.randomness()) {
// In case we fail open, the threshold is cleared as
// recommended in the OTel spec.
carrier.clearThreshold()
return ErrInconsistentArrivingTValue
}
}
return nil
}
// makeSampler constructs a sampler. There are no errors, as the only
// potential error, an out-of-range probability, is corrected automatically
// according to the README, which allows percentages >100 to mean 100%.
//
// Extending this logic, we round very small probabilities up to the
// minimum supported value(s) which varies according to sampler mode.
func makeSampler(cfg *Config, isLogs bool) dataSampler {
// README allows percentages >100 to mean 100%.
pct := cfg.SamplingPercentage
if pct > 100 {
pct = 100
}
mode := cfg.Mode
if mode == modeUnset {
// Reasons to choose the legacy behavior include:
// (a) having set the hash seed
// (b) logs signal w/o trace ID source
if cfg.HashSeed != 0 || (isLogs && cfg.AttributeSource != traceIDAttributeSource) {
mode = HashSeed
} else {
mode = defaultMode
}
}
if pct == 0 {
return &neverSampler{}
}
// Note: convert to float64 before dividing by 100 to avoid loss of precision.
// If the probability is too small, round it up to the minimum.
ratio := float64(pct) / 100
// Like the pct > 100 test above, but for values too small to
// express in 14 bits of precision.
if ratio < sampling.MinSamplingProbability {
ratio = sampling.MinSamplingProbability
}
switch mode {
case Equalizing:
// The error case below is ignored, we have rounded the probability so
// that it is in-range
threshold, _ := sampling.ProbabilityToThresholdWithPrecision(ratio, cfg.SamplingPrecision)
return &equalizingSampler{
tvalueThreshold: threshold,
}
case Proportional:
return &proportionalSampler{
ratio: ratio,
precision: cfg.SamplingPrecision,
}
default: // i.e., HashSeed
// Note: the original hash function used in this code
// is preserved to ensure consistency across updates.
//
// uint32(pct * percentageScaleFactor)
//
// (a) carried out the multiplication in 32-bit precision
// (b) rounded to zero instead of nearest.
scaledSamplerate := uint32(pct * percentageScaleFactor)
if scaledSamplerate == 0 {
return &neverSampler{}
}
// Convert the accept threshold to a reject threshold,
// then shift it into 56-bit value.
reject := numHashBuckets - scaledSamplerate
reject56 := uint64(reject) << 42
threshold, _ := sampling.UnsignedToThreshold(reject56)
return &hashingSampler{
tvalueThreshold: threshold,
hashSeed: cfg.HashSeed,
// Logs specific:
logsTraceIDEnabled: cfg.AttributeSource == traceIDAttributeSource,
logsRandomnessSourceAttribute: cfg.FromAttribute,
}
}
}
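// Worked example (illustrative) for the hash_seed branch above: with
// SamplingPercentage = 25, scaledSamplerate = uint32(25 * 163.84) = 4096
// accept buckets, so reject = 16384 - 4096 = 12288 and the resulting
// threshold rejects 12288/16384 = 75% of items, i.e. 25% sampling.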
// randFunc returns randomness (w/ named policy), a carrier, and the error.
type randFunc[T any] func(T) (randomnessNamer, samplingCarrier, error)
// priorityFunc makes changes resulting from sampling priority.
type priorityFunc[T any] func(T, randomnessNamer, sampling.Threshold) (randomnessNamer, sampling.Threshold)
// commonShouldSampleLogic implements sampling on a per-item basis
// independent of the signal type, as embodied in the functional
// parameters:
func commonShouldSampleLogic[T any](
ctx context.Context,
item T,
sampler dataSampler,
failClosed bool,
randFunc randFunc[T],
priorityFunc priorityFunc[T],
description string,
logger *zap.Logger,
counter metric.Int64Counter,
) bool {
rnd, carrier, err := randFunc(item)
if err == nil {
err = consistencyCheck(rnd, carrier)
}
var threshold sampling.Threshold
if err != nil {
var se samplerError
if errors.As(err, &se) {
logger.Debug(description, zap.Error(err))
} else {
logger.Info(description, zap.Error(err))
}
if failClosed {
threshold = sampling.NeverSampleThreshold
} else {
threshold = sampling.AlwaysSampleThreshold
}
} else {
threshold = sampler.decide(carrier)
}
rnd, threshold = priorityFunc(item, rnd, threshold)
sampled := threshold.ShouldSample(rnd.randomness())
if sampled && carrier != nil {
// Note: updateThreshold limits loss of adjusted count, by
// preventing the threshold from being lowered, only allowing
// probability to fall and never to rise.
if err := carrier.updateThreshold(threshold); err != nil {
if errors.Is(err, sampling.ErrInconsistentSampling) {
// This is working-as-intended. You can't lower
// the threshold, it's illogical.
logger.Debug(description, zap.Error(err))
} else {
logger.Info(description, zap.Error(err))
}
}
if err := carrier.reserialize(); err != nil {
logger.Info(description, zap.Error(err))
}
}
counter.Add(ctx, 1, metric.WithAttributes(attribute.String("policy", rnd.policyName()), attribute.String("sampled", strconv.FormatBool(sampled))))
return sampled
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"strconv"
"strings"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
// samplingPriority has the semantic result of parsing the "sampling.priority"
// attribute per OpenTracing semantic conventions.
type samplingPriority int
const (
// deferDecision means that the decision whether a span will be "sampled" (i.e.,
// forwarded by the collector) is made by hashing the trace ID according
// to the configured sampling rate.
deferDecision samplingPriority = iota
// mustSampleSpan indicates that the span had a "sampling.priority" attribute
// greater than zero and is going to be sampled, i.e., forwarded by the
// collector.
mustSampleSpan
// doNotSampleSpan indicates that the span had a "sampling.priority" attribute
// equal to zero and is NOT going to be sampled, i.e., it won't be forwarded
// by the collector.
doNotSampleSpan
)
type traceProcessor struct {
sampler dataSampler
failClosed bool
logger *zap.Logger
telemetryBuilder *metadata.TelemetryBuilder
}
// tracestateCarrier conveys information about sampled spans between
// the call to parse incoming randomness/threshold and the call to
// decide.
type tracestateCarrier struct {
span ptrace.Span
sampling.W3CTraceState
}
var _ samplingCarrier = &tracestateCarrier{}
func newTracestateCarrier(s ptrace.Span) (samplingCarrier, error) {
var err error
tsc := &tracestateCarrier{
span: s,
}
tsc.W3CTraceState, err = sampling.NewW3CTraceState(s.TraceState().AsRaw())
return tsc, err
}
func (tc *tracestateCarrier) threshold() (sampling.Threshold, bool) {
return tc.W3CTraceState.OTelValue().TValueThreshold()
}
func (tc *tracestateCarrier) explicitRandomness() (randomnessNamer, bool) {
rnd, ok := tc.W3CTraceState.OTelValue().RValueRandomness()
if !ok {
return newMissingRandomnessMethod(), false
}
return newSamplingRandomnessMethod(rnd), true
}
func (tc *tracestateCarrier) updateThreshold(th sampling.Threshold) error {
return tc.W3CTraceState.OTelValue().UpdateTValueWithSampling(th)
}
func (tc *tracestateCarrier) setExplicitRandomness(rnd randomnessNamer) {
tc.W3CTraceState.OTelValue().SetRValue(rnd.randomness())
}
func (tc *tracestateCarrier) clearThreshold() {
tc.W3CTraceState.OTelValue().ClearTValue()
}
func (tc *tracestateCarrier) reserialize() error {
var w strings.Builder
err := tc.W3CTraceState.Serialize(&w)
if err == nil {
tc.span.TraceState().FromRaw(w.String())
}
return err
}
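// Example (illustrative, not from the original source): a span whose
// W3C tracestate contains
//
//	ot=th:8;rv:1a2b3c4d5e6f70
//
// parses into a tracestateCarrier reporting both a threshold (50%
// sampling probability) and explicit randomness; reserialize writes the
// updated OpenTelemetry section back into the span's tracestate.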
// newTracesProcessor returns a processor.Traces that will
// perform intermediate span sampling according to the given
// configuration.
func newTracesProcessor(ctx context.Context, set processor.Settings, cfg *Config, nextConsumer consumer.Traces) (processor.Traces, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
tp := &traceProcessor{
sampler: makeSampler(cfg, false),
failClosed: cfg.FailClosed,
logger: set.Logger,
telemetryBuilder: telemetryBuilder,
}
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
tp.processTraces,
processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}
func (th *hashingSampler) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) {
tid := s.TraceID()
tsc, err := newTracestateCarrier(s)
rnd := newMissingRandomnessMethod()
if !tid.IsEmpty() {
rnd = newTraceIDHashingMethod(randomnessFromBytes(tid[:], th.hashSeed))
}
// If the tracestate contains a proper R-value or T-value, we
// have to leave it alone. The user should not be using this
// sampler mode if they are using specified forms of consistent
// sampling in OTel.
if err != nil {
return rnd, nil, err
} else if _, has := tsc.explicitRandomness(); has {
err = ErrRandomnessInUse
tsc = nil
} else if _, has := tsc.threshold(); has {
err = ErrThresholdInUse
tsc = nil
} else {
// When no sampling information is present, add a
// Randomness value.
tsc.setExplicitRandomness(rnd)
}
return rnd, tsc, err
}
func (ctc *consistentTracestateCommon) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) {
rnd := newMissingRandomnessMethod()
tsc, err := newTracestateCarrier(s)
if err != nil {
tsc = nil
} else if rv, has := tsc.explicitRandomness(); has {
// When the tracestate is OK and has r-value, use it.
rnd = rv
} else if !s.TraceID().IsEmpty() {
rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(s.TraceID()))
}
return rnd, tsc, err
}
func (th *neverSampler) randomnessFromSpan(span ptrace.Span) (randomnessNamer, samplingCarrier, error) {
// We return a fake randomness value, since it will not be used.
// This avoids a consistency check error for missing randomness.
tsc, err := newTracestateCarrier(span)
return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), tsc, err
}
func (tp *traceProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool {
rs.ScopeSpans().RemoveIf(func(ils ptrace.ScopeSpans) bool {
ils.Spans().RemoveIf(func(s ptrace.Span) bool {
return !commonShouldSampleLogic(
ctx,
s,
tp.sampler,
tp.failClosed,
tp.sampler.randomnessFromSpan,
tp.priorityFunc,
"traces sampler",
tp.logger,
tp.telemetryBuilder.ProcessorProbabilisticSamplerCountTracesSampled,
)
})
// Filter out empty ScopeSpans
return ils.Spans().Len() == 0
})
// Filter out empty ResourceSpans
return rs.ScopeSpans().Len() == 0
})
if td.ResourceSpans().Len() == 0 {
return td, processorhelper.ErrSkipProcessingData
}
return td, nil
}
func (tp *traceProcessor) priorityFunc(s ptrace.Span, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) {
switch parseSpanSamplingPriority(s) {
case doNotSampleSpan:
// OpenTracing mentions this as a "hint". We take a stronger
// approach and do not sample the span since some may use it to
// remove specific spans from traces.
threshold = sampling.NeverSampleThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
case mustSampleSpan:
threshold = sampling.AlwaysSampleThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
case deferDecision:
// Note that the logs processor has very different logic here,
// but that in tracing the priority can only force to never or
// always.
}
return rnd, threshold
}
// parseSpanSamplingPriority checks if the span has the "sampling.priority" tag to
// decide if the span should be sampled or not. The usage of the tag follows the
// OpenTracing semantic tags:
// https://github.com/opentracing/specification/blob/main/semantic_conventions.md#span-tags-table
func parseSpanSamplingPriority(span ptrace.Span) samplingPriority {
attribMap := span.Attributes()
if attribMap.Len() <= 0 {
return deferDecision
}
samplingPriorityAttrib, ok := attribMap.Get("sampling.priority")
if !ok {
return deferDecision
}
// By default defer the decision.
decision := deferDecision
// Check different types, since various client libraries use different
// conventions regarding "sampling.priority". Besides the client libraries,
// it is also possible that the type was lost in translation between
// different formats.
switch samplingPriorityAttrib.Type() {
case pcommon.ValueTypeInt:
value := samplingPriorityAttrib.Int()
if value == 0 {
decision = doNotSampleSpan
} else if value > 0 {
decision = mustSampleSpan
}
case pcommon.ValueTypeDouble:
value := samplingPriorityAttrib.Double()
if value == 0.0 {
decision = doNotSampleSpan
} else if value > 0.0 {
decision = mustSampleSpan
}
case pcommon.ValueTypeStr:
attribVal := samplingPriorityAttrib.Str()
if value, err := strconv.ParseFloat(attribVal, 64); err == nil {
if value == 0.0 {
decision = doNotSampleSpan
} else if value > 0.0 {
decision = mustSampleSpan
}
}
}
return decision
}
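// Example (illustrative): a span attribute sampling.priority=0 (int,
// double, or numeric string) maps to doNotSampleSpan, any value greater
// than zero maps to mustSampleSpan, and a missing or non-numeric value
// defers to the probabilistic decision.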
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"fmt"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// aggregateAttributesProcessor
type aggregateAttributesProcessor struct {
aggregations []*aggregation
}
type aggregation struct {
attribute string
prefixes []string
}
func newAggregateAttributesProcessor(config []aggregationPair) *aggregateAttributesProcessor {
aggregations := []*aggregation{}
for i := 0; i < len(config); i++ {
pair := &aggregation{
attribute: config[i].Attribute,
prefixes: config[i].Prefixes,
}
aggregations = append(aggregations, pair)
}
return &aggregateAttributesProcessor{aggregations: aggregations}
}
func (proc *aggregateAttributesProcessor) processLogs(logs plog.Logs) error {
for i := 0; i < logs.ResourceLogs().Len(); i++ {
resourceLogs := logs.ResourceLogs().At(i)
err := proc.processAttributes(resourceLogs.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ {
scopeLogs := resourceLogs.ScopeLogs().At(j)
for k := 0; k < scopeLogs.LogRecords().Len(); k++ {
err := proc.processAttributes(scopeLogs.LogRecords().At(k).Attributes())
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error {
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
resourceMetrics := metrics.ResourceMetrics().At(i)
err := proc.processAttributes(resourceMetrics.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ {
scopeMetrics := resourceMetrics.ScopeMetrics().At(j)
for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
err := processMetricLevelAttributes(proc, scopeMetrics.Metrics().At(k))
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) processTraces(traces ptrace.Traces) error {
for i := 0; i < traces.ResourceSpans().Len(); i++ {
resourceSpans := traces.ResourceSpans().At(i)
err := proc.processAttributes(resourceSpans.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ {
scopeSpans := resourceSpans.ScopeSpans().At(j)
for k := 0; k < scopeSpans.Spans().Len(); k++ {
err := proc.processAttributes(scopeSpans.Spans().At(k).Attributes())
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) isEnabled() bool {
return len(proc.aggregations) != 0
}
func (*aggregateAttributesProcessor) ConfigPropertyName() string {
return "aggregate_attributes"
}
func (proc *aggregateAttributesProcessor) processAttributes(attributes pcommon.Map) error {
for i := 0; i < len(proc.aggregations); i++ {
curr := proc.aggregations[i]
names := []string{}
attrs := []pcommon.Value{}
for j := 0; j < len(curr.prefixes); j++ {
prefix := curr.prefixes[j]
// Create a new map. Unused keys will be added here,
// so we can check them against other prefixes.
newMap := pcommon.NewMap()
newMap.EnsureCapacity(attributes.Len())
attributes.Range(func(key string, value pcommon.Value) bool {
ok, trimmedKey := getNewKey(key, prefix)
if ok {
// TODO: Potential name conflict to resolve, eg.:
// pod_* matches pod_foo
// pod2_* matches pod2_foo
// both will be renamed to foo
// ref: https://github.com/SumoLogic/sumologic-otel-collector/issues/1263
names = append(names, trimmedKey)
val := pcommon.NewValueEmpty()
value.CopyTo(val)
attrs = append(attrs, val)
} else {
value.CopyTo(newMap.PutEmpty(key))
}
return true
})
newMap.CopyTo(attributes)
}
if len(names) != len(attrs) {
return fmt.Errorf(
"internal error: number of values does not equal the number of keys; len(keys) = %d, len(values) = %d",
len(names),
len(attrs),
)
}
// Add a new attribute only if there's anything that should be put under it.
if len(names) > 0 {
aggregated := attributes.PutEmptyMap(curr.attribute)
for j := 0; j < len(names); j++ {
attrs[j].CopyTo(aggregated.PutEmpty(names[j]))
}
}
}
return nil
}
// getNewKey checks whether the key has the given prefix and, if so, returns the key with the prefix trimmed.
func getNewKey(key string, prefix string) (bool, string) {
if strings.HasPrefix(key, prefix) {
return true, strings.TrimPrefix(key, prefix)
}
return false, ""
}
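// Example (illustrative sketch): with the configuration
//
//	aggregate_attributes:
//	  - attribute: pod
//	    prefixes: [pod_]
//
// the attribute map {"pod_name": "x", "other": "y"} is rewritten to
// {"pod": {"name": "x"}, "other": "y"}.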
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"fmt"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
)
// This file contains common functionality for subprocessors that modify attributes (represented by pcommon.Map).
type attributesProcessor interface {
processAttributes(pcommon.Map) error
}
func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metric) error {
switch metric.Type() {
case pmetric.MetricTypeEmpty:
return nil
case pmetric.MetricTypeSum:
dp := metric.Sum().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeGauge:
dp := metric.Gauge().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeHistogram:
dp := metric.Histogram().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeExponentialHistogram:
dp := metric.ExponentialHistogram().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeSummary:
dp := metric.Summary().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
}
return fmt.Errorf("unknown metric type: %s", metric.Type().String())
}
func mapToPcommonMap(m map[string]pcommon.Value) pcommon.Map {
attrs := pcommon.NewMap()
for k, v := range m {
v.CopyTo(attrs.PutEmpty(k))
}
return attrs
}
func mapToPcommonValue(m map[string]pcommon.Value) pcommon.Value {
attrs := pcommon.NewValueMap()
for k, v := range m {
v.CopyTo(attrs.Map().PutEmpty(k))
}
return attrs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
)
// cloudNamespaceProcessor adds the `cloud.namespace` resource attribute to logs, metrics and traces.
type cloudNamespaceProcessor struct {
addCloudNamespace bool
}
const (
cloudNamespaceAttributeName = "cloud.namespace"
cloudNamespaceAwsEc2 = "aws/ec2"
cloudNamespaceAwsEcs = "ecs"
cloudNamespaceAwsBeanstalk = "ElasticBeanstalk"
)
func newCloudNamespaceProcessor(addCloudNamespace bool) *cloudNamespaceProcessor {
return &cloudNamespaceProcessor{
addCloudNamespace: addCloudNamespace,
}
}
func (*cloudNamespaceProcessor) processLogs(logs plog.Logs) error {
for i := 0; i < logs.ResourceLogs().Len(); i++ {
addCloudNamespaceAttribute(logs.ResourceLogs().At(i).Resource().Attributes())
}
return nil
}
func (*cloudNamespaceProcessor) processMetrics(metrics pmetric.Metrics) error {
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
addCloudNamespaceAttribute(metrics.ResourceMetrics().At(i).Resource().Attributes())
}
return nil
}
func (*cloudNamespaceProcessor) processTraces(traces ptrace.Traces) error {
for i := 0; i < traces.ResourceSpans().Len(); i++ {
addCloudNamespaceAttribute(traces.ResourceSpans().At(i).Resource().Attributes())
}
return nil
}
func (proc *cloudNamespaceProcessor) isEnabled() bool {
return proc.addCloudNamespace
}
func (*cloudNamespaceProcessor) ConfigPropertyName() string {
return "add_cloud_namespace"
}
// addCloudNamespaceAttribute adds the `cloud.namespace` attribute
// to a collection of attributes that already contains a `cloud.platform` attribute.
// It does not add the `cloud.namespace` attribute for all `cloud.platform` values,
// but only for a few specific ones - namely AWS EC2, AWS ECS, and AWS Elastic Beanstalk.
func addCloudNamespaceAttribute(attributes pcommon.Map) {
cloudPlatformAttributeValue, found := attributes.Get(conventions.AttributeCloudPlatform)
if !found {
return
}
switch cloudPlatformAttributeValue.Str() {
case conventions.AttributeCloudPlatformAWSEC2:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEc2)
case conventions.AttributeCloudPlatformAWSECS:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEcs)
case conventions.AttributeCloudPlatformAWSElasticBeanstalk:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsBeanstalk)
}
}
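// Example (illustrative): a resource carrying cloud.platform=aws_ec2
// gains cloud.namespace=aws/ec2; resources with other cloud.platform
// values, or without the attribute, are left unchanged.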
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/component"
)
type Config struct {
AddCloudNamespace bool `mapstructure:"add_cloud_namespace"`
TranslateAttributes bool `mapstructure:"translate_attributes"`
TranslateTelegrafAttributes bool `mapstructure:"translate_telegraf_attributes"`
NestAttributes *NestingProcessorConfig `mapstructure:"nest_attributes"`
AggregateAttributes []aggregationPair `mapstructure:"aggregate_attributes"`
LogFieldsAttributes *logFieldAttributesConfig `mapstructure:"field_attributes"`
TranslateDockerMetrics bool `mapstructure:"translate_docker_metrics"`
}
type aggregationPair struct {
Attribute string `mapstructure:"attribute"`
Prefixes []string `mapstructure:"prefixes"`
}
const (
defaultAddCloudNamespace = true
defaultTranslateAttributes = true
defaultTranslateTelegrafAttributes = true
defaultTranslateDockerMetrics = false
// Nesting processor default config
defaultNestingEnabled = false
defaultNestingSeparator = "."
defaultNestingSquashSingleValues = false
defaultAddSeverityNumberAttribute = false
defaultAddSeverityTextAttribute = false
defaultAddSpanIDAttribute = false
defaultAddTraceIDAttribute = false
)
var _ component.Config = (*Config)(nil)
func defaultNestingInclude() []string {
return []string{}
}
func defaultNestingExclude() []string {
return []string{}
}
func defaultAggregateAttributes() []aggregationPair {
return []aggregationPair{}
}
func createDefaultConfig() component.Config {
return &Config{
AddCloudNamespace: defaultAddCloudNamespace,
TranslateAttributes: defaultTranslateAttributes,
TranslateTelegrafAttributes: defaultTranslateTelegrafAttributes,
NestAttributes: &NestingProcessorConfig{
Separator: defaultNestingSeparator,
Enabled: defaultNestingEnabled,
Include: defaultNestingInclude(),
Exclude: defaultNestingExclude(),
SquashSingleValues: defaultNestingSquashSingleValues,
},
AggregateAttributes: defaultAggregateAttributes(),
LogFieldsAttributes: &logFieldAttributesConfig{
SeverityNumberAttribute: &logFieldAttribute{defaultAddSeverityNumberAttribute, SeverityNumberAttributeName},
SeverityTextAttribute: &logFieldAttribute{defaultAddSeverityTextAttribute, SeverityTextAttributeName},
SpanIDAttribute: &logFieldAttribute{defaultAddSpanIDAttribute, SpanIDAttributeName},
TraceIDAttribute: &logFieldAttribute{defaultAddTraceIDAttribute, TraceIDAttributeName},
},
TranslateDockerMetrics: defaultTranslateDockerMetrics,
}
}
// Validate config
func (cfg *Config) Validate() error {
return nil
}
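// Example collector configuration (illustrative sketch), matching the
// mapstructure tags above and assuming the component is registered under
// the ID "sumologic":
//
//	processors:
//	  sumologic:
//	    add_cloud_namespace: true
//	    nest_attributes:
//	      enabled: true
//	      separator: "."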
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//
//go:generate mdatagen metadata.yaml
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor/internal/metadata"
)
var processorCapabilities = consumer.Capabilities{MutatesData: true}
// NewFactory returns a new factory for the processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
)
}
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
processor.processLogs,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
func createMetricsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Metrics,
) (processor.Metrics, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewMetrics(
ctx,
set,
cfg,
nextConsumer,
processor.processMetrics,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
processor.processTraces,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
const (
SeverityNumberAttributeName = "loglevel"
SeverityTextAttributeName = "severitytext"
SpanIDAttributeName = "spanid"
TraceIDAttributeName = "traceid"
)
type logFieldAttribute struct {
Enabled bool `mapstructure:"enabled"`
Name string `mapstructure:"name"`
}
type logFieldAttributesConfig struct {
SeverityNumberAttribute *logFieldAttribute `mapstructure:"severity_number"`
SeverityTextAttribute *logFieldAttribute `mapstructure:"severity_text"`
SpanIDAttribute *logFieldAttribute `mapstructure:"span_id"`
TraceIDAttribute *logFieldAttribute `mapstructure:"trace_id"`
}
// spanIDToHexOrEmptyString returns a hex string from the SpanID.
// An empty string is returned if the SpanID is empty.
func spanIDToHexOrEmptyString(id pcommon.SpanID) string {
if id.IsEmpty() {
return ""
}
return hex.EncodeToString(id[:])
}
// traceIDToHexOrEmptyString returns a hex string from the TraceID.
// An empty string is returned if the TraceID is empty.
func traceIDToHexOrEmptyString(id pcommon.TraceID) string {
if id.IsEmpty() {
return ""
}
return hex.EncodeToString(id[:])
}
var severityNumberToLevel = map[string]string{
plog.SeverityNumberUnspecified.String(): "UNSPECIFIED",
plog.SeverityNumberTrace.String(): "TRACE",
plog.SeverityNumberTrace2.String(): "TRACE2",
plog.SeverityNumberTrace3.String(): "TRACE3",
plog.SeverityNumberTrace4.String(): "TRACE4",
plog.SeverityNumberDebug.String(): "DEBUG",
plog.SeverityNumberDebug2.String(): "DEBUG2",
plog.SeverityNumberDebug3.String(): "DEBUG3",
plog.SeverityNumberDebug4.String(): "DEBUG4",
plog.SeverityNumberInfo.String(): "INFO",
plog.SeverityNumberInfo2.String(): "INFO2",
plog.SeverityNumberInfo3.String(): "INFO3",
plog.SeverityNumberInfo4.String(): "INFO4",
plog.SeverityNumberWarn.String(): "WARN",
plog.SeverityNumberWarn2.String(): "WARN2",
plog.SeverityNumberWarn3.String(): "WARN3",
plog.SeverityNumberWarn4.String(): "WARN4",
plog.SeverityNumberError.String(): "ERROR",
plog.SeverityNumberError2.String(): "ERROR2",
plog.SeverityNumberError3.String(): "ERROR3",
plog.SeverityNumberError4.String(): "ERROR4",
plog.SeverityNumberFatal.String(): "FATAL",
plog.SeverityNumberFatal2.String(): "FATAL2",
plog.SeverityNumberFatal3.String(): "FATAL3",
plog.SeverityNumberFatal4.String(): "FATAL4",
}
// logFieldsConversionProcessor copies specific log record fields (severity,
// span ID, trace ID) into attributes, so that the backend presents them as fields.
type logFieldsConversionProcessor struct {
LogFieldsAttributes *logFieldAttributesConfig
}
func newLogFieldConversionProcessor(logFieldsAttributes *logFieldAttributesConfig) *logFieldsConversionProcessor {
return &logFieldsConversionProcessor{
logFieldsAttributes,
}
}
func (proc *logFieldsConversionProcessor) addAttributes(log plog.LogRecord) {
if log.SeverityNumber() != plog.SeverityNumberUnspecified {
if _, found := log.Attributes().Get(SeverityNumberAttributeName); !found &&
proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled {
level := severityNumberToLevel[log.SeverityNumber().String()]
log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityNumberAttribute.Name, level)
}
}
if _, found := log.Attributes().Get(SeverityTextAttributeName); !found &&
proc.LogFieldsAttributes.SeverityTextAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityTextAttribute.Name, log.SeverityText())
}
if _, found := log.Attributes().Get(SpanIDAttributeName); !found &&
proc.LogFieldsAttributes.SpanIDAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.SpanIDAttribute.Name, spanIDToHexOrEmptyString(log.SpanID()))
}
if _, found := log.Attributes().Get(TraceIDAttributeName); !found &&
proc.LogFieldsAttributes.TraceIDAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.TraceIDAttribute.Name, traceIDToHexOrEmptyString(log.TraceID()))
}
}
func (proc *logFieldsConversionProcessor) processLogs(logs plog.Logs) error {
if !proc.isEnabled() {
return nil
}
rls := logs.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
ills := rls.At(i).ScopeLogs()
for j := 0; j < ills.Len(); j++ {
logs := ills.At(j).LogRecords()
for k := 0; k < logs.Len(); k++ {
proc.addAttributes(logs.At(k))
}
}
}
return nil
}
func (proc *logFieldsConversionProcessor) processMetrics(_ pmetric.Metrics) error {
// No-op. Metrics should not be translated.
return nil
}
func (proc *logFieldsConversionProcessor) processTraces(_ ptrace.Traces) error {
// No-op. Traces should not be translated.
return nil
}
func (proc *logFieldsConversionProcessor) isEnabled() bool {
return proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled ||
proc.LogFieldsAttributes.SeverityTextAttribute.Enabled ||
proc.LogFieldsAttributes.SpanIDAttribute.Enabled ||
proc.LogFieldsAttributes.TraceIDAttribute.Enabled
}
func (*logFieldsConversionProcessor) ConfigPropertyName() string {
return "field_attributes"
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
type NestingProcessorConfig struct {
Separator string `mapstructure:"separator"`
Enabled bool `mapstructure:"enabled"`
Include []string `mapstructure:"include"`
Exclude []string `mapstructure:"exclude"`
SquashSingleValues bool `mapstructure:"squash_single_values"`
}
type NestingProcessor struct {
separator string
enabled bool
allowlist []string
denylist []string
squashSingleValues bool
}
func newNestingProcessor(config *NestingProcessorConfig) *NestingProcessor {
proc := &NestingProcessor{
separator: config.Separator,
enabled: config.Enabled,
allowlist: config.Include,
denylist: config.Exclude,
squashSingleValues: config.SquashSingleValues,
}
return proc
}
func (proc *NestingProcessor) processLogs(logs plog.Logs) error {
if !proc.enabled {
return nil
}
for i := 0; i < logs.ResourceLogs().Len(); i++ {
rl := logs.ResourceLogs().At(i)
if err := proc.processAttributes(rl.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rl.ScopeLogs().Len(); j++ {
logsRecord := rl.ScopeLogs().At(j).LogRecords()
for k := 0; k < logsRecord.Len(); k++ {
if err := proc.processAttributes(logsRecord.At(k).Attributes()); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.enabled {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
if err := proc.processAttributes(rm.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
if err := processMetricLevelAttributes(proc, metricsSlice.At(k)); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processTraces(traces ptrace.Traces) error {
if !proc.enabled {
return nil
}
for i := 0; i < traces.ResourceSpans().Len(); i++ {
rs := traces.ResourceSpans().At(i)
if err := proc.processAttributes(rs.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rs.ScopeSpans().Len(); j++ {
spans := rs.ScopeSpans().At(j).Spans()
for k := 0; k < spans.Len(); k++ {
if err := proc.processAttributes(spans.At(k).Attributes()); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processAttributes(attributes pcommon.Map) error {
newMap := pcommon.NewMap()
attributes.Range(func(k string, v pcommon.Value) bool {
// If the key is not on the allowlist or is on the denylist, skip translating it.
if !proc.shouldTranslateKey(k) {
v.CopyTo(newMap.PutEmpty(k))
return true
}
keys := strings.Split(k, proc.separator)
if len(keys) == 0 {
// Split returns an empty slice only if both the string and the separator are empty;
// in that case, set map[""] = v and return
newVal := newMap.PutEmpty(k)
v.CopyTo(newVal)
return true
}
prevValue := pcommon.NewValueMap()
nextMap := prevValue.Map()
newMap.CopyTo(nextMap)
for i := 0; i < len(keys); i++ {
if prevValue.Type() != pcommon.ValueTypeMap {
// If previous value was not a map, change it into a map.
// The former value will be set under the key "".
tempMap := pcommon.NewValueMap()
prevValue.CopyTo(tempMap.Map().PutEmpty(""))
tempMap.CopyTo(prevValue)
}
newValue, ok := prevValue.Map().Get(keys[i])
if ok {
prevValue = newValue
} else {
if i == len(keys)-1 {
// If we're checking the last key, insert empty value, to which v will be copied.
prevValue = prevValue.Map().PutEmpty(keys[i])
} else {
// If we're not checking the last key, put a map.
prevValue = prevValue.Map().PutEmpty(keys[i])
prevValue.SetEmptyMap()
}
}
}
if prevValue.Type() == pcommon.ValueTypeMap {
// Now check the value we want to copy. If it is a map, we should merge both maps.
// Else, just place the value under the key "".
if v.Type() == pcommon.ValueTypeMap {
v.Map().Range(func(k string, val pcommon.Value) bool {
val.CopyTo(prevValue.Map().PutEmpty(k))
return true
})
} else {
v.CopyTo(prevValue.Map().PutEmpty(""))
}
} else {
v.CopyTo(prevValue)
}
nextMap.CopyTo(newMap)
return true
})
if proc.squashSingleValues {
newMap = proc.squash(newMap)
}
newMap.CopyTo(attributes)
return nil
}
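// Illustrative sketch (not in the upstream file): with separator "." and empty
// include/exclude lists, processAttributes turns flat keys into nested maps,
// e.g. {"k8s.pod.name": "x", "k8s.pod.uid": "y"} becomes
// {"k8s": {"pod": {"name": "x", "uid": "y"}}}.
func exampleNestAttributes() {
proc := newNestingProcessor(&NestingProcessorConfig{Separator: ".", Enabled: true})
attrs := pcommon.NewMap()
attrs.PutStr("k8s.pod.name", "x")
attrs.PutStr("k8s.pod.uid", "y")
_ = proc.processAttributes(attrs) // attrs now holds the nested structure
}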
// shouldTranslateKey checks whether the given key fulfills the following conditions:
// - has a prefix that exists in the allowlist (if the allowlist is not empty)
// - does not have a prefix that exists in the denylist
func (proc *NestingProcessor) shouldTranslateKey(k string) bool {
if len(proc.allowlist) > 0 {
isOk := false
for i := 0; i < len(proc.allowlist); i++ {
if strings.HasPrefix(k, proc.allowlist[i]) {
isOk = true
break
}
}
if !isOk {
return false
}
}
if len(proc.denylist) > 0 {
for i := 0; i < len(proc.denylist); i++ {
if strings.HasPrefix(k, proc.denylist[i]) {
return false
}
}
}
return true
}
// squash squashes maps that have single values, e.g. {"a": {"b": {"c": "C", "d": "D"}}}
// gets squashed into {"a.b": {"c": "C", "d": "D"}}
func (proc *NestingProcessor) squash(attributes pcommon.Map) pcommon.Map {
newMap := pcommon.NewValueMap()
attributes.CopyTo(newMap.Map())
key := proc.squashAttribute(newMap)
if key != "" {
retMap := pcommon.NewMap()
newMap.Map().CopyTo(retMap.PutEmptyMap(key))
return retMap
}
return newMap.Map()
}
// squashAttribute squashes keys in a value.
// If the value contains a map with one element, that map gets squashed and its key is returned.
//
// If the value contains a map with many elements, this function is called on each element,
// and keys get replaced where needed; "" is returned.
//
// Otherwise, nothing happens and "" is returned.
func (proc *NestingProcessor) squashAttribute(value pcommon.Value) string {
if value.Type() != pcommon.ValueTypeMap {
return ""
}
m := value.Map()
if m.Len() == 1 {
// If the map contains only one key-value pair, squash it.
key := ""
val := pcommon.NewValueEmpty()
// Range visits exactly one entry here (the map's only one)
m.Range(func(k string, v pcommon.Value) bool {
keySuffix := proc.squashAttribute(v)
key = proc.squashKey(k, keySuffix)
val = v
return false
})
val.CopyTo(value)
return key
}
// This map doesn't get squashed, but its content might have keys replaced.
newMap := pcommon.NewMap()
m.Range(func(k string, v pcommon.Value) bool {
keySuffix := proc.squashAttribute(v)
// If "" was returned, the value was not a one-element map and did not get squashed.
if keySuffix == "" {
v.CopyTo(newMap.PutEmpty(k))
} else {
v.CopyTo(newMap.PutEmpty(proc.squashKey(k, keySuffix)))
}
return true
})
newMap.CopyTo(value.Map())
return ""
}
func (proc *NestingProcessor) squashKey(key string, keySuffix string) string {
if keySuffix == "" {
return key
}
return key + proc.separator + keySuffix
}
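// Illustrative sketch (not in the upstream file): squash collapses chains of
// single-child maps into dotted keys, so the map built below comes back as
// {"a.b": {"c": "C", "d": "D"}}.
func exampleSquashSingleValues() pcommon.Map {
m := pcommon.NewMap()
inner := m.PutEmptyMap("a").PutEmptyMap("b")
inner.PutStr("c", "C")
inner.PutStr("d", "D")
proc := newNestingProcessor(&NestingProcessorConfig{Separator: ".", SquashSingleValues: true})
return proc.squash(m)
}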
func (proc *NestingProcessor) isEnabled() bool {
return proc.enabled
}
func (*NestingProcessor) ConfigPropertyName() string {
return "nest_attributes"
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type sumologicSubprocessor interface {
processLogs(plog.Logs) error
processMetrics(pmetric.Metrics) error
processTraces(ptrace.Traces) error
isEnabled() bool
ConfigPropertyName() string
}
type sumologicProcessor struct {
logger *zap.Logger
subprocessors []sumologicSubprocessor
}
func newsumologicProcessor(set processor.Settings, config *Config) *sumologicProcessor {
cloudNamespaceProcessor := newCloudNamespaceProcessor(config.AddCloudNamespace)
translateAttributesProcessor := newTranslateAttributesProcessor(config.TranslateAttributes)
translateTelegrafMetricsProcessor := newTranslateTelegrafMetricsProcessor(config.TranslateTelegrafAttributes)
nestingProcessor := newNestingProcessor(config.NestAttributes)
aggregateAttributesProcessor := newAggregateAttributesProcessor(config.AggregateAttributes)
logFieldsConversionProcessor := newLogFieldConversionProcessor(config.LogFieldsAttributes)
translateDockerMetricsProcessor := newTranslateDockerMetricsProcessor(config.TranslateDockerMetrics)
processors := []sumologicSubprocessor{
cloudNamespaceProcessor,
translateAttributesProcessor,
translateTelegrafMetricsProcessor,
nestingProcessor,
aggregateAttributesProcessor,
logFieldsConversionProcessor,
translateDockerMetricsProcessor,
}
processor := &sumologicProcessor{
logger: set.Logger,
subprocessors: processors,
}
return processor
}
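// Note: the subprocessors run in the fixed order listed above for every batch;
// for example, attribute translation happens before nesting, so nested keys
// are built from the already-translated names.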
func (processor *sumologicProcessor) start(_ context.Context, _ component.Host) error {
enabledSubprocessors := []zapcore.Field{}
for _, proc := range processor.subprocessors {
enabledSubprocessors = append(enabledSubprocessors, zap.Bool(proc.ConfigPropertyName(), proc.isEnabled()))
}
processor.logger.Info("Sumo Logic Processor has started.", enabledSubprocessors...)
return nil
}
func (processor *sumologicProcessor) shutdown(_ context.Context) error {
processor.logger.Info("Sumo Logic Processor has shut down.")
return nil
}
func (processor *sumologicProcessor) processLogs(_ context.Context, logs plog.Logs) (plog.Logs, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processLogs(logs); err != nil {
return logs, fmt.Errorf("failed to process logs for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return logs, nil
}
func (processor *sumologicProcessor) processMetrics(_ context.Context, metrics pmetric.Metrics) (pmetric.Metrics, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processMetrics(metrics); err != nil {
return metrics, fmt.Errorf("failed to process metrics for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return metrics, nil
}
func (processor *sumologicProcessor) processTraces(_ context.Context, traces ptrace.Traces) (ptrace.Traces, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processTraces(traces); err != nil {
return traces, fmt.Errorf("failed to process traces for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return traces, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateAttributesProcessor translates attribute names from OpenTelemetry to Sumo Logic convention
type translateAttributesProcessor struct {
shouldTranslate bool
}
// attributeTranslations maps OpenTelemetry attribute names to Sumo Logic attribute names
var attributeTranslations = map[string]string{
"cloud.account.id": "AccountId",
"cloud.availability_zone": "AvailabilityZone",
"cloud.platform": "aws_service",
"cloud.region": "Region",
"host.id": "InstanceId",
"host.name": "host",
"host.type": "InstanceType",
"k8s.cluster.name": "Cluster",
"k8s.container.name": "container",
"k8s.daemonset.name": "daemonset",
"k8s.deployment.name": "deployment",
"k8s.namespace.name": "namespace",
"k8s.node.name": "node",
"k8s.service.name": "service",
"k8s.pod.hostname": "host",
"k8s.pod.name": "pod",
"k8s.pod.uid": "pod_id",
"k8s.replicaset.name": "replicaset",
"k8s.statefulset.name": "statefulset",
"service.name": "service",
"log.file.path_resolved": "_sourceName",
}
func newTranslateAttributesProcessor(shouldTranslate bool) *translateAttributesProcessor {
return &translateAttributesProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateAttributesProcessor) processLogs(logs plog.Logs) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < logs.ResourceLogs().Len(); i++ {
translateAttributes(logs.ResourceLogs().At(i).Resource().Attributes())
}
return nil
}
func (proc *translateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
translateAttributes(metrics.ResourceMetrics().At(i).Resource().Attributes())
}
return nil
}
func (proc *translateAttributesProcessor) processTraces(_ ptrace.Traces) error {
// No-op. Traces should not be translated.
return nil
}
func (proc *translateAttributesProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateAttributesProcessor) ConfigPropertyName() string {
return "translate_attributes"
}
func translateAttributes(attributes pcommon.Map) {
result := pcommon.NewMap()
result.EnsureCapacity(attributes.Len())
attributes.Range(func(otKey string, value pcommon.Value) bool {
if sumoKey, ok := attributeTranslations[otKey]; ok {
// Only insert if it doesn't exist yet, to prevent overwriting.
// We have to check explicitly since the final map is still being built,
// so we cannot rely on an insert-if-absent operation.
if _, exists := attributes.Get(sumoKey); !exists {
if _, ok := result.Get(sumoKey); !ok {
value.CopyTo(result.PutEmpty(sumoKey))
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
return true
})
result.CopyTo(attributes)
}
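// Illustrative sketch (not in the upstream file): "host.name" is renamed to
// "host" and "k8s.pod.name" to "pod"; if a "host" attribute already existed in
// the input, the original "host.name" key would be kept instead.
func exampleTranslateAttributes() {
attrs := pcommon.NewMap()
attrs.PutStr("host.name", "ip-10-0-0-1")
attrs.PutStr("k8s.pod.name", "my-pod")
translateAttributes(attrs)
// attrs now holds host="ip-10-0-0-1" and pod="my-pod".
}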
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateDockerMetricsProcessor translates Docker metric names and resource attributes from OpenTelemetry to Sumo Logic convention
type translateDockerMetricsProcessor struct {
shouldTranslate bool
}
// dockerMetricsTranslations maps Docker metric names to corresponding names in Sumo Logic convention
var dockerMetricsTranslations = map[string]string{
"container.cpu.percent": "cpu_percentage",
"container.cpu.usage.system": "system_cpu_usage",
"container.cpu.usage.percpu": "cpu_usage.percpu_usage",
"container.cpu.usage.total": "cpu_usage.total_usage",
"container.cpu.usage.kernelmode": "cpu_usage.usage_in_kernelmode",
"container.cpu.usage.usermode": "cpu_usage.usage_in_usermode",
"container.cpu.throttling_data.periods": "throttling_data.periods",
"container.cpu.throttling_data.throttled_periods": "throttling_data.throttled_periods",
"container.cpu.throttling_data.throttled_time": "throttling_data.throttled_time",
"container.memory.usage.limit": "limit",
"container.memory.usage.max": "max_usage",
"container.memory.percent": "memory_percentage",
"container.memory.usage.total": "usage",
"container.memory.active_anon": "stats.active_anon",
"container.memory.active_file": "stats.active_file",
"container.memory.cache": "stats.cache",
"container.memory.hierarchical_memory_limit": "stats.hierarchical_memory_limit",
"container.memory.inactive_anon": "stats.inactive_anon",
"container.memory.inactive_file": "stats.inactive_file",
"container.memory.mapped_file": "stats.mapped_file",
"container.memory.pgfault": "stats.pgfault",
"container.memory.pgmajfault": "stats.pgmajfault",
"container.memory.pgpgin": "stats.pgpgin",
"container.memory.pgpgout": "stats.pgpgout",
"container.memory.rss": "stats.rss",
"container.memory.rss_huge": "stats.rss_huge",
"container.memory.unevictable": "stats.unevictable",
"container.memory.writeback": "stats.writeback",
"container.memory.total_active_anon": "stats.total_active_anon",
"container.memory.total_active_file": "stats.total_active_file",
"container.memory.total_cache": "stats.total_cache",
"container.memory.total_inactive_anon": "stats.total_inactive_anon",
"container.memory.total_mapped_file": "stats.total_mapped_file",
"container.memory.total_pgfault": "stats.total_pgfault",
"container.memory.total_pgmajfault": "stats.total_pgmajfault",
"container.memory.total_pgpgin": "stats.total_pgpgin",
"container.memory.total_pgpgout": "stats.total_pgpgout",
"container.memory.total_rss": "stats.total_rss",
"container.memory.total_rss_huge": "stats.total_rss_huge",
"container.memory.total_unevictable": "stats.total_unevictable",
"container.memory.total_writeback": "stats.total_writeback",
"container.blockio.io_merged_recursive": "io_merged_recursive",
"container.blockio.io_queued_recursive": "io_queue_recursive",
"container.blockio.io_service_bytes_recursive": "io_service_bytes_recursive",
"container.blockio.io_service_time_recursive": "io_service_time_recursive",
"container.blockio.io_serviced_recursive": "io_serviced_recursive",
"container.blockio.io_time_recursive": "io_time_recursive",
"container.blockio.io_wait_time_recursive": "io_wait_time_recursive",
"container.blockio.sectors_recursive": "sectors_recursive",
}
var dockerResourceAttributeTranslations = map[string]string{
"container.id": "container.FullID",
"container.image.name": "container.ImageName",
"container.name": "container.Name",
}
func newTranslateDockerMetricsProcessor(shouldTranslate bool) *translateDockerMetricsProcessor {
return &translateDockerMetricsProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateDockerMetricsProcessor) processLogs(_ plog.Logs) error {
// No-op, this subprocessor doesn't process logs.
return nil
}
func (proc *translateDockerMetricsProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
translateDockerResourceAttributes(rm.Resource().Attributes())
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
translateDockerMetric(metricsSlice.At(k))
}
}
}
return nil
}
func (proc *translateDockerMetricsProcessor) processTraces(_ ptrace.Traces) error {
// No-op, this subprocessor doesn't process traces.
return nil
}
func (proc *translateDockerMetricsProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateDockerMetricsProcessor) ConfigPropertyName() string {
return "translate_docker_metrics"
}
func translateDockerMetric(m pmetric.Metric) {
name, exists := dockerMetricsTranslations[m.Name()]
if exists {
m.SetName(name)
}
}
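// Illustrative sketch (not in the upstream file): metric names with a known
// translation are rewritten in place; unknown names pass through unchanged.
func exampleTranslateDockerMetric() {
m := pmetric.NewMetric()
m.SetName("container.cpu.percent")
translateDockerMetric(m)
// m.Name() == "cpu_percentage"
}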
func translateDockerResourceAttributes(attributes pcommon.Map) {
result := pcommon.NewMap()
result.EnsureCapacity(attributes.Len())
attributes.Range(func(otKey string, value pcommon.Value) bool {
if sumoKey, ok := dockerResourceAttributeTranslations[otKey]; ok {
// Only insert if it doesn't exist yet, to prevent overwriting.
// We have to check explicitly since the final map is still being built,
// so we cannot rely on an insert-if-absent operation.
if _, exists := attributes.Get(sumoKey); !exists {
if _, ok := result.Get(sumoKey); !ok {
value.CopyTo(result.PutEmpty(sumoKey))
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
return true
})
result.CopyTo(attributes)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateTelegrafMetricsProcessor translates metric names from OpenTelemetry to Sumo Logic convention
type translateTelegrafMetricsProcessor struct {
shouldTranslate bool
}
// metricsTranslations maps Telegraf metric names to corresponding names in Sumo Logic convention
var metricsTranslations = map[string]string{
// CPU metrics
"cpu_usage_active": "CPU_Total",
"cpu_usage_idle": "CPU_Idle",
"cpu_usage_iowait": "CPU_IOWait",
"cpu_usage_irq": "CPU_Irq",
"cpu_usage_nice": "CPU_Nice",
"cpu_usage_softirq": "CPU_SoftIrq",
"cpu_usage_steal": "CPU_Stolen",
"cpu_usage_System": "CPU_Sys",
"cpu_usage_user": "CPU_User",
"system_load1": "CPU_LoadAvg_1min",
"system_load5": "CPU_LoadAvg_5min",
"system_load15": "CPU_LoadAvg_15min",
// Disk metrics
"disk_used": "Disk_Used",
"disk_used_percent": "Disk_UsedPercent",
"disk_inodes_free": "Disk_InodesAvailable",
// Disk IO metrics
"diskio_reads": "Disk_Reads",
"diskio_read_bytes": "Disk_ReadBytes",
"diskio_writes": "Disk_Writes",
"diskio_write_bytes": "Disk_WriteBytes",
// Memory metrics
"mem_total": "Mem_Total",
"mem_free": "Mem_free",
"mem_available": "Mem_ActualFree",
"mem_used": "Mem_ActualUsed",
"mem_used_percent": "Mem_UsedPercent",
"mem_available_percent": "Mem_FreePercent",
// Procstat metrics
"procstat_num_threads": "Proc_Threads",
"procstat_memory_vms": "Proc_VMSize",
"procstat_memory_rss": "Proc_RSSize",
"procstat_cpu_usage": "Proc_CPU",
"procstat_major_faults": "Proc_MajorFaults",
"procstat_minor_faults": "Proc_MinorFaults",
// Net metrics
"net_bytes_sent": "Net_OutBytes",
"net_bytes_recv": "Net_InBytes",
"net_packets_sent": "Net_OutPackets",
"net_packets_recv": "Net_InPackets",
// Netstat metrics
"netstat_tcp_close": "TCP_Close",
"netstat_tcp_close_wait": "TCP_CloseWait",
"netstat_tcp_closing": "TCP_Closing",
"netstat_tcp_established": "TCP_Established",
"netstat_tcp_listen": "TCP_Listen",
"netstat_tcp_time_wait": "TCP_TimeWait",
}
func newTranslateTelegrafMetricsProcessor(shouldTranslate bool) *translateTelegrafMetricsProcessor {
return &translateTelegrafMetricsProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateTelegrafMetricsProcessor) processLogs(_ plog.Logs) error {
// No-op, this subprocessor doesn't process logs.
return nil
}
func (proc *translateTelegrafMetricsProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
translateTelegrafMetric(metricsSlice.At(k))
}
}
}
return nil
}
func (proc *translateTelegrafMetricsProcessor) processTraces(_ ptrace.Traces) error {
// No-op, this subprocessor doesn't process traces.
return nil
}
func (proc *translateTelegrafMetricsProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateTelegrafMetricsProcessor) ConfigPropertyName() string {
return "translate_telegraf_attributes"
}
func translateTelegrafMetric(m pmetric.Metric) {
name, exists := metricsTranslations[m.Name()]
if exists {
m.SetName(name)
}
}
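// Illustrative sketch (not in the upstream file): the rename is in place and
// names without a translation are left untouched.
func exampleTranslateTelegrafMetric() {
m := pmetric.NewMetric()
m.SetName("mem_used_percent")
translateTelegrafMetric(m) // m.Name() == "Mem_UsedPercent"
m.SetName("custom_metric")
translateTelegrafMetric(m) // name unchanged
}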
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
)
func getNewAndPolicy(settings component.TelemetrySettings, config *AndCfg) (sampling.PolicyEvaluator, error) {
subPolicyEvaluators := make([]sampling.PolicyEvaluator, len(config.SubPolicyCfg))
for i := range config.SubPolicyCfg {
policyCfg := &config.SubPolicyCfg[i]
policy, err := getAndSubPolicyEvaluator(settings, policyCfg)
if err != nil {
return nil, err
}
subPolicyEvaluators[i] = policy
}
return sampling.NewAnd(settings.Logger, subPolicyEvaluators), nil
}
// getAndSubPolicyEvaluator returns the policy evaluator for an AND sub-policy
func getAndSubPolicyEvaluator(settings component.TelemetrySettings, cfg *AndSubPolicyCfg) (sampling.PolicyEvaluator, error) {
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
)
func getNewCompositePolicy(settings component.TelemetrySettings, config *CompositeCfg) (sampling.PolicyEvaluator, error) {
subPolicyEvalParams := make([]sampling.SubPolicyEvalParams, len(config.SubPolicyCfg))
rateAllocationsMap := getRateAllocationMap(config)
for i := range config.SubPolicyCfg {
policyCfg := &config.SubPolicyCfg[i]
policy, err := getCompositeSubPolicyEvaluator(settings, policyCfg)
if err != nil {
return nil, err
}
evalParams := sampling.SubPolicyEvalParams{
Evaluator: policy,
MaxSpansPerSecond: int64(rateAllocationsMap[policyCfg.Name]),
}
subPolicyEvalParams[i] = evalParams
}
return sampling.NewComposite(settings.Logger, config.MaxTotalSpansPerSecond, subPolicyEvalParams, sampling.MonotonicClock{}), nil
}
// getRateAllocationMap returns the spans-per-second allocation for each sub-policy, applying the configured rate allocations
func getRateAllocationMap(config *CompositeCfg) map[string]float64 {
rateAllocationsMap := make(map[string]float64)
maxTotalSPS := float64(config.MaxTotalSpansPerSecond)
// The default SPS is determined by dividing the max total SPS equally among the sub-policies
defaultSPS := maxTotalSPS / float64(len(config.SubPolicyCfg))
for _, rAlloc := range config.RateAllocation {
if rAlloc.Percent > 0 {
rateAllocationsMap[rAlloc.Policy] = (float64(rAlloc.Percent) / 100) * maxTotalSPS
} else {
rateAllocationsMap[rAlloc.Policy] = defaultSPS
}
}
return rateAllocationsMap
}
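// Worked example: with max_total_spans_per_second = 1000 and two sub-policies,
// a rate allocation of 25% for "policy-a" yields 250 SPS, while "policy-b"
// listed with percent 0 falls back to the default of 1000 / 2 = 500 SPS.
// Sub-policies absent from the rate allocation list get no map entry, so the
// lookup in getNewCompositePolicy yields 0 for them.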
// getCompositeSubPolicyEvaluator returns the policy evaluator for a composite sub-policy
func getCompositeSubPolicyEvaluator(settings component.TelemetrySettings, cfg *CompositeSubPolicyCfg) (sampling.PolicyEvaluator, error) {
switch cfg.Type {
case And:
return getNewAndPolicy(settings, &cfg.AndCfg)
default:
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate mdatagen metadata.yaml
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"context"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)
// NewFactory returns a new factory for the Tail Sampling processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability))
}
func createDefaultConfig() component.Config {
return &Config{
DecisionWait: 30 * time.Second,
NumTraces: 50000,
}
}
func createTracesProcessor(
ctx context.Context,
params processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
tCfg := cfg.(*Config)
return newTracesProcessor(ctx, params, nextConsumer, *tCfg)
}
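// Illustrative collector configuration sketch matching the defaults set in
// createDefaultConfig above (the mapstructure keys decision_wait and
// num_traces are assumptions about this package's Config):
//
//	processors:
//	  tail_sampling:
//	    decision_wait: 30s
//	    num_traces: 50000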
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cache // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
import (
"encoding/binary"
lru "github.com/hashicorp/golang-lru/v2"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// lruDecisionCache implements Cache as a simple LRU cache.
// It holds trace IDs that had sampling decisions made on them.
// It does not specify the type of sampling decision that was made, only that
// a decision was made for an ID. You need separate DecisionCaches for caching
// sampled and not sampled trace IDs.
type lruDecisionCache[V any] struct {
cache *lru.Cache[uint64, V]
}
var _ Cache[any] = (*lruDecisionCache[any])(nil)
// NewLRUDecisionCache returns a new lruDecisionCache.
// The size parameter indicates the number of keys the cache will hold before it
// starts evicting the least recently used key.
func NewLRUDecisionCache[V any](size int) (Cache[V], error) {
c, err := lru.New[uint64, V](size)
if err != nil {
return nil, err
}
return &lruDecisionCache[V]{cache: c}, nil
}
func (c *lruDecisionCache[V]) Get(id pcommon.TraceID) (V, bool) {
return c.cache.Get(rightHalfTraceID(id))
}
func (c *lruDecisionCache[V]) Put(id pcommon.TraceID, v V) {
_ = c.cache.Add(rightHalfTraceID(id), v)
}
// Delete is a no-op since the LRU cache relies on the least recently used key being evicted automatically
func (c *lruDecisionCache[V]) Delete(_ pcommon.TraceID) {}
func rightHalfTraceID(id pcommon.TraceID) uint64 {
return binary.LittleEndian.Uint64(id[8:])
}
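// Illustrative sketch (not in the upstream file): only the right (last) 8
// bytes of the 16-byte trace ID form the cache key, decoded little-endian,
// so the example below returns 1.
func exampleRightHalfTraceID() uint64 {
id := pcommon.TraceID{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0}
return rightHalfTraceID(id)
}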
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cache // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
import "go.opentelemetry.io/collector/pdata/pcommon"
type nopDecisionCache[V any] struct{}
var _ Cache[any] = (*nopDecisionCache[any])(nil)
func NewNopDecisionCache[V any]() Cache[V] {
return &nopDecisionCache[V]{}
}
func (n *nopDecisionCache[V]) Get(_ pcommon.TraceID) (V, bool) {
var v V
return v, false
}
func (n *nopDecisionCache[V]) Put(_ pcommon.TraceID, _ V) {
}
func (n *nopDecisionCache[V]) Delete(_ pcommon.TraceID) {}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package idbatcher defines a pipeline of fixed size in which the
// elements are batches of ids.
package idbatcher // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher"
import (
"errors"
"sync"
"go.opentelemetry.io/collector/pdata/pcommon"
)
var (
// ErrInvalidNumBatches occurs when an invalid number of batches is specified.
ErrInvalidNumBatches = errors.New("invalid number of batches, it must be greater than zero")
// ErrInvalidBatchChannelSize occurs when an invalid batch channel size is specified.
ErrInvalidBatchChannelSize = errors.New("invalid batch channel size, it must be greater than zero")
)
// Batch is the type of batches held by the Batcher.
type Batch []pcommon.TraceID
// Batcher behaves like a pipeline of batches that has a fixed number of batches in the pipe
// and a new batch being built outside of the pipe. Items can be concurrently added to the batch
// currently being built. When the batch being built is closed, the oldest batch in the pipe
// is pushed out so the one just closed can be put on the end of the pipe (this is done as an
// atomic operation). The caller is in control of when a batch is completed and a new one should
// be started.
type Batcher interface {
// AddToCurrentBatch puts the given id on the batch being currently built. The client is in charge
// of limiting the growth of the current batch if appropriate for its scenario. It can
// either call CloseCurrentAndTakeFirstBatch earlier or stop adding new items depending on what is
// required by the scenario.
AddToCurrentBatch(id pcommon.TraceID)
// CloseCurrentAndTakeFirstBatch takes the batch at the front of the pipe, and moves the current
// batch to the end of the pipe, creating a new batch to receive new items. This operation should
// be atomic.
// It returns the batch that was in front of the pipe and a boolean that if true indicates that
// there are more batches to be retrieved.
CloseCurrentAndTakeFirstBatch() (Batch, bool)
// Stop informs the Batcher that no more items are going to be batched and that the
// pipeline can be read until it is empty. After this method is called, attempts to
// enqueue new items will panic.
Stop()
}
var _ Batcher = (*batcher)(nil)
type batcher struct {
pendingIDs chan pcommon.TraceID // Channel for the ids to be added to the next batch.
batches chan Batch // Channel with already captured batches.
// cbMutex protects the currentBatch storing ids.
cbMutex sync.Mutex
currentBatch Batch
newBatchesInitialCapacity uint64
stopchan chan bool
stopped bool
stopLock sync.RWMutex
}
// New creates a Batcher that will hold numBatches in its pipeline, having a channel with
// batchChannelSize to receive new items. New batches will be created with capacity set to
// newBatchesInitialCapacity.
func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batcher, error) {
if numBatches < 1 {
return nil, ErrInvalidNumBatches
}
if batchChannelSize < 1 {
return nil, ErrInvalidBatchChannelSize
}
batches := make(chan Batch, numBatches)
// First numBatches batches will be empty in order to simplify clients that are running
// CloseCurrentAndTakeFirstBatch on a timer and want to delay the processing of the first
// batch with actual data. This way there is no need for accounting on the client side and
// a single timer can be started immediately.
for i := uint64(0); i < numBatches; i++ {
batches <- nil
}
batcher := &batcher{
pendingIDs: make(chan pcommon.TraceID, batchChannelSize),
batches: batches,
currentBatch: make(Batch, 0, newBatchesInitialCapacity),
newBatchesInitialCapacity: newBatchesInitialCapacity,
stopchan: make(chan bool),
}
// Single goroutine that keeps filling the current batch, contention is expected only
// when the current batch is being switched.
go func() {
for id := range batcher.pendingIDs {
batcher.cbMutex.Lock()
batcher.currentBatch = append(batcher.currentBatch, id)
batcher.cbMutex.Unlock()
}
batcher.stopchan <- true
}()
return batcher, nil
}
func (b *batcher) AddToCurrentBatch(id pcommon.TraceID) {
b.pendingIDs <- id
}
func (b *batcher) CloseCurrentAndTakeFirstBatch() (Batch, bool) {
if readBatch, ok := <-b.batches; ok {
b.stopLock.RLock()
if !b.stopped {
nextBatch := make(Batch, 0, b.newBatchesInitialCapacity)
b.cbMutex.Lock()
b.batches <- b.currentBatch
b.currentBatch = nextBatch
b.cbMutex.Unlock()
}
b.stopLock.RUnlock()
return readBatch, true
}
readBatch := b.currentBatch
b.currentBatch = nil
return readBatch, false
}
func (b *batcher) Stop() {
close(b.pendingIDs)
b.stopLock.Lock()
b.stopped = <-b.stopchan
b.stopLock.Unlock()
close(b.batches)
}
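// Illustrative usage sketch (not in the upstream file): after Stop, draining
// returns the pre-filled empty batches first, then the batch holding the id
// added below, with false signalling that the pipeline is empty.
func exampleBatcherUsage() {
b, err := New(2, 64, 64)
if err != nil {
return
}
b.AddToCurrentBatch(pcommon.TraceID{1})
b.Stop()
for {
batch, more := b.CloseCurrentAndTakeFirstBatch()
_ = batch // process the ids
if !more {
break
}
}
}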
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorTailSamplingCountSpansSampled metric.Int64Counter
ProcessorTailSamplingCountTracesSampled metric.Int64Counter
ProcessorTailSamplingEarlyReleasesFromCacheDecision metric.Int64Counter
ProcessorTailSamplingGlobalCountTracesSampled metric.Int64Counter
ProcessorTailSamplingNewTraceIDReceived metric.Int64Counter
ProcessorTailSamplingSamplingDecisionLatency metric.Int64Histogram
ProcessorTailSamplingSamplingDecisionTimerLatency metric.Int64Histogram
ProcessorTailSamplingSamplingLateSpanAge metric.Int64Histogram
ProcessorTailSamplingSamplingPolicyEvaluationError metric.Int64Counter
ProcessorTailSamplingSamplingTraceDroppedTooEarly metric.Int64Counter
ProcessorTailSamplingSamplingTraceRemovalAge metric.Int64Histogram
ProcessorTailSamplingSamplingTracesOnMemory metric.Int64Gauge
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorTailSamplingCountSpansSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_count_spans_sampled",
metric.WithDescription("Count of spans that were sampled or not per sampling policy"),
metric.WithUnit("{spans}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_count_traces_sampled",
metric.WithDescription("Count of traces that were sampled or not per sampling policy"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingEarlyReleasesFromCacheDecision, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_early_releases_from_cache_decision",
metric.WithDescription("Number of spans that were able to be immediately released due to a decision cache hit."),
metric.WithUnit("{spans}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingGlobalCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_global_count_traces_sampled",
metric.WithDescription("Global count of traces that were sampled or not by at least one policy"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingNewTraceIDReceived, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_new_trace_id_received",
metric.WithDescription("Counts the arrival of new traces"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingDecisionLatency, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_decision_latency",
metric.WithDescription("Latency (in microseconds) of a given sampling policy"),
metric.WithUnit("µs"),
metric.WithExplicitBucketBoundaries([]float64{1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000}...),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingDecisionTimerLatency, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_decision_timer_latency",
metric.WithDescription("Latency (in microseconds) of each run of the sampling decision timer"),
metric.WithUnit("µs"),
metric.WithExplicitBucketBoundaries([]float64{1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000}...),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingLateSpanAge, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_late_span_age",
metric.WithDescription("Time (in seconds) from the sampling decision was taken and the arrival of a late span"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingPolicyEvaluationError, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_sampling_policy_evaluation_error",
metric.WithDescription("Count of sampling policy evaluation errors"),
metric.WithUnit("{errors}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTraceDroppedTooEarly, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_sampling_trace_dropped_too_early",
metric.WithDescription("Count of traces that needed to be dropped before the configured wait time"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTraceRemovalAge, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_trace_removal_age",
metric.WithDescription("Time (in seconds) from arrival of a new trace until its removal from memory"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTracesOnMemory, err = builder.meter.Int64Gauge(
"otelcol_processor_tail_sampling_sampling_traces_on_memory",
metric.WithDescription("Tracks the number of traces current on memory"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type alwaysSample struct {
logger *zap.Logger
}
var _ PolicyEvaluator = (*alwaysSample)(nil)
// NewAlwaysSample creates a policy evaluator that samples all traces.
func NewAlwaysSample(settings component.TelemetrySettings) PolicyEvaluator {
return &alwaysSample{
logger: settings.Logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (as *alwaysSample) Evaluate(context.Context, pcommon.TraceID, *TraceData) (Decision, error) {
as.logger.Debug("Evaluating spans in always-sample filter")
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type And struct {
// the subpolicy evaluators
subpolicies []PolicyEvaluator
logger *zap.Logger
}
func NewAnd(
logger *zap.Logger,
subpolicies []PolicyEvaluator,
) PolicyEvaluator {
return &And{
subpolicies: subpolicies,
logger: logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *And) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
// The policy iterates over all sub-policies and returns Sampled if all sub-policies returned a Sampled Decision.
// If any subpolicy returns NotSampled or InvertNotSampled, it returns NotSampled Decision.
for _, sub := range c.subpolicies {
decision, err := sub.Evaluate(ctx, traceID, trace)
if err != nil {
return Unspecified, err
}
if decision == NotSampled || decision == InvertNotSampled {
return NotSampled, nil
}
}
return Sampled, nil
}
// OnDroppedSpans is called when the trace needs to be dropped, due to memory
// pressure, before the decision_wait time has been reached.
func (c *And) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) {
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type booleanAttributeFilter struct {
key string
value bool
logger *zap.Logger
invertMatch bool
}
var _ PolicyEvaluator = (*booleanAttributeFilter)(nil)
// NewBooleanAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute that match the supplied boolean value.
func NewBooleanAttributeFilter(settings component.TelemetrySettings, key string, value bool, invertMatch bool) PolicyEvaluator {
return &booleanAttributeFilter{
key: key,
value: value,
logger: settings.Logger,
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (baf *booleanAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if baf.invertMatch {
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(baf.key); ok {
value := v.Bool()
return value != baf.value
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(baf.key); ok {
value := v.Bool()
return value != baf.value
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(baf.key); ok {
value := v.Bool()
return value == baf.value
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(baf.key); ok {
value := v.Bool()
return value == baf.value
}
return false
}), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type subpolicy struct {
// the subpolicy evaluator
evaluator PolicyEvaluator
// spans per second allocated to each subpolicy
allocatedSPS int64
// spans per second that each subpolicy sampled in this period
sampledSPS int64
}
// Composite evaluator and its internal data
type Composite struct {
// the subpolicy evaluators
subpolicies []*subpolicy
// maximum total spans per second that must be sampled
maxTotalSPS int64
// current unix timestamp second
currentSecond int64
// The time provider (can be different from clock for testing purposes)
timeProvider TimeProvider
logger *zap.Logger
}
var _ PolicyEvaluator = (*Composite)(nil)
// SubPolicyEvalParams defines the evaluator and max rate for a sub-policy
type SubPolicyEvalParams struct {
Evaluator PolicyEvaluator
MaxSpansPerSecond int64
}
// NewComposite creates a policy evaluator that delegates to the given sub-policies while enforcing per-policy and total spans-per-second limits.
func NewComposite(
logger *zap.Logger,
maxTotalSpansPerSecond int64,
subPolicyParams []SubPolicyEvalParams,
timeProvider TimeProvider,
) PolicyEvaluator {
var subpolicies []*subpolicy
for i := 0; i < len(subPolicyParams); i++ {
sub := &subpolicy{}
sub.evaluator = subPolicyParams[i].Evaluator
sub.allocatedSPS = subPolicyParams[i].MaxSpansPerSecond
// We are just starting, so there is no previous input, set it to 0
sub.sampledSPS = 0
subpolicies = append(subpolicies, sub)
}
return &Composite{
maxTotalSPS: maxTotalSpansPerSecond,
subpolicies: subpolicies,
timeProvider: timeProvider,
logger: logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *Composite) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
// Rate limiting works by counting spans that are sampled during each 1 second
// time period. Until the total number of spans during a particular second
// exceeds the allocated number of spans-per-second the traces are sampled,
// once the limit is exceeded the traces are no longer sampled. The counter
// restarts at the beginning of each second.
// Current counters and rate limits are kept separately for each subpolicy.
currSecond := c.timeProvider.getCurSecond()
if c.currentSecond != currSecond {
// This is a new second
c.currentSecond = currSecond
// Reset counters
for i := range c.subpolicies {
c.subpolicies[i].sampledSPS = 0
}
}
for _, sub := range c.subpolicies {
decision, err := sub.evaluator.Evaluate(ctx, traceID, trace)
if err != nil {
return Unspecified, err
}
if decision == Sampled || decision == InvertSampled {
// The subpolicy made a decision to Sample. Now we need to make our decision.
// Calculate resulting SPS counter if we decide to sample this trace
spansInSecondIfSampled := sub.sampledSPS + trace.SpanCount.Load()
// Check if the rate will be within the allocated bandwidth.
if spansInSecondIfSampled <= sub.allocatedSPS && spansInSecondIfSampled <= c.maxTotalSPS {
sub.sampledSPS = spansInSecondIfSampled
// Let the sampling happen
return Sampled, nil
}
// We exceeded the rate limit. Don't sample this trace.
// Note that we will continue evaluating new incoming traces against
// allocated SPS, we do not update sub.sampledSPS here in order to give
// chance to another smaller trace to be accepted later.
return NotSampled, nil
}
}
return NotSampled, nil
}
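// Worked example: a sub-policy with allocatedSPS = 100 has sampledSPS = 70 in
// the current second. A 40-span trace would raise it to 110 > 100, so that
// trace is rejected and sampledSPS stays at 70; a later 25-span trace
// (70 + 25 = 95) still fits and is sampled, assuming maxTotalSPS is also
// respected.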
// OnDroppedSpans is called when the trace needs to be dropped, due to memory
// pressure, before the decision_wait time has been reached.
func (c *Composite) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) {
// Here we have a number of possible solutions:
// 1. Random sample traces based on maxTotalSPS.
// 2. Perform full composite sampling logic by calling Composite.Evaluate(), essentially
// using partial trace data for sampling.
// 3. Sample everything.
//
// It seems that #2 may be the best choice from end user perspective, but
// it is not certain and it is also additional performance penalty when we are
// already under a memory (and possibly CPU) pressure situation.
//
// For now we are playing safe and go with #3. Investigating alternate options
// should be a future task.
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type latency struct {
logger *zap.Logger
thresholdMs int64
upperThresholdMs int64
}
var _ PolicyEvaluator = (*latency)(nil)
// NewLatency creates a policy evaluator that samples traces whose duration meets the configured threshold;
// when upperThresholdMs is non-zero, the duration must exceed thresholdMs without exceeding upperThresholdMs
func NewLatency(settings component.TelemetrySettings, thresholdMs int64, upperThresholdMs int64) PolicyEvaluator {
return &latency{
logger: settings.Logger,
thresholdMs: thresholdMs,
upperThresholdMs: upperThresholdMs,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (l *latency) Evaluate(_ context.Context, _ pcommon.TraceID, traceData *TraceData) (Decision, error) {
l.logger.Debug("Evaluating spans in latency filter")
traceData.Lock()
defer traceData.Unlock()
batches := traceData.ReceivedBatches
var minTime pcommon.Timestamp
var maxTime pcommon.Timestamp
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
if minTime == 0 || span.StartTimestamp() < minTime {
minTime = span.StartTimestamp()
}
if maxTime == 0 || span.EndTimestamp() > maxTime {
maxTime = span.EndTimestamp()
}
duration := maxTime.AsTime().Sub(minTime.AsTime())
if l.upperThresholdMs == 0 {
return duration.Milliseconds() >= l.thresholdMs
}
return (l.thresholdMs < duration.Milliseconds() && duration.Milliseconds() <= l.upperThresholdMs)
}), nil
}
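// Worked example: with thresholdMs = 100 and upperThresholdMs = 0, a trace
// containing a single span running from t=0ms to t=150ms has a 150ms duration,
// and 150 >= 100, so it is sampled; with upperThresholdMs = 120 it would not
// be, since 150 > 120.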
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type numericAttributeFilter struct {
key string
minValue, maxValue int64
logger *zap.Logger
invertMatch bool
}
var _ PolicyEvaluator = (*numericAttributeFilter)(nil)
// NewNumericAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute in the given numeric range.
func NewNumericAttributeFilter(settings component.TelemetrySettings, key string, minValue, maxValue int64, invertMatch bool) PolicyEvaluator {
return &numericAttributeFilter{
key: key,
minValue: minValue,
maxValue: maxValue,
logger: settings.Logger,
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (naf *numericAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if naf.invertMatch {
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return false
}
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return false
}
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return true
}
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return true
}
}
return false
},
), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"errors"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
)
type ottlConditionFilter struct {
sampleSpanExpr *ottl.ConditionSequence[ottlspan.TransformContext]
sampleSpanEventExpr *ottl.ConditionSequence[ottlspanevent.TransformContext]
errorMode ottl.ErrorMode
logger *zap.Logger
}
var _ PolicyEvaluator = (*ottlConditionFilter)(nil)
// NewOTTLConditionFilter creates a policy evaluator that samples traces matching the given OTTL span or span event conditions.
func NewOTTLConditionFilter(settings component.TelemetrySettings, spanConditions, spanEventConditions []string, errMode ottl.ErrorMode) (PolicyEvaluator, error) {
filter := &ottlConditionFilter{
errorMode: errMode,
logger: settings.Logger,
}
var err error
if len(spanConditions) == 0 && len(spanEventConditions) == 0 {
return nil, errors.New("expected at least one OTTL condition to filter on")
}
if len(spanConditions) > 0 {
if filter.sampleSpanExpr, err = filterottl.NewBoolExprForSpan(spanConditions, filterottl.StandardSpanFuncs(), errMode, settings); err != nil {
return nil, err
}
}
if len(spanEventConditions) > 0 {
if filter.sampleSpanEventExpr, err = filterottl.NewBoolExprForSpanEvent(spanEventConditions, filterottl.StandardSpanEventFuncs(), errMode, settings); err != nil {
return nil, err
}
}
return filter, nil
}
func (ocf *ottlConditionFilter) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
ocf.logger.Debug("Evaluating with OTTL conditions filter", zap.String("traceID", traceID.String()))
if ocf.sampleSpanExpr == nil && ocf.sampleSpanEventExpr == nil {
return NotSampled, nil
}
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
for i := 0; i < batches.ResourceSpans().Len(); i++ {
rs := batches.ResourceSpans().At(i)
resource := rs.Resource()
for j := 0; j < rs.ScopeSpans().Len(); j++ {
ss := rs.ScopeSpans().At(j)
scope := ss.Scope()
for k := 0; k < ss.Spans().Len(); k++ {
span := ss.Spans().At(k)
var (
ok bool
err error
)
// Now we reach the span level and begin evaluation with the parsed expressions.
// The evaluation stops as soon as:
// 1. an error happens, or
// 2. a "Sampled" decision is made.
// Otherwise it keeps evaluating and finally exits with a "NotSampled" decision.
// Span evaluation
if ocf.sampleSpanExpr != nil {
ok, err = ocf.sampleSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ss, rs))
if err != nil {
return Error, err
}
if ok {
return Sampled, nil
}
}
// Span event evaluation
if ocf.sampleSpanEventExpr != nil {
spanEvents := span.Events()
for l := 0; l < spanEvents.Len(); l++ {
ok, err = ocf.sampleSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvents.At(l), span, scope, resource, ss, rs))
if err != nil {
return Error, err
}
if ok {
return Sampled, nil
}
}
}
}
}
}
return NotSampled, nil
}
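// Illustrative sketch (hypothetical condition): an evaluator built with a span
// condition such as `attributes["http.status_code"] == 500` samples any trace
// containing at least one span for which the condition holds, while span event
// conditions are checked against every event of every span.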
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"hash/fnv"
"math"
"math/big"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
const (
defaultHashSalt = "default-hash-seed"
)
type probabilisticSampler struct {
logger *zap.Logger
threshold uint64
hashSalt string
}
var _ PolicyEvaluator = (*probabilisticSampler)(nil)
// NewProbabilisticSampler creates a policy evaluator that samples a percentage of
// traces.
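// A minimal usage sketch (values hypothetical): an empty hashSalt falls back
// to defaultHashSalt, and samplingPercentage is expressed as 0-100:
//
//	eval := NewProbabilisticSampler(settings, "", 10) // ~10% of trace IDs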
func NewProbabilisticSampler(settings component.TelemetrySettings, hashSalt string, samplingPercentage float64) PolicyEvaluator {
if hashSalt == "" {
hashSalt = defaultHashSalt
}
return &probabilisticSampler{
logger: settings.Logger,
// calculate threshold once
threshold: calculateThreshold(samplingPercentage / 100),
hashSalt: hashSalt,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (s *probabilisticSampler) Evaluate(_ context.Context, traceID pcommon.TraceID, _ *TraceData) (Decision, error) {
s.logger.Debug("Evaluating spans in probabilistic filter")
if hashTraceID(s.hashSalt, traceID[:]) <= s.threshold {
return Sampled, nil
}
return NotSampled, nil
}
// calculateThreshold converts a ratio into a value between 0 and MaxUint64
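// For example, a ratio of 0.25 maps to one quarter of the uint64 range
// (truncated toward zero):
//
//	calculateThreshold(0.25) // 4611686018427387903, i.e. 0x3FFFFFFFFFFFFFFF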
func calculateThreshold(ratio float64) uint64 {
// Use big.Float and big.Int to calculate the threshold because converting
// math.MaxUint64 directly to float64 loses precision: the value does not fit
// into the 53 bits float64 reserves for the significand.
boundary := new(big.Float).SetInt(new(big.Int).SetUint64(math.MaxUint64))
res, _ := boundary.Mul(boundary, big.NewFloat(ratio)).Uint64()
return res
}
// hashTraceID creates a hash using the FNV-1a algorithm.
func hashTraceID(salt string, b []byte) uint64 {
hasher := fnv.New64a()
// the fnv.Write implementation never returns an error; see hash/fnv/fnv.go
_, _ = hasher.Write([]byte(salt))
_, _ = hasher.Write(b)
return hasher.Sum64()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type rateLimiting struct {
currentSecond int64
spansInCurrentSecond int64
spansPerSecond int64
logger *zap.Logger
}
var _ PolicyEvaluator = (*rateLimiting)(nil)
// NewRateLimiting creates a policy evaluator that samples traces until the
// configured spans-per-second budget for the current second is exhausted.
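// For example (numbers hypothetical): with spansPerSecond=100, a trace whose
// addition keeps the running per-second span count below 100 is Sampled; once
// the count would reach or exceed 100, traces are NotSampled until the next
// wall-clock second.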
func NewRateLimiting(settings component.TelemetrySettings, spansPerSecond int64) PolicyEvaluator {
return &rateLimiting{
spansPerSecond: spansPerSecond,
logger: settings.Logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (r *rateLimiting) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
r.logger.Debug("Evaluating spans in rate-limiting filter")
currSecond := time.Now().Unix()
if r.currentSecond != currSecond {
r.currentSecond = currSecond
r.spansInCurrentSecond = 0
}
spansInSecondIfSampled := r.spansInCurrentSecond + trace.SpanCount.Load()
if spansInSecondIfSampled < r.spansPerSecond {
r.spansInCurrentSecond = spansInSecondIfSampled
return Sampled, nil
}
return NotSampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type spanCount struct {
logger *zap.Logger
minSpans int32
maxSpans int32
}
var _ PolicyEvaluator = (*spanCount)(nil)
// NewSpanCount creates a policy evaluator that samples traces whose total span
// count lies within the given minimum and maximum bounds.
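// For example (values hypothetical): minSpans=2, maxSpans=0 samples any trace
// with at least 2 spans (a zero maxSpans disables the upper bound), while
// minSpans=2, maxSpans=10 samples only traces with 2 to 10 spans inclusive.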
func NewSpanCount(settings component.TelemetrySettings, minSpans, maxSpans int32) PolicyEvaluator {
return &spanCount{
logger: settings.Logger,
minSpans: minSpans,
maxSpans: maxSpans,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *spanCount) Evaluate(_ context.Context, _ pcommon.TraceID, traceData *TraceData) (Decision, error) {
c.logger.Debug("Evaluating spans counts in filter")
spanCount := int(traceData.SpanCount.Load())
switch {
case c.maxSpans == 0 && spanCount >= int(c.minSpans):
return Sampled, nil
case spanCount >= int(c.minSpans) && spanCount <= int(c.maxSpans):
return Sampled, nil
default:
return NotSampled, nil
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"errors"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type statusCodeFilter struct {
logger *zap.Logger
statusCodes []ptrace.StatusCode
}
var _ PolicyEvaluator = (*statusCodeFilter)(nil)
// NewStatusCodeFilter creates a policy evaluator that samples all traces with
// a given status code.
func NewStatusCodeFilter(settings component.TelemetrySettings, statusCodeString []string) (PolicyEvaluator, error) {
if len(statusCodeString) == 0 {
return nil, errors.New("expected at least one status code to filter on")
}
statusCodes := make([]ptrace.StatusCode, len(statusCodeString))
for i := range statusCodeString {
switch statusCodeString[i] {
case "OK":
statusCodes[i] = ptrace.StatusCodeOk
case "ERROR":
statusCodes[i] = ptrace.StatusCodeError
case "UNSET":
statusCodes[i] = ptrace.StatusCodeUnset
default:
return nil, fmt.Errorf("unknown status code %q, supported: OK, ERROR, UNSET", statusCodeString[i])
}
}
return &statusCodeFilter{
logger: settings.Logger,
statusCodes: statusCodes,
}, nil
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (r *statusCodeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
r.logger.Debug("Evaluating spans in status code filter")
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
for _, statusCode := range r.statusCodes {
if span.Status().Code() == statusCode {
return true
}
}
return false
}), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"regexp"
"github.com/golang/groupcache/lru"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
const defaultCacheSize = 128
type stringAttributeFilter struct {
key string
logger *zap.Logger
// matcher matches an attribute value either by exact string comparison
// or by regular expression
matcher func(string) bool
invertMatch bool
}
type regexStrSetting struct {
matchedAttrs *lru.Cache
filterList []*regexp.Regexp
}
var _ PolicyEvaluator = (*stringAttributeFilter)(nil)
// NewStringAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute matching one of the given string values or regular expressions.
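// A minimal usage sketch in exact-match mode (key and values hypothetical;
// the cache size argument is ignored unless regex matching is enabled):
//
//	eval := NewStringAttributeFilter(settings, "service.name",
//		[]string{"checkout", "payments"}, false, 0, false)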
func NewStringAttributeFilter(settings component.TelemetrySettings, key string, values []string, regexMatchEnabled bool, evictSize int, invertMatch bool) PolicyEvaluator {
// initialize regex filter rules and LRU cache for matched results
if regexMatchEnabled {
if evictSize <= 0 {
evictSize = defaultCacheSize
}
filterList := addFilters(values)
regexStrSetting := &regexStrSetting{
matchedAttrs: lru.New(evictSize),
filterList: filterList,
}
return &stringAttributeFilter{
key: key,
logger: settings.Logger,
// matcher returns true if the given string matches the regex rules defined in string attribute filters
matcher: func(toMatch string) bool {
if v, ok := regexStrSetting.matchedAttrs.Get(toMatch); ok {
return v.(bool)
}
for _, r := range regexStrSetting.filterList {
if r.MatchString(toMatch) {
regexStrSetting.matchedAttrs.Add(toMatch, true)
return true
}
}
regexStrSetting.matchedAttrs.Add(toMatch, false)
return false
},
invertMatch: invertMatch,
}
}
// initialize the exact value map
valuesMap := make(map[string]struct{})
for _, value := range values {
if value != "" {
valuesMap[value] = struct{}{}
}
}
return &stringAttributeFilter{
key: key,
logger: settings.Logger,
// matcher returns true if the given string matches any of the string attribute filters
matcher: func(toMatch string) bool {
_, matched := valuesMap[toMatch]
return matched
},
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
// The SamplingDecision is made by comparing the attribute values with the matching values,
// which might be static strings or regular expressions.
func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
saf.logger.Debug("Evaluating spans in string-tag filter")
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if saf.invertMatch {
// Invert Match returns true by default, except when key and value are matched
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(saf.key); ok {
if ok := saf.matcher(v.Str()); ok {
return false
}
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(saf.key); ok {
truncatableStr := v.Str()
if len(truncatableStr) > 0 {
if ok := saf.matcher(v.Str()); ok {
return false
}
}
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(saf.key); ok {
if ok := saf.matcher(v.Str()); ok {
return true
}
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(saf.key); ok {
truncatableStr := v.Str()
if len(truncatableStr) > 0 {
if ok := saf.matcher(v.Str()); ok {
return true
}
}
}
return false
},
), nil
}
// addFilters compiles all the given filter expressions and stores them as regexes.
// Note that the compiled regexes are not anchored: a pattern matches anywhere
// in the value unless it is explicitly anchored with ^ and $.
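// For instance, the hypothetical pattern "checkout" would also match
// "checkout-service"; use "^checkout$" to require a full-string match.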
func addFilters(exprs []string) []*regexp.Regexp {
list := make([]*regexp.Regexp, 0, len(exprs))
for _, entry := range exprs {
rule := regexp.MustCompile(entry)
list = append(list, rule)
}
return list
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"time"
)
// TimeProvider allows getting the current Unix second
type TimeProvider interface {
getCurSecond() int64
}
// MonotonicClock provides the current Unix second based on a real clock.
// Use it when creating a NewComposite that should measure sample rates
// against a real-time clock. This is almost always what you want; the usual
// exception is automated testing, where a fake clock may be preferable.
type MonotonicClock struct{}
func (c MonotonicClock) getCurSecond() int64 {
return time.Now().Unix()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
tracesdk "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
type traceStateFilter struct {
key string
logger *zap.Logger
matcher func(string) bool
}
var _ PolicyEvaluator = (*traceStateFilter)(nil)
// NewTraceStateFilter creates a policy evaluator that samples all traces whose
// trace_state contains the given key with one of the given values.
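// A minimal usage sketch (key and values hypothetical): samples traces whose
// trace_state contains "vendor=tier1" or "vendor=tier2":
//
//	eval := NewTraceStateFilter(settings, "vendor", []string{"tier1", "tier2"})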
func NewTraceStateFilter(settings component.TelemetrySettings, key string, values []string) PolicyEvaluator {
// initialize the exact value map
valuesMap := make(map[string]struct{})
for _, value := range values {
// the key-value pair (the "=" takes one character) in trace_state can't exceed 256 characters
if value != "" && len(key)+len(value) < 256 {
valuesMap[value] = struct{}{}
}
}
return &traceStateFilter{
key: key,
logger: settings.Logger,
matcher: func(toMatch string) bool {
_, matched := valuesMap[toMatch]
return matched
},
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (tsf *traceStateFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
traceState, err := tracesdk.ParseTraceState(span.TraceState().AsRaw())
if err != nil {
return false
}
if ok := tsf.matcher(traceState.Get(tsf.key)); ok {
return true
}
return false
}), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// hasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any
// callback returns true.
func hasResourceOrSpanWithCondition(
td ptrace.Traces,
shouldSampleResource func(resource pcommon.Resource) bool,
shouldSampleSpan func(span ptrace.Span) bool,
) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
resource := rs.Resource()
if shouldSampleResource(resource) {
return Sampled
}
if hasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSampleSpan) {
return Sampled
}
}
return NotSampled
}
// invertHasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any
// callback returns false.
func invertHasResourceOrSpanWithCondition(
td ptrace.Traces,
shouldSampleResource func(resource pcommon.Resource) bool,
shouldSampleSpan func(span ptrace.Span) bool,
) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
resource := rs.Resource()
if !shouldSampleResource(resource) {
return InvertNotSampled
}
if !invertHasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSampleSpan) {
return InvertNotSampled
}
}
return InvertSampled
}
// hasSpanWithCondition iterates through all the instrumentation library spans until any callback returns true.
func hasSpanWithCondition(td ptrace.Traces, shouldSample func(span ptrace.Span) bool) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
if hasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSample) {
return Sampled
}
}
return NotSampled
}
func hasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
for j := 0; j < ils.Spans().Len(); j++ {
span := ils.Spans().At(j)
if check(span) {
return true
}
}
}
return false
}
func invertHasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
for j := 0; j < ils.Spans().Len(); j++ {
span := ils.Spans().At(j)
if !check(span) {
return false
}
}
}
return true
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/telemetry"
import "go.opentelemetry.io/collector/featuregate"
var metricStatCountSpansSampledFeatureGate = featuregate.GlobalRegistry().MustRegister(
"processor.tailsamplingprocessor.metricstatcountspanssampled",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("When enabled, a new metric stat_count_spans_sampled will be available in the tail sampling processor. Differently from stat_count_traces_sampled, this metric will count the number of spans sampled or not per sampling policy, where the original counts traces."),
)
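// IsMetricStatCountSpansSampledEnabled reports whether the alpha gate above is
// enabled. It can be turned on via the collector's command line, e.g.
//
//	--feature-gates=processor.tailsamplingprocessor.metricstatcountspanssampled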
func IsMetricStatCountSpansSampledEnabled() bool {
return metricStatCountSpansSampledFeatureGate.IsEnabled()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"context"
"fmt"
"math"
"runtime"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/telemetry"
)
// policy combines a sampling policy evaluator with the destinations to be
// used for that policy.
type policy struct {
// name used to identify this policy instance.
name string
// evaluator that decides if a trace is sampled or not by this policy instance.
evaluator sampling.PolicyEvaluator
// attribute to use in the telemetry to denote the policy.
attribute metric.MeasurementOption
}
// tailSamplingSpanProcessor handles the incoming trace data and uses the given sampling
// policy to sample traces.
type tailSamplingSpanProcessor struct {
ctx context.Context
set processor.Settings
telemetry *metadata.TelemetryBuilder
logger *zap.Logger
nextConsumer consumer.Traces
maxNumTraces uint64
policies []*policy
idToTrace sync.Map
policyTicker timeutils.TTicker
tickerFrequency time.Duration
decisionBatcher idbatcher.Batcher
sampledIDCache cache.Cache[bool]
nonSampledIDCache cache.Cache[bool]
deleteChan chan pcommon.TraceID
numTracesOnMap *atomic.Uint64
setPolicyMux sync.Mutex
pendingPolicy []PolicyCfg
}
// spanAndScope is a structure holding a span together with its instrumentation scope,
// required for preserving the instrumentation library information while sampling.
// We use pointers here to quickly find the span in the map.
type spanAndScope struct {
span *ptrace.Span
instrumentationScope *pcommon.InstrumentationScope
}
var (
attrSampledTrue = metric.WithAttributes(attribute.String("sampled", "true"))
attrSampledFalse = metric.WithAttributes(attribute.String("sampled", "false"))
decisionToAttribute = map[sampling.Decision]metric.MeasurementOption{
sampling.Sampled: attrSampledTrue,
sampling.NotSampled: attrSampledFalse,
sampling.InvertNotSampled: attrSampledFalse,
sampling.InvertSampled: attrSampledTrue,
}
)
type Option func(*tailSamplingSpanProcessor)
// newTracesProcessor returns a processor.TracesProcessor that will perform tail sampling according to the given
// configuration.
func newTracesProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Traces, cfg Config, opts ...Option) (processor.Traces, error) {
telemetrySettings := set.TelemetrySettings
telemetry, err := metadata.NewTelemetryBuilder(telemetrySettings)
if err != nil {
return nil, err
}
nopCache := cache.NewNopDecisionCache[bool]()
sampledDecisions := nopCache
nonSampledDecisions := nopCache
if cfg.DecisionCache.SampledCacheSize > 0 {
sampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.SampledCacheSize)
if err != nil {
return nil, err
}
}
if cfg.DecisionCache.NonSampledCacheSize > 0 {
nonSampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.NonSampledCacheSize)
if err != nil {
return nil, err
}
}
tsp := &tailSamplingSpanProcessor{
ctx: ctx,
set: set,
telemetry: telemetry,
nextConsumer: nextConsumer,
maxNumTraces: cfg.NumTraces,
sampledIDCache: sampledDecisions,
nonSampledIDCache: nonSampledDecisions,
logger: telemetrySettings.Logger,
numTracesOnMap: &atomic.Uint64{},
deleteChan: make(chan pcommon.TraceID, cfg.NumTraces),
}
tsp.policyTicker = &timeutils.PolicyTicker{OnTickFunc: tsp.samplingPolicyOnTick}
for _, opt := range opts {
opt(tsp)
}
if tsp.tickerFrequency == 0 {
tsp.tickerFrequency = time.Second
}
if tsp.policies == nil {
err := tsp.loadSamplingPolicy(cfg.PolicyCfgs)
if err != nil {
return nil, err
}
}
if tsp.decisionBatcher == nil {
// this will start a goroutine in the background, so we run it only if everything went
// well in creating the policies
numDecisionBatches := math.Max(1, cfg.DecisionWait.Seconds())
inBatcher, err := idbatcher.New(uint64(numDecisionBatches), cfg.ExpectedNewTracesPerSec, uint64(2*runtime.NumCPU()))
if err != nil {
return nil, err
}
tsp.decisionBatcher = inBatcher
}
return tsp, nil
}
// withDecisionBatcher sets the batcher used to batch trace IDs for policy evaluation.
func withDecisionBatcher(batcher idbatcher.Batcher) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.decisionBatcher = batcher
}
}
// withPolicies sets the sampling policies to be used by the processor.
func withPolicies(policies []*policy) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.policies = policies
}
}
// withTickerFrequency sets the frequency at which the processor will evaluate the sampling policies.
func withTickerFrequency(frequency time.Duration) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.tickerFrequency = frequency
}
}
// withSampledDecisionCache sets the cache which the processor uses to store recently sampled trace IDs.
func withSampledDecisionCache(c cache.Cache[bool]) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.sampledIDCache = c
}
}
// withNonSampledDecisionCache sets the cache which the processor uses to store recently non-sampled trace IDs.
func withNonSampledDecisionCache(c cache.Cache[bool]) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.nonSampledIDCache = c
}
}
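// getPolicyEvaluator builds the evaluator for a policy configuration,
// dispatching Composite and And policies to their dedicated constructors and
// everything else to getSharedPolicyEvaluator.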
func getPolicyEvaluator(settings component.TelemetrySettings, cfg *PolicyCfg) (sampling.PolicyEvaluator, error) {
switch cfg.Type {
case Composite:
return getNewCompositePolicy(settings, &cfg.CompositeCfg)
case And:
return getNewAndPolicy(settings, &cfg.AndCfg)
default:
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
}
func getSharedPolicyEvaluator(settings component.TelemetrySettings, cfg *sharedPolicyCfg) (sampling.PolicyEvaluator, error) {
settings.Logger = settings.Logger.With(zap.Any("policy", cfg.Type))
switch cfg.Type {
case AlwaysSample:
return sampling.NewAlwaysSample(settings), nil
case Latency:
lfCfg := cfg.LatencyCfg
return sampling.NewLatency(settings, lfCfg.ThresholdMs, lfCfg.UpperThresholdmsMs), nil
case NumericAttribute:
nafCfg := cfg.NumericAttributeCfg
return sampling.NewNumericAttributeFilter(settings, nafCfg.Key, nafCfg.MinValue, nafCfg.MaxValue, nafCfg.InvertMatch), nil
case Probabilistic:
pCfg := cfg.ProbabilisticCfg
return sampling.NewProbabilisticSampler(settings, pCfg.HashSalt, pCfg.SamplingPercentage), nil
case StringAttribute:
safCfg := cfg.StringAttributeCfg
return sampling.NewStringAttributeFilter(settings, safCfg.Key, safCfg.Values, safCfg.EnabledRegexMatching, safCfg.CacheMaxSize, safCfg.InvertMatch), nil
case StatusCode:
scfCfg := cfg.StatusCodeCfg
return sampling.NewStatusCodeFilter(settings, scfCfg.StatusCodes)
case RateLimiting:
rlfCfg := cfg.RateLimitingCfg
return sampling.NewRateLimiting(settings, rlfCfg.SpansPerSecond), nil
case SpanCount:
spCfg := cfg.SpanCountCfg
return sampling.NewSpanCount(settings, spCfg.MinSpans, spCfg.MaxSpans), nil
case TraceState:
tsfCfg := cfg.TraceStateCfg
return sampling.NewTraceStateFilter(settings, tsfCfg.Key, tsfCfg.Values), nil
case BooleanAttribute:
bafCfg := cfg.BooleanAttributeCfg
return sampling.NewBooleanAttributeFilter(settings, bafCfg.Key, bafCfg.Value, bafCfg.InvertMatch), nil
case OTTLCondition:
ottlfCfg := cfg.OTTLConditionCfg
return sampling.NewOTTLConditionFilter(settings, ottlfCfg.SpanConditions, ottlfCfg.SpanEventConditions, ottlfCfg.ErrorMode)
default:
return nil, fmt.Errorf("unknown sampling policy type %s", cfg.Type)
}
}
type policyMetrics struct {
idNotFoundOnMapCount, evaluateErrorCount, decisionSampled, decisionNotSampled int64
}
func (tsp *tailSamplingSpanProcessor) loadSamplingPolicy(cfgs []PolicyCfg) error {
telemetrySettings := tsp.set.TelemetrySettings
componentID := tsp.set.ID.Name()
cLen := len(cfgs)
policies := make([]*policy, 0, cLen)
policyNames := make(map[string]struct{}, cLen)
for _, cfg := range cfgs {
if cfg.Name == "" {
return fmt.Errorf("policy name cannot be empty")
}
if _, exists := policyNames[cfg.Name]; exists {
return fmt.Errorf("duplicate policy name %q", cfg.Name)
}
policyNames[cfg.Name] = struct{}{}
eval, err := getPolicyEvaluator(telemetrySettings, &cfg)
if err != nil {
return fmt.Errorf("failed to create policy evaluator for %q: %w", cfg.Name, err)
}
uniquePolicyName := cfg.Name
if componentID != "" {
uniquePolicyName = fmt.Sprintf("%s.%s", componentID, cfg.Name)
}
policies = append(policies, &policy{
name: cfg.Name,
evaluator: eval,
attribute: metric.WithAttributes(attribute.String("policy", uniquePolicyName)),
})
}
tsp.policies = policies
tsp.logger.Debug("Loaded sampling policy", zap.Int("policies.len", len(policies)))
return nil
}
func (tsp *tailSamplingSpanProcessor) SetSamplingPolicy(cfgs []PolicyCfg) {
tsp.logger.Debug("Setting pending sampling policy", zap.Int("pending.len", len(cfgs)))
tsp.setPolicyMux.Lock()
defer tsp.setPolicyMux.Unlock()
tsp.pendingPolicy = cfgs
}
func (tsp *tailSamplingSpanProcessor) loadPendingSamplingPolicy() {
tsp.setPolicyMux.Lock()
defer tsp.setPolicyMux.Unlock()
// Nothing pending, do nothing.
pLen := len(tsp.pendingPolicy)
if pLen == 0 {
return
}
tsp.logger.Debug("Loading pending sampling policy", zap.Int("pending.len", pLen))
err := tsp.loadSamplingPolicy(tsp.pendingPolicy)
// Clear the pending policy regardless of error. If the policy is invalid, it
// would fail on every tick; no need to do extra work and flood the log with errors.
tsp.pendingPolicy = nil
if err != nil {
tsp.logger.Error("Failed to load pending sampling policy", zap.Error(err))
tsp.logger.Debug("Continuing to use the previously loaded sampling policy")
}
}
func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() {
tsp.logger.Debug("Sampling Policy Evaluation ticked")
tsp.loadPendingSamplingPolicy()
ctx := context.Background()
metrics := policyMetrics{}
startTime := time.Now()
batch, _ := tsp.decisionBatcher.CloseCurrentAndTakeFirstBatch()
batchLen := len(batch)
for _, id := range batch {
d, ok := tsp.idToTrace.Load(id)
if !ok {
metrics.idNotFoundOnMapCount++
continue
}
trace := d.(*sampling.TraceData)
trace.DecisionTime = time.Now()
decision := tsp.makeDecision(id, trace, &metrics)
tsp.telemetry.ProcessorTailSamplingSamplingDecisionTimerLatency.Record(tsp.ctx, int64(time.Since(startTime)/time.Microsecond))
tsp.telemetry.ProcessorTailSamplingGlobalCountTracesSampled.Add(tsp.ctx, 1, decisionToAttribute[decision])
// Sampled or not, remove the batches
trace.Lock()
allSpans := trace.ReceivedBatches
trace.FinalDecision = decision
trace.ReceivedBatches = ptrace.NewTraces()
trace.Unlock()
switch decision {
case sampling.Sampled:
tsp.releaseSampledTrace(ctx, id, allSpans)
case sampling.NotSampled:
tsp.releaseNotSampledTrace(id)
}
}
tsp.telemetry.ProcessorTailSamplingSamplingTracesOnMemory.Record(tsp.ctx, int64(tsp.numTracesOnMap.Load()))
tsp.telemetry.ProcessorTailSamplingSamplingTraceDroppedTooEarly.Add(tsp.ctx, metrics.idNotFoundOnMapCount)
tsp.telemetry.ProcessorTailSamplingSamplingPolicyEvaluationError.Add(tsp.ctx, metrics.evaluateErrorCount)
tsp.logger.Debug("Sampling policy evaluation completed",
zap.Int("batch.len", batchLen),
zap.Int64("sampled", metrics.decisionSampled),
zap.Int64("notSampled", metrics.decisionNotSampled),
zap.Int64("droppedPriorToEvaluation", metrics.idNotFoundOnMapCount),
zap.Int64("policyEvaluationErrors", metrics.evaluateErrorCount),
)
}
func (tsp *tailSamplingSpanProcessor) makeDecision(id pcommon.TraceID, trace *sampling.TraceData, metrics *policyMetrics) sampling.Decision {
var decisions [8]bool
ctx := context.Background()
startTime := time.Now()
// Check all policies before making a final decision.
for _, p := range tsp.policies {
decision, err := p.evaluator.Evaluate(ctx, id, trace)
latency := time.Since(startTime)
tsp.telemetry.ProcessorTailSamplingSamplingDecisionLatency.Record(ctx, int64(latency/time.Microsecond), p.attribute)
if err != nil {
decisions[sampling.Error] = true
metrics.evaluateErrorCount++
tsp.logger.Debug("Sampling policy error", zap.Error(err))
continue
}
tsp.telemetry.ProcessorTailSamplingCountTracesSampled.Add(ctx, 1, p.attribute, decisionToAttribute[decision])
if telemetry.IsMetricStatCountSpansSampledEnabled() {
tsp.telemetry.ProcessorTailSamplingCountSpansSampled.Add(ctx, trace.SpanCount.Load(), p.attribute, decisionToAttribute[decision])
}
decisions[decision] = true
}
var finalDecision sampling.Decision
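// How the per-policy votes combine (as implemented by the switch below):
// InvertNotSampled takes precedence over everything; otherwise any Sampled
// vote wins; otherwise InvertSampled wins only if no policy voted NotSampled.
// E.g. votes of {Sampled, InvertNotSampled} still yield NotSampled.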
switch {
case decisions[sampling.InvertNotSampled]: // InvertNotSampled takes precedence
finalDecision = sampling.NotSampled
case decisions[sampling.Sampled]:
finalDecision = sampling.Sampled
case decisions[sampling.InvertSampled] && !decisions[sampling.NotSampled]:
finalDecision = sampling.Sampled
default:
finalDecision = sampling.NotSampled
}
if finalDecision == sampling.Sampled {
metrics.decisionSampled++
} else {
metrics.decisionNotSampled++
}
return finalDecision
}
// ConsumeTraces is required by the processor.Traces interface.
func (tsp *tailSamplingSpanProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error {
resourceSpans := td.ResourceSpans()
for i := 0; i < resourceSpans.Len(); i++ {
tsp.processTraces(resourceSpans.At(i))
}
return nil
}
func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans ptrace.ResourceSpans) map[pcommon.TraceID][]spanAndScope {
idToSpans := make(map[pcommon.TraceID][]spanAndScope)
ilss := resourceSpans.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
scope := ilss.At(j)
spans := scope.Spans()
is := scope.Scope()
spansLen := spans.Len()
for k := 0; k < spansLen; k++ {
span := spans.At(k)
key := span.TraceID()
idToSpans[key] = append(idToSpans[key], spanAndScope{
span: &span,
instrumentationScope: &is,
})
}
}
return idToSpans
}
func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans ptrace.ResourceSpans) {
currTime := time.Now()
// Group spans per their traceId to minimize contention on idToTrace
idToSpansAndScope := tsp.groupSpansByTraceKey(resourceSpans)
var newTraceIDs int64
for id, spans := range idToSpansAndScope {
// If the trace ID is in the sampled cache, short circuit the decision
if _, ok := tsp.sampledIDCache.Get(id); ok {
tsp.logger.Debug("Trace ID is in the sampled cache", zap.Stringer("id", id))
traceTd := ptrace.NewTraces()
appendToTraces(traceTd, resourceSpans, spans)
tsp.releaseSampledTrace(tsp.ctx, id, traceTd)
tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision.
Add(tsp.ctx, int64(len(spans)), attrSampledTrue)
continue
}
// If the trace ID is in the non-sampled cache, short circuit the decision
if _, ok := tsp.nonSampledIDCache.Get(id); ok {
tsp.logger.Debug("Trace ID is in the non-sampled cache", zap.Stringer("id", id))
tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision.
Add(tsp.ctx, int64(len(spans)), attrSampledFalse)
continue
}
lenSpans := int64(len(spans))
d, loaded := tsp.idToTrace.Load(id)
if !loaded {
spanCount := &atomic.Int64{}
spanCount.Store(lenSpans)
td := &sampling.TraceData{
ArrivalTime: currTime,
SpanCount: spanCount,
ReceivedBatches: ptrace.NewTraces(),
}
if d, loaded = tsp.idToTrace.LoadOrStore(id, td); !loaded {
newTraceIDs++
tsp.decisionBatcher.AddToCurrentBatch(id)
tsp.numTracesOnMap.Add(1)
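// Reserve a slot in deleteChan, which is sized to cfg.NumTraces: if the
// channel is full, evict the oldest tracked trace ID first so the number of
// traces held in memory stays bounded.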
postDeletion := false
for !postDeletion {
select {
case tsp.deleteChan <- id:
postDeletion = true
default:
traceKeyToDrop := <-tsp.deleteChan
tsp.dropTrace(traceKeyToDrop, currTime)
}
}
}
}
actualData := d.(*sampling.TraceData)
if loaded {
actualData.SpanCount.Add(lenSpans)
}
actualData.Lock()
finalDecision := actualData.FinalDecision
if finalDecision == sampling.Unspecified {
// If the final decision hasn't been made, add the new spans under the lock.
appendToTraces(actualData.ReceivedBatches, resourceSpans, spans)
actualData.Unlock()
continue
}
actualData.Unlock()
switch finalDecision {
case sampling.Sampled:
traceTd := ptrace.NewTraces()
appendToTraces(traceTd, resourceSpans, spans)
tsp.releaseSampledTrace(tsp.ctx, id, traceTd)
case sampling.NotSampled:
tsp.releaseNotSampledTrace(id)
default:
tsp.logger.Warn("Unexpected sampling decision", zap.Int("decision", int(finalDecision)))
}
if !actualData.DecisionTime.IsZero() {
tsp.telemetry.ProcessorTailSamplingSamplingLateSpanAge.Record(tsp.ctx, int64(time.Since(actualData.DecisionTime)/time.Second))
}
}
tsp.telemetry.ProcessorTailSamplingNewTraceIDReceived.Add(tsp.ctx, newTraceIDs)
}
func (tsp *tailSamplingSpanProcessor) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: false}
}
// Start is invoked during service startup.
func (tsp *tailSamplingSpanProcessor) Start(context.Context, component.Host) error {
tsp.policyTicker.Start(tsp.tickerFrequency)
return nil
}
// Shutdown is invoked during service shutdown.
func (tsp *tailSamplingSpanProcessor) Shutdown(context.Context) error {
tsp.decisionBatcher.Stop()
tsp.policyTicker.Stop()
return nil
}
func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pcommon.TraceID, deletionTime time.Time) {
var trace *sampling.TraceData
if d, ok := tsp.idToTrace.Load(traceID); ok {
trace = d.(*sampling.TraceData)
tsp.idToTrace.Delete(traceID)
// Subtract one from numTracesOnMap per https://godoc.org/sync/atomic#AddUint64
tsp.numTracesOnMap.Add(^uint64(0))
}
if trace == nil {
tsp.logger.Debug("Attempt to delete trace ID not on table", zap.Stringer("id", traceID))
return
}
tsp.telemetry.ProcessorTailSamplingSamplingTraceRemovalAge.Record(tsp.ctx, int64(deletionTime.Sub(trace.ArrivalTime)/time.Second))
}
// releaseSampledTrace sends the trace data to the next consumer. It
// additionally adds the trace ID to the cache of sampled trace IDs. If the
// trace ID is cached, it deletes the spans from the internal map.
func (tsp *tailSamplingSpanProcessor) releaseSampledTrace(ctx context.Context, id pcommon.TraceID, td ptrace.Traces) {
tsp.sampledIDCache.Put(id, true)
if err := tsp.nextConsumer.ConsumeTraces(ctx, td); err != nil {
tsp.logger.Warn(
"Error sending spans to destination",
zap.Error(err))
}
_, ok := tsp.sampledIDCache.Get(id)
if ok {
tsp.dropTrace(id, time.Now())
}
}
// releaseNotSampledTrace adds the trace ID to the cache of not sampled trace
// IDs. If the trace ID is cached, it deletes the spans from the internal map.
func (tsp *tailSamplingSpanProcessor) releaseNotSampledTrace(id pcommon.TraceID) {
tsp.nonSampledIDCache.Put(id, true)
_, ok := tsp.nonSampledIDCache.Get(id)
if ok {
tsp.dropTrace(id, time.Now())
}
}
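// appendToTraces copies the given spans, grouped with their instrumentation
// scopes, under a single copy of the origin resource in dest. Scopes are
// deduplicated by pointer identity, which works because groupSpansByTraceKey
// hands out one *pcommon.InstrumentationScope per ScopeSpans.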
func appendToTraces(dest ptrace.Traces, rss ptrace.ResourceSpans, spanAndScopes []spanAndScope) {
rs := dest.ResourceSpans().AppendEmpty()
rss.Resource().CopyTo(rs.Resource())
scopePointerToNewScope := make(map[*pcommon.InstrumentationScope]*ptrace.ScopeSpans)
for _, spanAndScope := range spanAndScopes {
// If the scope of the spanAndScope is not in the map, add it to the map and the destination.
if scope, ok := scopePointerToNewScope[spanAndScope.instrumentationScope]; !ok {
is := rs.ScopeSpans().AppendEmpty()
spanAndScope.instrumentationScope.CopyTo(is.Scope())
scopePointerToNewScope[spanAndScope.instrumentationScope] = &is
sp := is.Spans().AppendEmpty()
spanAndScope.span.CopyTo(sp)
} else {
sp := scope.Spans().AppendEmpty()
spanAndScope.span.CopyTo(sp)
}
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"errors"
"fmt"
"net"
"go.opentelemetry.io/collector/config/configtls"
"go.uber.org/multierr"
)
// Config holds all the parameters to start an HTTP server that can receive logs from Cloudflare
type Config struct {
Logs LogsConfig `mapstructure:"logs"`
}
type LogsConfig struct {
Secret string `mapstructure:"secret"`
Endpoint string `mapstructure:"endpoint"`
TLS *configtls.ServerConfig `mapstructure:"tls"`
Attributes map[string]string `mapstructure:"attributes"`
TimestampField string `mapstructure:"timestamp_field"`
}
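// A minimal construction sketch (endpoint and secret hypothetical):
//
//	cfg := &Config{Logs: LogsConfig{Endpoint: "0.0.0.0:12345", Secret: "1234abcd"}}
//	err := cfg.Validate() // nil: the endpoint splits into host:port and TLS is unset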
var (
errNoEndpoint = errors.New("an endpoint must be specified")
errNoCert = errors.New("tls was configured, but no cert file was specified")
errNoKey = errors.New("tls was configured, but no key file was specified")
defaultTimestampField = "EdgeStartTimestamp"
)
func (c *Config) Validate() error {
if c.Logs.Endpoint == "" {
return errNoEndpoint
}
var errs error
if c.Logs.TLS != nil {
// Missing key
if c.Logs.TLS.KeyFile == "" {
errs = multierr.Append(errs, errNoKey)
}
// Missing cert
if c.Logs.TLS.CertFile == "" {
errs = multierr.Append(errs, errNoCert)
}
}
_, _, err := net.SplitHostPort(c.Logs.Endpoint)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err))
}
return errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata"
)
// NewFactory returns the component factory for the cloudflarereceiver
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithLogs(createLogsReceiver, metadata.LogsStability),
)
}
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
cfg := rConf.(*Config)
return newLogsReceiver(params, cfg, consumer)
}
func createDefaultConfig() component.Config {
return &Config{
Logs: LogsConfig{
TimestampField: defaultTimestampField,
},
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"strconv"
"sync"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata"
)
type logsReceiver struct {
logger *zap.Logger
cfg *LogsConfig
server *http.Server
consumer consumer.Logs
wg *sync.WaitGroup
id component.ID // ID of the receiver component
telemetrySettings component.TelemetrySettings
}
const secretHeaderName = "X-CF-Secret"
func newLogsReceiver(params rcvr.Settings, cfg *Config, consumer consumer.Logs) (*logsReceiver, error) {
recv := &logsReceiver{
cfg: &cfg.Logs,
consumer: consumer,
logger: params.Logger,
wg: &sync.WaitGroup{},
telemetrySettings: params.TelemetrySettings,
id: params.ID,
}
recv.server = &http.Server{
Handler: http.HandlerFunc(recv.handleRequest),
ReadHeaderTimeout: 20 * time.Second,
}
if recv.cfg.TLS != nil {
tlsConfig, err := recv.cfg.TLS.LoadTLSConfig(context.Background())
if err != nil {
return nil, err
}
recv.server.TLSConfig = tlsConfig
}
return recv, nil
}
func (l *logsReceiver) Start(ctx context.Context, host component.Host) error {
return l.startListening(ctx, host)
}
func (l *logsReceiver) Shutdown(ctx context.Context) error {
l.logger.Debug("Shutting down server")
err := l.server.Shutdown(ctx)
if err != nil {
return err
}
l.logger.Debug("Waiting for shutdown to complete.")
l.wg.Wait()
return nil
}
func (l *logsReceiver) startListening(ctx context.Context, host component.Host) error {
l.logger.Debug("starting receiver HTTP server")
// We use l.server.Serve* over l.server.ListenAndServe* so that we can catch
// and return errors related to binding to the network interface on start.
var lc net.ListenConfig
listener, err := lc.Listen(ctx, "tcp", l.cfg.Endpoint)
if err != nil {
return err
}
l.wg.Add(1)
go func() {
defer l.wg.Done()
if l.cfg.TLS != nil {
l.logger.Debug("Starting ServeTLS",
zap.String("address", l.cfg.Endpoint),
zap.String("certfile", l.cfg.TLS.CertFile),
zap.String("keyfile", l.cfg.TLS.KeyFile))
err := l.server.ServeTLS(listener, l.cfg.TLS.CertFile, l.cfg.TLS.KeyFile)
l.logger.Debug("ServeTLS done")
if !errors.Is(err, http.ErrServerClosed) {
l.logger.Error("ServeTLS failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
} else {
l.logger.Debug("Starting Serve",
zap.String("address", l.cfg.Endpoint))
err := l.server.Serve(listener)
l.logger.Debug("Serve done")
if !errors.Is(err, http.ErrServerClosed) {
l.logger.Error("Serve failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}
}()
return nil
}
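// handleRequest authenticates the optional shared-secret header, transparently
// decompresses gzip bodies, acknowledges Cloudflare's "test" probe, and hands
// the parsed records to the next consumer.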
func (l *logsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) {
if l.cfg.Secret != "" {
secretHeader := req.Header.Get(secretHeaderName)
if secretHeader == "" {
rw.WriteHeader(http.StatusUnauthorized)
l.logger.Debug("Got payload with no Secret when it was specified in config, dropping...")
return
} else if secretHeader != l.cfg.Secret {
rw.WriteHeader(http.StatusUnauthorized)
l.logger.Debug("Got payload with invalid Secret, dropping...")
return
}
}
var payload []byte
if req.Header.Get("Content-Encoding") == "gzip" {
reader, err := gzip.NewReader(req.Body)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Got payload with gzip, but failed to read", zap.Error(err))
return
}
defer reader.Close()
// Read the decompressed response body
payload, err = io.ReadAll(reader)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Got payload with gzip, but failed to read", zap.Error(err))
return
}
} else {
var err error
payload, err = io.ReadAll(req.Body)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
}
if string(payload) == "test" {
l.logger.Info("Received test request from Cloudflare")
rw.WriteHeader(http.StatusOK)
return
}
logs, err := parsePayload(payload)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Error("Failed to convert cloudflare request payload to maps", zap.Error(err))
return
}
if err := l.consumer.ConsumeLogs(req.Context(), l.processLogs(pcommon.NewTimestampFromTime(time.Now()), logs)); err != nil {
errorutil.HTTPError(rw, err)
l.logger.Error("Failed to consumer alert as log", zap.Error(err))
return
}
rw.WriteHeader(http.StatusOK)
}
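// parsePayload decodes the newline-delimited JSON body sent by Cloudflare
// Logpush, one JSON object per line, e.g. (records hypothetical):
//
//	{"ZoneName":"example.com","EdgeStartTimestamp":"2023-01-01T00:00:00Z","EdgeResponseStatus":200}
//	{"ZoneName":"example.com","EdgeResponseStatus":502}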
func parsePayload(payload []byte) ([]map[string]any, error) {
lines := bytes.Split(payload, []byte("\n"))
logs := make([]map[string]any, 0, len(lines))
for _, line := range lines {
if len(line) == 0 {
continue
}
var log map[string]any
err := json.Unmarshal(line, &log)
if err != nil {
return logs, err
}
logs = append(logs, log)
}
return logs, nil
}
func (l *logsReceiver) processLogs(now pcommon.Timestamp, logs []map[string]any) plog.Logs {
pLogs := plog.NewLogs()
// Group logs by the ZoneName field, when present, so it can be used as a resource attribute
groupedLogs := make(map[string][]map[string]any)
for _, log := range logs {
zone := ""
if v, ok := log["ZoneName"]; ok {
if stringV, ok := v.(string); ok {
zone = stringV
}
}
groupedLogs[zone] = append(groupedLogs[zone], log)
}
for zone, logGroup := range groupedLogs {
resourceLogs := pLogs.ResourceLogs().AppendEmpty()
if zone != "" {
resource := resourceLogs.Resource()
resource.Attributes().PutStr("cloudflare.zone", zone)
}
scopeLogs := resourceLogs.ScopeLogs().AppendEmpty()
scopeLogs.Scope().SetName(metadata.ScopeName)
for _, log := range logGroup {
logRecord := scopeLogs.LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
if v, ok := log[l.cfg.TimestampField]; ok {
if stringV, ok := v.(string); ok {
ts, err := time.Parse(time.RFC3339, stringV)
if err != nil {
l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Error(err), zap.String("value", stringV))
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
} else {
l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Any("value", v))
}
}
if v, ok := log["EdgeResponseStatus"]; ok {
sev := plog.SeverityNumberUnspecified
switch v := v.(type) {
case string:
intV, err := strconv.ParseInt(v, 10, 64)
if err != nil {
l.logger.Warn("unable to parse EdgeResponseStatus", zap.Error(err), zap.String("value", v))
} else {
sev = severityFromStatusCode(intV)
}
case int64:
sev = severityFromStatusCode(v)
case float64:
sev = severityFromStatusCode(int64(v))
}
if sev != plog.SeverityNumberUnspecified {
logRecord.SetSeverityNumber(sev)
logRecord.SetSeverityText(sev.String())
}
}
attrs := logRecord.Attributes()
for field, attribute := range l.cfg.Attributes {
if v, ok := log[field]; ok {
switch v := v.(type) {
case string:
attrs.PutStr(attribute, v)
case int:
attrs.PutInt(attribute, int64(v))
case int64:
attrs.PutInt(attribute, v)
case float64:
attrs.PutDouble(attribute, v)
case bool:
attrs.PutBool(attribute, v)
default:
l.logger.Warn("unable to translate field to attribute, unsupported type", zap.String("field", field), zap.Any("value", v), zap.String("type", fmt.Sprintf("%T", v)))
}
}
}
err := logRecord.Body().SetEmptyMap().FromRaw(log)
if err != nil {
l.logger.Warn("unable to set body", zap.Error(err))
}
}
}
return pLogs
}
// severityFromStatusCode translates HTTP status code to OpenTelemetry severity number.
func severityFromStatusCode(statusCode int64) plog.SeverityNumber {
switch {
case statusCode < 300:
return plog.SeverityNumberInfo
case statusCode < 400:
return plog.SeverityNumberInfo2
case statusCode < 500:
return plog.SeverityNumberWarn
case statusCode < 600:
return plog.SeverityNumberError
default:
return plog.SeverityNumberUnspecified
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"compress/flate"
"compress/gzip"
"fmt"
"io"
"math"
"mime"
"net/http"
"github.com/grafana/loki/pkg/push"
)
var (
contentType = http.CanonicalHeaderKey("Content-Type")
contentEnc = http.CanonicalHeaderKey("Content-Encoding")
)
const applicationJSON = "application/json"
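// ParseRequest decodes a Loki push request. The body may arrive uncompressed
// or compressed with snappy, gzip, or deflate; JSON payloads are selected by
// the Content-Type header, and anything else is treated as snappy-compressed
// protobuf.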
func ParseRequest(req *http.Request) (*push.PushRequest, error) {
var body io.Reader
contentEncoding := req.Header.Get(contentEnc)
switch contentEncoding {
case "", "snappy":
body = req.Body
case "gzip":
gzipReader, err := gzip.NewReader(req.Body)
if err != nil {
return nil, err
}
defer gzipReader.Close()
body = gzipReader
case "deflate":
flateReader := flate.NewReader(req.Body)
defer flateReader.Close()
body = flateReader
default:
return nil, fmt.Errorf("Content-Encoding %q not supported", contentEncoding)
}
var pushRequest push.PushRequest
reqContentType := req.Header.Get(contentType)
reqContentType, _ /* params */, err := mime.ParseMediaType(reqContentType)
if err != nil {
return nil, err
}
switch reqContentType {
case applicationJSON:
if err = decodePushRequest(body, &pushRequest); err != nil {
return nil, err
}
default:
// When no content-type header is set or when it is set to
// `application/x-protobuf`: expect snappy compression.
if err := parseProtoReader(body, int(req.ContentLength), math.MaxInt32, &pushRequest); err != nil {
return nil, err
}
return &pushRequest, nil
}
return &pushRequest, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"io"
"sort"
"strconv"
"strings"
"time"
"unsafe"
"github.com/buger/jsonparser"
"github.com/grafana/loki/pkg/push"
jsoniter "github.com/json-iterator/go"
)
// PushRequest models a log stream push but is unmarshalled to proto push format.
type PushRequest struct {
Streams []Stream `json:"streams"`
}
// Stream helps with unmarshalling of each log stream for push request.
type Stream push.Stream
func (s *Stream) UnmarshalJSON(data []byte) error {
err := jsonparser.ObjectEach(data, func(key, val []byte, ty jsonparser.ValueType, _ int) error {
switch string(key) {
case "stream":
var labels LabelSet
if err := labels.UnmarshalJSON(val); err != nil {
return err
}
s.Labels = labels.String()
case "values":
if ty == jsonparser.Null {
return nil
}
entries, err := unmarshalHTTPToLogProtoEntries(val)
if err != nil {
return err
}
s.Entries = entries
}
return nil
})
return err
}
func unmarshalHTTPToLogProtoEntries(data []byte) ([]push.Entry, error) {
var (
entries []push.Entry
parseError error
)
if _, err := jsonparser.ArrayEach(data, func(value []byte, ty jsonparser.ValueType, _ int, err error) {
if err != nil || parseError != nil {
return
}
if ty == jsonparser.Null {
return
}
e, err := unmarshalHTTPToLogProtoEntry(value)
if err != nil {
parseError = err
return
}
entries = append(entries, e)
}); err != nil {
parseError = err
}
if parseError != nil {
return nil, parseError
}
return entries, nil
}
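// unmarshalHTTPToLogProtoEntry parses a single Loki entry of the form (values
// hypothetical):
//
//	["1700000000000000000", "log line", {"trace_id": "abc"}]
//
// where the third element, structured metadata, is optional.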
func unmarshalHTTPToLogProtoEntry(data []byte) (push.Entry, error) {
var (
i int
parseError error
e push.Entry
)
_, err := jsonparser.ArrayEach(data, func(value []byte, t jsonparser.ValueType, _ int, _ error) {
// assert that the first two items in the array are of type string
if (i == 0 || i == 1) && t != jsonparser.String {
parseError = jsonparser.MalformedStringError
return
} else if i == 2 && t != jsonparser.Object {
parseError = jsonparser.MalformedObjectError
return
}
switch i {
case 0: // timestamp
ts, err := jsonparser.ParseInt(value)
if err != nil {
parseError = err
return
}
e.Timestamp = time.Unix(0, ts)
case 1: // value
v, err := jsonparser.ParseString(value)
if err != nil {
parseError = err
return
}
e.Line = v
case 2: // structuredMetadata
var structuredMetadata []push.LabelAdapter
err := jsonparser.ObjectEach(value, func(key, val []byte, dataType jsonparser.ValueType, _ int) error {
if dataType != jsonparser.String {
return jsonparser.MalformedStringError
}
structuredMetadata = append(structuredMetadata, push.LabelAdapter{
Name: string(key),
Value: string(val),
})
return nil
})
if err != nil {
parseError = err
return
}
e.StructuredMetadata = structuredMetadata
}
i++
})
if parseError != nil {
return e, parseError
}
return e, err
}
// LabelSet is a key/value pair mapping of labels
type LabelSet map[string]string
func (l *LabelSet) UnmarshalJSON(data []byte) error {
if *l == nil {
*l = make(LabelSet)
}
return jsonparser.ObjectEach(data, func(key, val []byte, _ jsonparser.ValueType, _ int) error {
v, err := jsonparser.ParseString(val)
if err != nil {
return err
}
k, err := jsonparser.ParseString(key)
if err != nil {
return err
}
(*l)[k] = v
return nil
})
}
// String implements the Stringer interface. It returns a formatted/sorted set of label key/value pairs.
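// For example, the hypothetical set {"job": "loki", "env": "prod"} renders as
// {env="prod", job="loki"}.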
func (l LabelSet) String() string {
var b strings.Builder
keys := make([]string, 0, len(l))
for k := range l {
keys = append(keys, k)
}
sort.Strings(keys)
b.WriteByte('{')
for i, k := range keys {
if i > 0 {
b.WriteByte(',')
b.WriteByte(' ')
}
b.WriteString(k)
b.WriteByte('=')
b.WriteString(strconv.Quote(l[k]))
}
b.WriteByte('}')
return b.String()
}
// decodePushRequest directly decodes json to a push.PushRequest
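// The unsafe cast below relies on Stream being a defined type with the same
// underlying layout as push.Stream, so a []Stream header can be reinterpreted
// as a []push.Stream header without copying.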
func decodePushRequest(b io.Reader, r *push.PushRequest) error {
var request PushRequest
if err := jsoniter.NewDecoder(b).Decode(&request); err != nil {
return err
}
*r = push.PushRequest{
Streams: *(*[]push.Stream)(unsafe.Pointer(&request.Streams)),
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"bytes"
"fmt"
"io"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
)
const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)"
// parseProtoReader parses a compressed proto from an io.Reader.
func parseProtoReader(reader io.Reader, expectedSize, maxSize int, req proto.Message) error {
body, err := decompressRequest(reader, expectedSize, maxSize)
if err != nil {
return err
}
// We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first,
// which we can't override without upsetting golint.
req.Reset()
if u, ok := req.(proto.Unmarshaler); ok {
err = u.Unmarshal(body)
} else {
err = proto.NewBuffer(body).Unmarshal(req)
}
if err != nil {
return err
}
return nil
}
func decompressRequest(reader io.Reader, expectedSize, maxSize int) (body []byte, err error) {
defer func() {
if err != nil && len(body) > maxSize {
err = fmt.Errorf(messageSizeLargerErrFmt, len(body), maxSize)
}
}()
if expectedSize > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, expectedSize, maxSize)
}
buffer, ok := tryBufferFromReader(reader)
if ok {
body, err = decompressFromBuffer(buffer, maxSize)
return
}
body, err = decompressFromReader(reader, expectedSize, maxSize)
return
}
func decompressFromReader(reader io.Reader, expectedSize, maxSize int) ([]byte, error) {
var (
buf bytes.Buffer
body []byte
err error
)
if expectedSize > 0 {
buf.Grow(expectedSize + bytes.MinRead) // extra space guarantees no reallocation
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
reader = io.LimitReader(reader, int64(maxSize)+1)
_, err = buf.ReadFrom(reader)
if err != nil {
return nil, err
}
body, err = decompressFromBuffer(&buf, maxSize)
return body, err
}
func decompressFromBuffer(buffer *bytes.Buffer, maxSize int) ([]byte, error) {
if len(buffer.Bytes()) > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, len(buffer.Bytes()), maxSize)
}
size, err := snappy.DecodedLen(buffer.Bytes())
if err != nil {
return nil, err
}
if size > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, size, maxSize)
}
body, err := snappy.Decode(nil, buffer.Bytes())
if err != nil {
return nil, err
}
return body, nil
}
// tryBufferFromReader attempts to cast the reader to a *bytes.Buffer; this is
// possible when using httpgrpc. If it fails, it returns nil and false.
func tryBufferFromReader(reader io.Reader) (*bytes.Buffer, bool) {
if bufReader, ok := reader.(interface {
BytesBuffer() *bytes.Buffer
}); ok && bufReader != nil {
return bufReader.BytesBuffer(), true
}
return nil, false
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
)
const (
accessLogStorageKey = "last_endtime_access_logs_%s"
defaultAccessLogsPollInterval = 5 * time.Minute
defaultAccessLogsPageSize = 20000
defaultAccessLogsMaxPages = 10
)
type accessLogStorageRecord struct {
ClusterName string `json:"cluster_name"`
NextPollStartTime time.Time `json:"next_poll_start_time"`
}
type accessLogClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetClusters(ctx context.Context, groupID string) ([]mongodbatlas.Cluster, error)
GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *internal.GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error)
}
type accessLogsReceiver struct {
client accessLogClient
logger *zap.Logger
storageClient storage.Client
cfg *Config
consumer consumer.Logs
record map[string][]*accessLogStorageRecord
authResult *bool
wg *sync.WaitGroup
cancel context.CancelFunc
}
func newAccessLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *accessLogsReceiver {
r := &accessLogsReceiver{
cancel: func() {},
client: internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger),
cfg: cfg,
logger: settings.Logger,
consumer: consumer,
wg: &sync.WaitGroup{},
storageClient: storage.NewNopClient(),
record: make(map[string][]*accessLogStorageRecord),
}
for _, p := range cfg.Logs.Projects {
p.populateIncludesAndExcludes()
if p.AccessLogs != nil && p.AccessLogs.IsEnabled() {
if p.AccessLogs.PageSize <= 0 {
p.AccessLogs.PageSize = defaultAccessLogsPageSize
}
if p.AccessLogs.MaxPages <= 0 {
p.AccessLogs.MaxPages = defaultAccessLogsMaxPages
}
if p.AccessLogs.PollInterval == 0 {
p.AccessLogs.PollInterval = defaultAccessLogsPollInterval
}
}
}
return r
}
func (alr *accessLogsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error {
alr.logger.Debug("Starting up access log receiver")
cancelCtx, cancel := context.WithCancel(ctx)
alr.cancel = cancel
alr.storageClient = storageClient
return alr.startPolling(cancelCtx)
}
func (alr *accessLogsReceiver) Shutdown(_ context.Context) error {
alr.logger.Debug("Shutting down accessLog receiver")
alr.cancel()
alr.wg.Wait()
return nil
}
func (alr *accessLogsReceiver) startPolling(ctx context.Context) error {
for _, pc := range alr.cfg.Logs.Projects {
if pc.AccessLogs == nil || !pc.AccessLogs.IsEnabled() {
continue
}
t := time.NewTicker(pc.AccessLogs.PollInterval)
alr.wg.Add(1)
go func() {
defer alr.wg.Done()
for {
select {
case <-t.C:
if err := alr.pollAccessLogs(ctx, pc); err != nil {
alr.logger.Error("error while polling for accessLog", zap.Error(err))
}
case <-ctx.Done():
return
}
}
}()
}
return nil
}
func (alr *accessLogsReceiver) pollAccessLogs(ctx context.Context, pc *LogsProjectConfig) error {
st := pcommon.NewTimestampFromTime(time.Now().Add(-1 * pc.AccessLogs.PollInterval)).AsTime()
et := time.Now()
project, err := alr.client.GetProject(ctx, pc.Name)
if err != nil {
alr.logger.Error("error retrieving project information", zap.Error(err), zap.String("project", pc.Name))
return err
}
alr.loadCheckpoint(ctx, project.ID)
clusters, err := alr.client.GetClusters(ctx, project.ID)
if err != nil {
alr.logger.Error("error retrieving cluster information", zap.Error(err), zap.String("project", pc.Name))
return err
}
filteredClusters, err := filterClusters(clusters, pc.ProjectConfig)
if err != nil {
alr.logger.Error("error filtering clusters", zap.Error(err), zap.String("project", pc.Name))
return err
}
for _, cluster := range filteredClusters {
clusterCheckpoint := alr.getClusterCheckpoint(project.ID, cluster.Name)
if clusterCheckpoint == nil {
clusterCheckpoint = &accessLogStorageRecord{
ClusterName: cluster.Name,
NextPollStartTime: st,
}
alr.setClusterCheckpoint(project.ID, clusterCheckpoint)
}
clusterCheckpoint.NextPollStartTime = alr.pollCluster(ctx, pc, project, cluster, clusterCheckpoint.NextPollStartTime, et)
if err = alr.checkpoint(ctx, project.ID); err != nil {
alr.logger.Warn("error checkpointing", zap.Error(err), zap.String("project", pc.Name))
}
}
return nil
}
func (alr *accessLogsReceiver) pollCluster(ctx context.Context, pc *LogsProjectConfig, project *mongodbatlas.Project, cluster mongodbatlas.Cluster, startTime, now time.Time) time.Time {
nowTimestamp := pcommon.NewTimestampFromTime(now)
opts := &internal.GetAccessLogsOptions{
MaxDate: now,
MinDate: startTime,
// Honor the per-project auth_result filter from the config; the
// receiver-level authResult field is never populated.
AuthResult: pc.AccessLogs.AuthResult,
NLogs: int(pc.AccessLogs.PageSize),
}
pageCount := 0
// Assume failure: re-poll from the same startTime unless we successfully
// retrieve access logs and deliver them to the consumer.
nextPollStartTime := startTime
for {
accessLogs, err := alr.client.GetAccessLogs(ctx, project.ID, cluster.Name, opts)
pageCount++
if err != nil {
alr.logger.Error("unable to get access logs", zap.Error(err), zap.String("project", project.Name),
zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
// No logs retrieved; try again on the next interval with the same start time,
// as the API may not yet have all logs for the given window available to query (undocumented behavior).
if len(accessLogs) == 0 {
return nextPollStartTime
}
logs := transformAccessLogs(nowTimestamp, accessLogs, project, cluster, alr.logger)
if err = alr.consumer.ConsumeLogs(ctx, logs); err != nil {
alr.logger.Error("error consuming project cluster log", zap.Error(err), zap.String("project", project.Name),
zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
// The first page of results will have the latest data, so we want to update the nextPollStartTime
// There is risk of data loss at this point if we are unable to then process the remaining pages
// of data, but that is a limitation of the API that we can't work around.
if pageCount == 1 {
// This slice access is safe as we have previously confirmed that the slice is not empty
mostRecentLogTimestamp, tsErr := getTimestamp(accessLogs[0])
if tsErr != nil {
alr.logger.Error("error getting latest log timestamp for calculating next poll timestamps", zap.Error(tsErr),
zap.String("project", project.Name), zap.String("clusterName", cluster.Name))
// If we are not able to get the latest log timestamp, we have to assume that we are collecting all
// data and don't want to risk duplicated data by re-polling the same data again.
nextPollStartTime = now
} else {
nextPollStartTime = mostRecentLogTimestamp.Add(100 * time.Millisecond)
}
}
// If we get back less than the maximum number of logs, we can assume that we've retrieved all of the logs
// that are currently available for this time period, though some logs may not be available in the API yet.
if len(accessLogs) < int(pc.AccessLogs.PageSize) {
return nextPollStartTime
}
if pageCount >= int(pc.AccessLogs.MaxPages) {
alr.logger.Warn("reached maximum number of pages of access logs; increase 'max_pages' or poll more frequently (lower 'poll_interval') to ensure all access logs are retrieved", zap.Int("maxPages", int(pc.AccessLogs.MaxPages)))
return nextPollStartTime
}
// If we get back the maximum number of logs, we need to re-query with a new end time. While undocumented, the API
// returns the most recent logs first. If we get the maximum number of logs back, we can assume that
// there are more logs to be retrieved. We'll re-query with the same start time, but the end
// time set to just before the timestamp of the oldest log entry returned.
oldestLogTimestampFromPage, err := getTimestamp(accessLogs[len(accessLogs)-1])
if err != nil {
alr.logger.Error("error getting oldest log timestamp for calculating next request timestamps", zap.Error(err),
zap.String("project", project.Name), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
opts.MaxDate = oldestLogTimestampFromPage.Add(-1 * time.Millisecond)
// If the new max date is before the min date, we've retrieved all of the logs for this time period
// and receiving the maximum number of logs back is a coincidence.
if opts.MaxDate.Before(opts.MinDate) {
break
}
}
return now
}
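// To illustrate the paging above with made-up timestamps: if the first (full)
// page spans 12:00:00–12:05:00 (newest first), nextPollStartTime becomes
// 12:05:00.100, and the next request keeps MinDate but moves MaxDate to
// 11:59:59.999 (oldest entry minus 1ms). This repeats until a short page is
// returned or MaxDate would cross MinDate.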
func getTimestamp(log *mongodbatlas.AccessLogs) (time.Time, error) {
body, err := parseLogMessage(log)
if err != nil {
// If body couldn't be parsed, we'll still use the outer Timestamp field to determine the new max date.
body = map[string]any{}
}
return getTimestampPreparsedBody(log, body)
}
func getTimestampPreparsedBody(log *mongodbatlas.AccessLogs, body map[string]any) (time.Time, error) {
// If the log message has a timestamp, use that. When present, it has more precision than the timestamp from the access log entry.
if tMap, ok := body["t"]; ok {
if dateMap, ok := tMap.(map[string]any); ok {
if v, ok := dateMap["$date"]; ok {
if dateStr, ok := v.(string); ok {
return time.Parse(time.RFC3339, dateStr)
}
}
}
}
// If the log message doesn't have a timestamp, use the timestamp from the outer access log entry.
t, err := time.Parse(time.RFC3339, log.Timestamp)
if err != nil {
// The documentation claims ISO8601/RFC3339, but the API has been observed returning
// timestamps in UnixDate format, e.g. "Wed Apr 26 02:38:56 GMT 2023".
unixDate, err2 := time.Parse(time.UnixDate, log.Timestamp)
if err2 != nil {
// Return the original error as the documentation claims ISO8601
return time.Time{}, err
}
return unixDate, nil
}
return t, nil
}
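// exampleGetTimestamp is a hedged sketch (not called by the receiver) of the
// precedence above: the high-precision "t.$date" field from the parsed body
// wins; otherwise the entry's outer Timestamp is tried as RFC3339, then as
// UnixDate.
func exampleGetTimestamp() (time.Time, error) {
log := &mongodbatlas.AccessLogs{Timestamp: "Wed Apr 26 02:38:56 GMT 2023"} // UnixDate fallback form
body := map[string]any{"t": map[string]any{"$date": "2023-04-26T02:38:56.123Z"}}
return getTimestampPreparsedBody(log, body) // returns the $date value, parsed via RFC3339
}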
func parseLogMessage(log *mongodbatlas.AccessLogs) (map[string]any, error) {
var body map[string]any
if err := json.Unmarshal([]byte(log.LogLine), &body); err != nil {
return nil, err
}
return body, nil
}
func transformAccessLogs(now pcommon.Timestamp, accessLogs []*mongodbatlas.AccessLogs, p *mongodbatlas.Project, c mongodbatlas.Cluster, logger *zap.Logger) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.project.name", p.Name)
ra.PutStr("mongodbatlas.project.id", p.ID)
ra.PutStr("mongodbatlas.region.name", c.ProviderSettings.RegionName)
ra.PutStr("mongodbatlas.provider.name", c.ProviderSettings.ProviderName)
ra.PutStr("mongodbatlas.org.id", p.OrgID)
ra.PutStr("mongodbatlas.cluster.name", c.Name)
// Expected format documented https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Access-Tracking/operation/listAccessLogsByClusterName
logRecords := resourceLogs.ScopeLogs().AppendEmpty().LogRecords()
for _, accessLog := range accessLogs {
logRecord := logRecords.AppendEmpty()
logBody, err := parseLogMessage(accessLog)
if err != nil {
logger.Error("unable to unmarshal access log into body string", zap.Error(err))
continue
}
err = logRecord.Body().SetEmptyMap().FromRaw(logBody)
if err != nil {
logger.Error("unable to set log record body as map", zap.Error(err))
logRecord.Body().SetStr(accessLog.LogLine)
}
ts, err := getTimestampPreparsedBody(accessLog, logBody)
if err != nil {
logger.Warn("unable to interpret when an access log event was recorded, timestamp not parsed", zap.Error(err), zap.String("timestamp", accessLog.Timestamp))
logRecord.SetTimestamp(now)
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
logRecord.SetObservedTimestamp(now)
attrs := logRecord.Attributes()
attrs.PutStr("event.domain", "mongodbatlas")
logRecord.SetSeverityNumber(plog.SeverityNumberInfo)
logRecord.SetSeverityText(plog.SeverityNumberInfo.String())
if accessLog.AuthResult != nil {
status := "success"
if !*accessLog.AuthResult {
logRecord.SetSeverityNumber(plog.SeverityNumberWarn)
logRecord.SetSeverityText(plog.SeverityNumberWarn.String())
status = "failure"
}
attrs.PutStr("auth.result", status)
}
if accessLog.FailureReason != "" {
attrs.PutStr("auth.failure_reason", accessLog.FailureReason)
}
attrs.PutStr("auth.source", accessLog.AuthSource)
attrs.PutStr("username", accessLog.Username)
attrs.PutStr("hostname", accessLog.Hostname)
attrs.PutStr("remote.ip", accessLog.IPAddress)
}
return logs
}
func accessLogsCheckpointKey(groupID string) string {
return fmt.Sprintf(accessLogStorageKey, groupID)
}
func (alr *accessLogsReceiver) checkpoint(ctx context.Context, groupID string) error {
marshalBytes, err := json.Marshal(alr.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return alr.storageClient.Set(ctx, accessLogsCheckpointKey(groupID), marshalBytes)
}
func (alr *accessLogsReceiver) loadCheckpoint(ctx context.Context, groupID string) {
cBytes, err := alr.storageClient.Get(ctx, accessLogsCheckpointKey(groupID))
if err != nil {
alr.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err))
if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
return
}
if cBytes == nil {
if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
return
}
// checkpoint() persists the whole record map, so decode the same shape here
// and restore this group's records; decoding into a bare slice would fail.
var stored map[string][]*accessLogStorageRecord
if err = json.Unmarshal(cBytes, &stored); err != nil {
alr.logger.Error("unable to decode stored record for access logs, continuing without a checkpoint", zap.Error(err))
}
if records, ok := stored[groupID]; ok {
alr.record[groupID] = records
} else if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
}
func (alr *accessLogsReceiver) getClusterCheckpoint(groupID, clusterName string) *accessLogStorageRecord {
for key, value := range alr.record {
if key == groupID {
for _, v := range value {
if v.ClusterName == clusterName {
return v
}
}
}
}
return nil
}
func (alr *accessLogsReceiver) setClusterCheckpoint(groupID string, clusterCheckpoint *accessLogStorageRecord) {
groupCheckpoints, ok := alr.record[groupID]
if !ok {
alr.record[groupID] = []*accessLogStorageRecord{clusterCheckpoint}
// Without this return, the checkpoint would be appended a second time below.
return
}
var found bool
for idx, v := range groupCheckpoints {
if v.ClusterName == clusterCheckpoint.ClusterName {
found = true
alr.record[groupID][idx] = clusterCheckpoint
}
}
if !found {
alr.record[groupID] = append(alr.record[groupID], clusterCheckpoint)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha1" // #nosec G505 -- SHA1 is the algorithm mongodbatlas uses, it must be used to calculate the HMAC signature
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"strconv"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/multierr"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
// maxContentLength is the maximum payload size we will accept from incoming requests.
// Requests are generally ~1000 bytes, so we overshoot that by an order of magnitude.
// This is to protect from overly large requests.
const (
maxContentLength int64 = 16384
signatureHeaderName string = "X-MMS-Signature"
alertModeListen = "listen"
alertModePoll = "poll"
alertCacheKey = "last_recorded_alert"
defaultAlertsPollInterval = 5 * time.Minute
// defaults were based off API docs https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/
defaultAlertsPageSize = 100
defaultAlertsMaxPages = 10
)
type alertsClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetAlerts(ctx context.Context, groupID string, opts *internal.AlertPollOptions) ([]mongodbatlas.Alert, bool, error)
}
type alertsReceiver struct {
addr string
secret string
server *http.Server
mode string
tlsSettings *configtls.ServerConfig
consumer consumer.Logs
wg *sync.WaitGroup
// only relevant in `poll` mode
projects []*ProjectConfig
client alertsClient
privateKey string
publicKey string
backoffConfig configretry.BackOffConfig
pollInterval time.Duration
record *alertRecord
pageSize int64
maxPages int64
doneChan chan bool
storageClient storage.Client
telemetrySettings component.TelemetrySettings
}
func newAlertsReceiver(params rcvr.Settings, baseConfig *Config, consumer consumer.Logs) (*alertsReceiver, error) {
cfg := baseConfig.Alerts
var tlsConfig *tls.Config
if cfg.TLS != nil {
var err error
tlsConfig, err = cfg.TLS.LoadTLSConfig(context.Background())
if err != nil {
return nil, err
}
}
for _, p := range cfg.Projects {
p.populateIncludesAndExcludes()
}
recv := &alertsReceiver{
addr: cfg.Endpoint,
secret: string(cfg.Secret),
tlsSettings: cfg.TLS,
consumer: consumer,
mode: cfg.Mode,
projects: cfg.Projects,
backoffConfig: baseConfig.BackOffConfig,
publicKey: baseConfig.PublicKey,
privateKey: string(baseConfig.PrivateKey),
wg: &sync.WaitGroup{},
pollInterval: baseConfig.Alerts.PollInterval,
maxPages: baseConfig.Alerts.MaxPages,
pageSize: baseConfig.Alerts.PageSize,
doneChan: make(chan bool, 1),
telemetrySettings: params.TelemetrySettings,
}
if recv.mode == alertModePoll {
recv.client = internal.NewMongoDBAtlasClient(recv.publicKey, recv.privateKey, recv.backoffConfig, recv.telemetrySettings.Logger)
return recv, nil
}
s := &http.Server{
TLSConfig: tlsConfig,
Handler: http.HandlerFunc(recv.handleRequest),
ReadHeaderTimeout: 20 * time.Second,
}
recv.server = s
return recv, nil
}
func (a *alertsReceiver) Start(ctx context.Context, host component.Host, storageClient storage.Client) error {
if a.mode == alertModePoll {
return a.startPolling(ctx, storageClient)
}
return a.startListening(ctx, host)
}
func (a *alertsReceiver) startPolling(ctx context.Context, storageClient storage.Client) error {
a.telemetrySettings.Logger.Debug("starting alerts receiver in retrieval mode")
a.storageClient = storageClient
err := a.syncPersistence(ctx)
if err != nil {
a.telemetrySettings.Logger.Error("there was an error syncing the receiver with checkpoint", zap.Error(err))
}
t := time.NewTicker(a.pollInterval)
a.wg.Add(1)
go func() {
defer a.wg.Done()
for {
select {
case <-t.C:
if err := a.retrieveAndProcessAlerts(ctx); err != nil {
a.telemetrySettings.Logger.Error("unable to retrieve alerts", zap.Error(err))
}
case <-a.doneChan:
return
case <-ctx.Done():
return
}
}
}()
return nil
}
func (a *alertsReceiver) retrieveAndProcessAlerts(ctx context.Context) error {
for _, p := range a.projects {
project, err := a.client.GetProject(ctx, p.Name)
if err != nil {
a.telemetrySettings.Logger.Error("error retrieving project "+p.Name+":", zap.Error(err))
continue
}
a.pollAndProcess(ctx, p, project)
}
return a.writeCheckpoint(ctx)
}
func (a *alertsReceiver) pollAndProcess(ctx context.Context, pc *ProjectConfig, project *mongodbatlas.Project) {
for pageNum := 1; pageNum <= int(a.maxPages); pageNum++ {
projectAlerts, hasNext, err := a.client.GetAlerts(ctx, project.ID, &internal.AlertPollOptions{
PageNum: pageNum,
PageSize: int(a.pageSize),
})
if err != nil {
a.telemetrySettings.Logger.Error("unable to get alerts for project", zap.Error(err))
break
}
filteredAlerts := a.applyFilters(pc, projectAlerts)
now := pcommon.NewTimestampFromTime(time.Now())
logs, err := a.convertAlerts(now, filteredAlerts, project)
if err != nil {
a.telemetrySettings.Logger.Error("error processing alerts", zap.Error(err))
break
}
if logs.LogRecordCount() > 0 {
if err = a.consumer.ConsumeLogs(ctx, logs); err != nil {
a.telemetrySettings.Logger.Error("error consuming alerts", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (a *alertsReceiver) startListening(ctx context.Context, host component.Host) error {
a.telemetrySettings.Logger.Debug("starting alerts receiver in listening mode")
// We use a.server.Serve* over a.server.ListenAndServe* so that we can catch
// and return errors related to binding to the network interface on start.
var lc net.ListenConfig
l, err := lc.Listen(ctx, "tcp", a.addr)
if err != nil {
return err
}
a.wg.Add(1)
if a.tlsSettings != nil {
go func() {
defer a.wg.Done()
a.telemetrySettings.Logger.Debug("Starting ServeTLS",
zap.String("address", a.addr),
zap.String("certfile", a.tlsSettings.CertFile),
zap.String("keyfile", a.tlsSettings.KeyFile))
err := a.server.ServeTLS(l, a.tlsSettings.CertFile, a.tlsSettings.KeyFile)
a.telemetrySettings.Logger.Debug("Serve TLS done")
if !errors.Is(err, http.ErrServerClosed) {
a.telemetrySettings.Logger.Error("ServeTLS failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}()
} else {
go func() {
defer a.wg.Done()
a.telemetrySettings.Logger.Debug("Starting Serve", zap.String("address", a.addr))
err := a.server.Serve(l)
a.telemetrySettings.Logger.Debug("Serve done")
if !errors.Is(err, http.ErrServerClosed) {
a.telemetrySettings.Logger.Error("Serve failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}()
}
return nil
}
func (a *alertsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) {
if req.ContentLength < 0 {
rw.WriteHeader(http.StatusLengthRequired)
a.telemetrySettings.Logger.Debug("Got request with no Content-Length specified", zap.String("remote", req.RemoteAddr))
return
}
if req.ContentLength > maxContentLength {
rw.WriteHeader(http.StatusRequestEntityTooLarge)
a.telemetrySettings.Logger.Debug("Got request with large Content-Length specified",
zap.String("remote", req.RemoteAddr),
zap.Int64("content-length", req.ContentLength),
zap.Int64("max-content-length", maxContentLength))
return
}
payloadSigHeader := req.Header.Get(signatureHeaderName)
if payloadSigHeader == "" {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Got payload with no HMAC signature, dropping...")
return
}
payload := make([]byte, req.ContentLength)
_, err := io.ReadFull(req.Body, payload)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
if err = verifyHMACSignature(a.secret, payload, payloadSigHeader); err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Got payload with invalid HMAC signature, dropping...", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
logs, err := payloadToLogs(time.Now(), payload)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Error("Failed to convert log payload to log record", zap.Error(err))
return
}
if err := a.consumer.ConsumeLogs(req.Context(), logs); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
a.telemetrySettings.Logger.Error("Failed to consumer alert as log", zap.Error(err))
return
}
rw.WriteHeader(http.StatusOK)
}
func (a *alertsReceiver) Shutdown(ctx context.Context) error {
if a.mode == alertModePoll {
return a.shutdownPoller(ctx)
}
return a.shutdownListener(ctx)
}
func (a *alertsReceiver) shutdownListener(ctx context.Context) error {
a.telemetrySettings.Logger.Debug("Shutting down server")
err := a.server.Shutdown(ctx)
if err != nil {
return err
}
a.telemetrySettings.Logger.Debug("Waiting for shutdown to complete.")
a.wg.Wait()
return nil
}
func (a *alertsReceiver) shutdownPoller(ctx context.Context) error {
a.telemetrySettings.Logger.Debug("Shutting down client")
close(a.doneChan)
a.wg.Wait()
return a.writeCheckpoint(ctx)
}
func (a *alertsReceiver) convertAlerts(now pcommon.Timestamp, alerts []mongodbatlas.Alert, project *mongodbatlas.Project) (plog.Logs, error) {
logs := plog.NewLogs()
var errs error
for i := range alerts {
alert := alerts[i]
resourceLogs := logs.ResourceLogs().AppendEmpty()
resourceAttrs := resourceLogs.Resource().Attributes()
resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID)
resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID)
resourceAttrs.PutStr("mongodbatlas.org.id", project.OrgID)
resourceAttrs.PutStr("mongodbatlas.project.name", project.Name)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", &alert.ClusterName)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", &alert.ReplicaSetName)
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts, err := time.Parse(time.RFC3339, alert.Updated)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated))
continue
}
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.SetSeverityNumber(severityFromAPIAlert(alert.Status))
logRecord.SetSeverityText(alert.Status)
// Marshaling the alert body could be fairly expensive, but we expect few
// alerts per poll unless there are a large number of unrecognized alerts to process.
bodyBytes, err := json.Marshal(alert)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to marshal alert into a body string")
continue
}
logRecord.Body().SetStr(string(bodyBytes))
attrs := logRecord.Attributes()
// These attributes are always present
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("event.name", alert.EventTypeName)
attrs.PutStr("status", alert.Status)
attrs.PutStr("created", alert.Created)
attrs.PutStr("updated", alert.Updated)
attrs.PutStr("id", alert.ID)
// These attributes are optional and may not be present, depending on the alert type.
putStringToMapNotNil(attrs, "metric.name", &alert.MetricName)
putStringToMapNotNil(attrs, "type_name", &alert.EventTypeName)
putStringToMapNotNil(attrs, "last_notified", &alert.LastNotified)
putStringToMapNotNil(attrs, "resolved", &alert.Resolved)
putStringToMapNotNil(attrs, "acknowledgement.comment", &alert.AcknowledgementComment)
putStringToMapNotNil(attrs, "acknowledgement.username", &alert.AcknowledgingUsername)
putStringToMapNotNil(attrs, "acknowledgement.until", &alert.AcknowledgedUntil)
if alert.CurrentValue != nil {
attrs.PutDouble("metric.value", *alert.CurrentValue.Number)
attrs.PutStr("metric.units", alert.CurrentValue.Units)
}
// Only present for HOST, HOST_METRIC, and REPLICA_SET alerts
if alert.HostnameAndPort == "" {
continue
}
host, portStr, err := net.SplitHostPort(alert.HostnameAndPort)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split host:port %s: %w", alert.HostnameAndPort, err))
continue
}
port, err := strconv.ParseInt(portStr, 10, 64)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to parse port %s: %w", portStr, err))
continue
}
attrs.PutStr("net.peer.name", host)
attrs.PutInt("net.peer.port", port)
}
return logs, errs
}
func verifyHMACSignature(secret string, payload []byte, signatureHeader string) error {
b64Decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(signatureHeader))
payloadSig, err := io.ReadAll(b64Decoder)
if err != nil {
return err
}
h := hmac.New(sha1.New, []byte(secret))
h.Write(payload)
calculatedSig := h.Sum(nil)
if !hmac.Equal(calculatedSig, payloadSig) {
return errors.New("calculated signature does not equal header signature")
}
return nil
}
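// exampleSignPayload is a hedged sketch (not part of the receiver) of the
// sender's side of verifyHMACSignature: computing the X-MMS-Signature value,
// i.e. base64(HMAC-SHA1(secret, payload)).
func exampleSignPayload(secret string, payload []byte) string {
h := hmac.New(sha1.New, []byte(secret)) // #nosec G505 -- mirrors the verification scheme above
h.Write(payload)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}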
func payloadToLogs(now time.Time, payload []byte) (plog.Logs, error) {
var alert model.Alert
err := json.Unmarshal(payload, &alert)
if err != nil {
return plog.Logs{}, err
}
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(now))
logRecord.SetTimestamp(timestampFromAlert(alert))
logRecord.SetSeverityNumber(severityFromAlert(alert))
logRecord.Body().SetStr(string(payload))
resourceAttrs := resourceLogs.Resource().Attributes()
resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID)
resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", alert.ClusterName)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", alert.ReplicaSetName)
attrs := logRecord.Attributes()
// These attributes are always present
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("event.name", alert.EventType)
attrs.PutStr("message", alert.HumanReadable)
attrs.PutStr("status", alert.Status)
attrs.PutStr("created", alert.Created)
attrs.PutStr("updated", alert.Updated)
attrs.PutStr("id", alert.ID)
// These attributes are optional and may not be present, depending on the alert type.
putStringToMapNotNil(attrs, "metric.name", alert.MetricName)
putStringToMapNotNil(attrs, "type_name", alert.TypeName)
putStringToMapNotNil(attrs, "user_alias", alert.UserAlias)
putStringToMapNotNil(attrs, "last_notified", alert.LastNotified)
putStringToMapNotNil(attrs, "resolved", alert.Resolved)
putStringToMapNotNil(attrs, "acknowledgement.comment", alert.AcknowledgementComment)
putStringToMapNotNil(attrs, "acknowledgement.username", alert.AcknowledgementUsername)
putStringToMapNotNil(attrs, "acknowledgement.until", alert.AcknowledgedUntil)
if alert.CurrentValue != nil {
attrs.PutDouble("metric.value", alert.CurrentValue.Number)
attrs.PutStr("metric.units", alert.CurrentValue.Units)
}
if alert.HostNameAndPort != nil {
host, portStr, err := net.SplitHostPort(*alert.HostNameAndPort)
if err != nil {
return plog.Logs{}, fmt.Errorf("failed to split host:port %s: %w", *alert.HostNameAndPort, err)
}
port, err := strconv.ParseInt(portStr, 10, 64)
if err != nil {
return plog.Logs{}, fmt.Errorf("failed to parse port %s: %w", portStr, err)
}
attrs.PutStr("net.peer.name", host)
attrs.PutInt("net.peer.port", port)
}
return logs, nil
}
// alertRecord wraps a sync.Mutex so it is goroutine-safe and
// supports custom marshaling.
type alertRecord struct {
sync.Mutex
LastRecordedTime *time.Time `mapstructure:"last_recorded"`
}
func (a *alertRecord) SetLastRecorded(lastUpdated *time.Time) {
a.Lock()
a.LastRecordedTime = lastUpdated
a.Unlock()
}
func (a *alertsReceiver) syncPersistence(ctx context.Context) error {
if a.storageClient == nil {
return nil
}
cBytes, err := a.storageClient.Get(ctx, alertCacheKey)
if err != nil || cBytes == nil {
a.record = &alertRecord{}
return nil
}
var cache alertRecord
if err = json.Unmarshal(cBytes, &cache); err != nil {
return fmt.Errorf("unable to decode stored cache: %w", err)
}
a.record = &cache
return nil
}
func (a *alertsReceiver) writeCheckpoint(ctx context.Context) error {
if a.storageClient == nil {
a.telemetrySettings.Logger.Error("unable to write checkpoint since no storage client was found")
return errors.New("missing non-nil storage client")
}
marshalBytes, err := json.Marshal(&a.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return a.storageClient.Set(ctx, alertCacheKey, marshalBytes)
}
func (a *alertsReceiver) applyFilters(pConf *ProjectConfig, alerts []mongodbatlas.Alert) []mongodbatlas.Alert {
filtered := []mongodbatlas.Alert{}
lastRecordedTime := pcommon.Timestamp(0).AsTime()
if a.record.LastRecordedTime != nil {
lastRecordedTime = *a.record.LastRecordedTime
}
// Track the latest timestamp in this payload separately so the watermark
// isn't advanced while we are still filtering against it.
latestInPayload := pcommon.Timestamp(0).AsTime()
for _, alert := range alerts {
updatedTime, err := time.Parse(time.RFC3339, alert.Updated)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated))
continue
}
if updatedTime.Before(lastRecordedTime) || updatedTime.Equal(lastRecordedTime) {
// already processed if the updated time was before or equal to the last recorded
continue
}
if len(pConf.excludesByClusterName) > 0 {
if _, ok := pConf.excludesByClusterName[alert.ClusterName]; ok {
continue
}
}
if len(pConf.IncludeClusters) > 0 {
if _, ok := pConf.includesByClusterName[alert.ClusterName]; !ok {
continue
}
}
filtered = append(filtered, alert)
if updatedTime.After(latestInPayload) {
latestInPayload = updatedTime
}
}
if latestInPayload.After(lastRecordedTime) {
a.record.SetLastRecorded(&latestInPayload)
}
return filtered
}
func timestampFromAlert(a model.Alert) pcommon.Timestamp {
if ts, err := time.Parse(time.RFC3339, a.Updated); err == nil {
return pcommon.NewTimestampFromTime(ts)
}
return pcommon.Timestamp(0)
}
// severityFromAlert maps the alert to a severity number.
// Currently, it just maps "OPEN" alerts to WARN, and everything else to INFO.
func severityFromAlert(a model.Alert) plog.SeverityNumber {
// Status is defined here: https://www.mongodb.com/docs/atlas/reference/api/alerts-get-alert/#response-elements
// It may also be "INFORMATIONAL" for single-fire alerts (events)
switch a.Status {
case "OPEN":
return plog.SeverityNumberWarn
default:
return plog.SeverityNumberInfo
}
}
// severityFromAPIAlert is a workaround for shared types between the API and the model
func severityFromAPIAlert(a string) plog.SeverityNumber {
switch a {
case "OPEN":
return plog.SeverityNumberWarn
default:
return plog.SeverityNumberInfo
}
}
func putStringToMapNotNil(m pcommon.Map, k string, v *string) {
if v != nil {
m.PutStr(k, *v)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter"
)
// combinedLogsReceiver wraps alerts and log receivers in a single log receiver to be consumed by the factory
type combinedLogsReceiver struct {
alerts *alertsReceiver
logs *logsReceiver
events *eventsReceiver
accessLogs *accessLogsReceiver
storageID *component.ID
id component.ID
}
// Start starts up the combined MongoDB Atlas logs and alerts receiver.
func (c *combinedLogsReceiver) Start(ctx context.Context, host component.Host) error {
var errs error
storageClient, err := adapter.GetStorageClient(ctx, host, c.storageID, c.id)
if err != nil {
return fmt.Errorf("failed to get storage client: %w", err)
}
if c.alerts != nil {
if err := c.alerts.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.logs != nil {
if err := c.logs.Start(ctx, host); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.events != nil {
if err := c.events.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.accessLogs != nil {
if err := c.accessLogs.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
return errs
}
// Shutdown shuts down the combined MongoDB Atlas logs and alerts receiver.
func (c *combinedLogsReceiver) Shutdown(ctx context.Context) error {
var errs error
if c.alerts != nil {
if err := c.alerts.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.logs != nil {
if err := c.logs.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.events != nil {
if err := c.events.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.accessLogs != nil {
if err := c.accessLogs.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
return errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"errors"
"fmt"
"net"
"strings"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configopaque"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/scraper/scraperhelper"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
var _ component.Config = (*Config)(nil)
type Config struct {
scraperhelper.ControllerConfig `mapstructure:",squash"`
PublicKey string `mapstructure:"public_key"`
PrivateKey configopaque.String `mapstructure:"private_key"`
Granularity string `mapstructure:"granularity"`
MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"`
Projects []*ProjectConfig `mapstructure:"projects"`
Alerts AlertConfig `mapstructure:"alerts"`
Events *EventsConfig `mapstructure:"events"`
Logs LogConfig `mapstructure:"logs"`
BackOffConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"`
StorageID *component.ID `mapstructure:"storage"`
}
type AlertConfig struct {
Enabled bool `mapstructure:"enabled"`
Endpoint string `mapstructure:"endpoint"`
Secret configopaque.String `mapstructure:"secret"`
TLS *configtls.ServerConfig `mapstructure:"tls"`
Mode string `mapstructure:"mode"`
// these parameters are only relevant in `poll` mode
Projects []*ProjectConfig `mapstructure:"projects"`
PollInterval time.Duration `mapstructure:"poll_interval"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
}
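// For reference, a minimal listen-mode alerts block in the collector config
// might look like this (illustrative endpoint and secret; field names follow
// the mapstructure tags above):
//
//	alerts:
//	  enabled: true
//	  mode: listen
//	  endpoint: "0.0.0.0:4396"
//	  secret: "some_webhook_secret"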
type LogConfig struct {
Enabled bool `mapstructure:"enabled"`
Projects []*LogsProjectConfig `mapstructure:"projects"`
}
// EventsConfig is the configuration options for events collection
type EventsConfig struct {
Projects []*ProjectConfig `mapstructure:"projects"`
Organizations []*OrgConfig `mapstructure:"organizations"`
PollInterval time.Duration `mapstructure:"poll_interval"`
Types []string `mapstructure:"types"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
}
type LogsProjectConfig struct {
ProjectConfig `mapstructure:",squash"`
EnableAuditLogs bool `mapstructure:"collect_audit_logs"`
EnableHostLogs *bool `mapstructure:"collect_host_logs"`
AccessLogs *AccessLogsConfig `mapstructure:"access_logs"`
}
type AccessLogsConfig struct {
Enabled *bool `mapstructure:"enabled"`
PollInterval time.Duration `mapstructure:"poll_interval"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
AuthResult *bool `mapstructure:"auth_result"`
}
func (alc *AccessLogsConfig) IsEnabled() bool {
return alc.Enabled == nil || *alc.Enabled
}
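// A minimal access_logs block might look like this (illustrative values;
// field names follow the mapstructure tags above):
//
//	access_logs:
//	  enabled: true
//	  poll_interval: 5m
//	  page_size: 20000
//	  max_pages: 10
//	  auth_result: false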
type ProjectConfig struct {
Name string `mapstructure:"name"`
ExcludeClusters []string `mapstructure:"exclude_clusters"`
IncludeClusters []string `mapstructure:"include_clusters"`
includesByClusterName map[string]struct{}
excludesByClusterName map[string]struct{}
}
type OrgConfig struct {
ID string `mapstructure:"id"`
}
func (pc *ProjectConfig) populateIncludesAndExcludes() {
pc.includesByClusterName = map[string]struct{}{}
for _, inclusion := range pc.IncludeClusters {
pc.includesByClusterName[inclusion] = struct{}{}
}
pc.excludesByClusterName = map[string]struct{}{}
for _, exclusion := range pc.ExcludeClusters {
pc.excludesByClusterName[exclusion] = struct{}{}
}
}
var (
// Alerts Receiver Errors
errNoEndpoint = errors.New("an endpoint must be specified")
errNoSecret = errors.New("a webhook secret must be specified")
errNoCert = errors.New("tls was configured, but no cert file was specified")
errNoKey = errors.New("tls was configured, but no key file was specified")
errNoModeRecognized = fmt.Errorf("alert mode not recognized. Known alert modes are: %s", strings.Join([]string{
alertModeListen,
alertModePoll,
}, ","))
errPageSizeIncorrect = errors.New("page size must be a value between 1 and 500")
// Logs Receiver Errors
errNoProjects = errors.New("at least one 'project' must be specified")
errNoEvents = errors.New("at least one 'project' or 'organizations' event type must be specified")
errClusterConfig = errors.New("only one of 'include_clusters' or 'exclude_clusters' may be specified")
// Access Logs Errors
errMaxPageSize = errors.New("the maximum value for 'page_size' is 20000")
)
func (c *Config) Validate() error {
var errs error
for _, project := range c.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
}
errs = multierr.Append(errs, c.Alerts.validate())
errs = multierr.Append(errs, c.Logs.validate())
if c.Events != nil {
errs = multierr.Append(errs, c.Events.validate())
}
return errs
}
func (l *LogConfig) validate() error {
if !l.Enabled {
return nil
}
var errs error
if len(l.Projects) == 0 {
errs = multierr.Append(errs, errNoProjects)
}
for _, project := range l.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
if project.AccessLogs != nil && project.AccessLogs.IsEnabled() {
if project.AccessLogs.PageSize > 20000 {
errs = multierr.Append(errs, errMaxPageSize)
}
}
}
return errs
}
func (a *AlertConfig) validate() error {
if !a.Enabled {
// No need to further validate, receiving alerts is disabled.
return nil
}
switch a.Mode {
case alertModePoll:
return a.validatePollConfig()
case alertModeListen:
return a.validateListenConfig()
default:
return errNoModeRecognized
}
}
func (a AlertConfig) validatePollConfig() error {
if len(a.Projects) == 0 {
return errNoProjects
}
// based off API limits https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/
if a.PageSize <= 0 || a.PageSize > 500 {
return errPageSizeIncorrect
}
var errs error
for _, project := range a.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
}
return errs
}
func (a AlertConfig) validateListenConfig() error {
if a.Endpoint == "" {
return errNoEndpoint
}
var errs error
_, _, err := net.SplitHostPort(a.Endpoint)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err))
}
if a.Secret == "" {
errs = multierr.Append(errs, errNoSecret)
}
if a.TLS != nil {
if a.TLS.CertFile == "" {
errs = multierr.Append(errs, errNoCert)
}
if a.TLS.KeyFile == "" {
errs = multierr.Append(errs, errNoKey)
}
}
return errs
}
func (e EventsConfig) validate() error {
if len(e.Projects) == 0 && len(e.Organizations) == 0 {
return errNoEvents
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
)
const (
eventStorageKey = "last_recorded_event"
defaultEventsMaxPages = 25
defaultEventsPageSize = 100
defaultPollInterval = time.Minute
)
type eventsClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetProjectEvents(ctx context.Context, groupID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error)
GetOrganization(ctx context.Context, orgID string) (*mongodbatlas.Organization, error)
GetOrganizationEvents(ctx context.Context, orgID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error)
Shutdown() error
}
type eventsReceiver struct {
client eventsClient
logger *zap.Logger
storageClient storage.Client
cfg *Config
consumer consumer.Logs
maxPages int
pageSize int
pollInterval time.Duration
wg *sync.WaitGroup
record *eventRecord // this record is used for checkpointing last processed events
cancel context.CancelFunc
}
type eventRecord struct {
NextStartTime *time.Time `mapstructure:"next_start_time"`
}
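// The event checkpoint is persisted as a small JSON object; the field has no
// json tag, so the Go field name is used as the key, e.g. (illustrative value):
//
//	{"NextStartTime":"2023-04-26T02:38:56Z"}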
func newEventsReceiver(settings rcvr.Settings, c *Config, consumer consumer.Logs) *eventsReceiver {
r := &eventsReceiver{
client: internal.NewMongoDBAtlasClient(c.PublicKey, string(c.PrivateKey), c.BackOffConfig, settings.Logger),
cfg: c,
logger: settings.Logger,
consumer: consumer,
pollInterval: c.Events.PollInterval,
wg: &sync.WaitGroup{},
maxPages: int(c.Events.MaxPages),
pageSize: int(c.Events.PageSize),
storageClient: storage.NewNopClient(),
}
if r.maxPages == 0 {
r.maxPages = defaultEventsMaxPages
}
if r.pageSize == 0 {
r.pageSize = defaultEventsPageSize
}
if r.pollInterval == 0 {
r.pollInterval = defaultPollInterval
}
return r
}
func (er *eventsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error {
er.logger.Debug("Starting up events receiver")
cancelCtx, cancel := context.WithCancel(ctx)
er.cancel = cancel
er.storageClient = storageClient
er.loadCheckpoint(cancelCtx)
return er.startPolling(cancelCtx)
}
func (er *eventsReceiver) Shutdown(ctx context.Context) error {
er.logger.Debug("Shutting down events receiver")
er.cancel()
er.wg.Wait()
var errs []error
errs = append(errs, er.client.Shutdown())
errs = append(errs, er.checkpoint(ctx))
return errors.Join(errs...)
}
func (er *eventsReceiver) startPolling(ctx context.Context) error {
t := time.NewTicker(er.pollInterval)
er.wg.Add(1)
go func() {
defer er.wg.Done()
for {
select {
case <-t.C:
if err := er.pollEvents(ctx); err != nil {
er.logger.Error("error while polling for events", zap.Error(err))
}
case <-ctx.Done():
return
}
}
}()
return nil
}
func (er *eventsReceiver) pollEvents(ctx context.Context) error {
st := pcommon.NewTimestampFromTime(time.Now().Add(-er.pollInterval)).AsTime()
if er.record.NextStartTime != nil {
st = *er.record.NextStartTime
}
et := time.Now()
for _, pc := range er.cfg.Events.Projects {
project, err := er.client.GetProject(ctx, pc.Name)
if err != nil {
er.logger.Error("error retrieving project information for "+pc.Name+":", zap.Error(err))
return err
}
er.pollProject(ctx, project, pc, st, et)
}
for _, pc := range er.cfg.Events.Organizations {
org, err := er.client.GetOrganization(ctx, pc.ID)
if err != nil {
er.logger.Error("error retrieving org information for "+pc.ID+":", zap.Error(err))
return err
}
er.pollOrg(ctx, org, pc, st, et)
}
er.record.NextStartTime = &et
return er.checkpoint(ctx)
}
func (er *eventsReceiver) pollProject(ctx context.Context, project *mongodbatlas.Project, p *ProjectConfig, startTime, now time.Time) {
for pageN := 1; pageN <= er.maxPages; pageN++ {
opts := &internal.GetEventsOptions{
PageNum: pageN,
EventTypes: er.cfg.Events.Types,
MaxDate: now,
MinDate: startTime,
}
projectEvents, hasNext, err := er.client.GetProjectEvents(ctx, project.ID, opts)
if err != nil {
er.logger.Error("unable to get events for project", zap.Error(err), zap.String("project", p.Name))
break
}
nowTS := pcommon.NewTimestampFromTime(now)
logs := er.transformProjectEvents(nowTS, projectEvents, project)
if logs.LogRecordCount() > 0 {
if err = er.consumer.ConsumeLogs(ctx, logs); err != nil {
er.logger.Error("error consuming project events", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (er *eventsReceiver) pollOrg(ctx context.Context, org *mongodbatlas.Organization, p *OrgConfig, startTime, now time.Time) {
for pageN := 1; pageN <= er.maxPages; pageN++ {
opts := &internal.GetEventsOptions{
PageNum: pageN,
EventTypes: er.cfg.Events.Types,
MaxDate: now,
MinDate: startTime,
}
organizationEvents, hasNext, err := er.client.GetOrganizationEvents(ctx, org.ID, opts)
if err != nil {
er.logger.Error("unable to get events for organization", zap.Error(err), zap.String("organization", p.ID))
break
}
nowTS := pcommon.NewTimestampFromTime(now)
logs := er.transformOrgEvents(nowTS, organizationEvents, org)
if logs.LogRecordCount() > 0 {
if err = er.consumer.ConsumeLogs(ctx, logs); err != nil {
er.logger.Error("error consuming organization events", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (er *eventsReceiver) transformProjectEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, p *mongodbatlas.Project) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.project.name", p.Name)
ra.PutStr("mongodbatlas.org.id", p.OrgID)
er.transformEvents(now, events, &resourceLogs)
return logs
}
func (er *eventsReceiver) transformOrgEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, o *mongodbatlas.Organization) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.org.id", o.ID)
er.transformEvents(now, events, &resourceLogs)
return logs
}
func (er *eventsReceiver) transformEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, resourceLogs *plog.ResourceLogs) {
for _, event := range events {
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
bodyBytes, err := json.Marshal(event)
if err != nil {
er.logger.Error("unable to unmarshal event into body string", zap.Error(err))
continue
}
logRecord.Body().SetStr(string(bodyBytes))
// ISO-8601 formatted
ts, err := time.Parse(time.RFC3339, event.Created)
if err != nil {
er.logger.Warn("unable to interpret when an event was created, expecting a RFC3339 timestamp", zap.String("timestamp", event.Created), zap.String("event", event.ID))
logRecord.SetTimestamp(now)
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
logRecord.SetObservedTimestamp(now)
attrs := logRecord.Attributes()
// always present attributes
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("type", event.EventTypeName)
attrs.PutStr("id", event.ID)
attrs.PutStr("group.id", event.GroupID)
parseOptionalAttributes(&attrs, event)
}
}
func (er *eventsReceiver) checkpoint(ctx context.Context) error {
marshalBytes, err := json.Marshal(er.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return er.storageClient.Set(ctx, eventStorageKey, marshalBytes)
}
func (er *eventsReceiver) loadCheckpoint(ctx context.Context) {
cBytes, err := er.storageClient.Get(ctx, eventStorageKey)
if err != nil {
er.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err))
er.record = &eventRecord{}
return
}
if cBytes == nil {
er.record = &eventRecord{}
return
}
var record eventRecord
if err = json.Unmarshal(cBytes, &record); err != nil {
er.logger.Error("unable to decode stored record for events, continuing without a checkpoint", zap.Error(err))
er.record = &eventRecord{}
return
}
er.record = &record
}
func parseOptionalAttributes(m *pcommon.Map, event *mongodbatlas.Event) {
if event.AlertID != "" {
m.PutStr("alert.id", event.AlertID)
}
if event.AlertConfigID != "" {
m.PutStr("alert.config.id", event.AlertConfigID)
}
if event.Collection != "" {
m.PutStr("collection", event.Collection)
}
if event.Database != "" {
m.PutStr("database", event.Database)
}
if event.Hostname != "" {
m.PutStr("net.peer.name", event.Hostname)
}
if event.Port != 0 {
m.PutInt("net.peer.port", int64(event.Port))
}
if event.InvoiceID != "" {
m.PutStr("invoice.id", event.InvoiceID)
}
if event.Username != "" {
m.PutStr("user.name", event.Username)
}
if event.TargetUsername != "" {
m.PutStr("target.user.name", event.TargetUsername)
}
if event.UserID != "" {
m.PutStr("user.id", event.UserID)
}
if event.TeamID != "" {
m.PutStr("team.id", event.TeamID)
}
if event.RemoteAddress != "" {
m.PutStr("remote.ip", event.RemoteAddress)
}
if event.MetricName != "" {
m.PutStr("metric.name", event.MetricName)
}
if event.OpType != "" {
m.PutStr("event.op_type", event.OpType)
}
if event.PaymentID != "" {
m.PutStr("payment.id", event.PaymentID)
}
if event.ReplicaSetName != "" {
m.PutStr("replica_set.name", event.ReplicaSetName)
}
if event.CurrentValue != nil {
m.PutDouble("metric.value", *event.CurrentValue.Number)
m.PutStr("metric.units", event.CurrentValue.Units)
}
if event.ShardName != "" {
m.PutStr("shard.name", event.ShardName)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"errors"
"fmt"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/scraper/scraperhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
const (
defaultGranularity = "PT1M" // 1-minute, as per https://docs.atlas.mongodb.com/reference/api/process-measurements/
defaultAlertsEnabled = false
defaultLogsEnabled = false
)
// NewFactory creates a factory for the MongoDB Atlas receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createCombinedLogReceiver, metadata.LogsStability))
}
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
cfg := rConf.(*Config)
recv := newMongoDBAtlasReceiver(params, cfg)
ms, err := newMongoDBAtlasScraper(recv)
if err != nil {
return nil, fmt.Errorf("unable to create a MongoDB Atlas Scraper instance: %w", err)
}
return scraperhelper.NewMetricsController(&cfg.ControllerConfig, params, consumer, scraperhelper.AddScraper(metadata.Type, ms))
}
func createCombinedLogReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
cfg := rConf.(*Config)
if !cfg.Alerts.Enabled && !cfg.Logs.Enabled && cfg.Events == nil {
return nil, errors.New("one of 'alerts', 'events', or 'logs' must be enabled")
}
var err error
recv := &combinedLogsReceiver{
id: params.ID,
storageID: cfg.StorageID,
}
if cfg.Alerts.Enabled {
recv.alerts, err = newAlertsReceiver(params, cfg, consumer)
if err != nil {
return nil, fmt.Errorf("unable to create a MongoDB Atlas Alerts Receiver instance: %w", err)
}
}
if cfg.Logs.Enabled {
recv.logs = newMongoDBAtlasLogsReceiver(params, cfg, consumer)
// Confirm at least one project is enabled for access logs before adding
for _, project := range cfg.Logs.Projects {
if project.AccessLogs != nil && project.AccessLogs.IsEnabled() {
recv.accessLogs = newAccessLogsReceiver(params, cfg, consumer)
break
}
}
}
if cfg.Events != nil {
recv.events = newEventsReceiver(params, cfg, consumer)
}
return recv, nil
}
func createDefaultConfig() component.Config {
c := &Config{
ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
Granularity: defaultGranularity,
BackOffConfig: configretry.NewDefaultBackOffConfig(),
MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
Alerts: AlertConfig{
Enabled: defaultAlertsEnabled,
Mode: alertModeListen,
PollInterval: defaultAlertsPollInterval,
PageSize: defaultAlertsPageSize,
MaxPages: defaultAlertsMaxPages,
},
Logs: LogConfig{
Enabled: defaultLogsEnabled,
Projects: []*LogsProjectConfig{},
},
}
// Override the default collection interval of 1 minute with 3 minutes to avoid
// null values for metrics that are not published more frequently.
c.ControllerConfig.CollectionInterval = 3 * time.Minute
return c
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/collector/filter"
)
// MetricConfig provides common config for a particular metric.
type MetricConfig struct {
Enabled bool `mapstructure:"enabled"`
enabledSetByUser bool
}
func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error {
if parser == nil {
return nil
}
err := parser.Unmarshal(ms)
if err != nil {
return err
}
ms.enabledSetByUser = parser.IsSet("enabled")
return nil
}
// MetricsConfig provides config for mongodbatlas metrics.
type MetricsConfig struct {
MongodbatlasDbCounts MetricConfig `mapstructure:"mongodbatlas.db.counts"`
MongodbatlasDbSize MetricConfig `mapstructure:"mongodbatlas.db.size"`
MongodbatlasDiskPartitionIopsAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.average"`
MongodbatlasDiskPartitionIopsMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.max"`
MongodbatlasDiskPartitionLatencyAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.average"`
MongodbatlasDiskPartitionLatencyMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.max"`
MongodbatlasDiskPartitionQueueDepth MetricConfig `mapstructure:"mongodbatlas.disk.partition.queue.depth"`
MongodbatlasDiskPartitionSpaceAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.average"`
MongodbatlasDiskPartitionSpaceMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.max"`
MongodbatlasDiskPartitionThroughput MetricConfig `mapstructure:"mongodbatlas.disk.partition.throughput"`
MongodbatlasDiskPartitionUsageAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.average"`
MongodbatlasDiskPartitionUsageMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.max"`
MongodbatlasDiskPartitionUtilizationAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.average"`
MongodbatlasDiskPartitionUtilizationMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.max"`
MongodbatlasProcessAsserts MetricConfig `mapstructure:"mongodbatlas.process.asserts"`
MongodbatlasProcessBackgroundFlush MetricConfig `mapstructure:"mongodbatlas.process.background_flush"`
MongodbatlasProcessCacheIo MetricConfig `mapstructure:"mongodbatlas.process.cache.io"`
MongodbatlasProcessCacheRatio MetricConfig `mapstructure:"mongodbatlas.process.cache.ratio"`
MongodbatlasProcessCacheSize MetricConfig `mapstructure:"mongodbatlas.process.cache.size"`
MongodbatlasProcessConnections MetricConfig `mapstructure:"mongodbatlas.process.connections"`
MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"`
MongodbatlasProcessCPUChildrenNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.max"`
MongodbatlasProcessCPUChildrenUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.usage.average"`
MongodbatlasProcessCPUChildrenUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.usage.max"`
MongodbatlasProcessCPUNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.normalized.usage.average"`
MongodbatlasProcessCPUNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.normalized.usage.max"`
MongodbatlasProcessCPUUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.usage.average"`
MongodbatlasProcessCPUUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.usage.max"`
MongodbatlasProcessCursors MetricConfig `mapstructure:"mongodbatlas.process.cursors"`
MongodbatlasProcessDbDocumentRate MetricConfig `mapstructure:"mongodbatlas.process.db.document.rate"`
MongodbatlasProcessDbOperationsRate MetricConfig `mapstructure:"mongodbatlas.process.db.operations.rate"`
MongodbatlasProcessDbOperationsTime MetricConfig `mapstructure:"mongodbatlas.process.db.operations.time"`
MongodbatlasProcessDbQueryExecutorScanned MetricConfig `mapstructure:"mongodbatlas.process.db.query_executor.scanned"`
MongodbatlasProcessDbQueryTargetingScannedPerReturned MetricConfig `mapstructure:"mongodbatlas.process.db.query_targeting.scanned_per_returned"`
MongodbatlasProcessDbStorage MetricConfig `mapstructure:"mongodbatlas.process.db.storage"`
MongodbatlasProcessGlobalLock MetricConfig `mapstructure:"mongodbatlas.process.global_lock"`
MongodbatlasProcessIndexBtreeMissRatio MetricConfig `mapstructure:"mongodbatlas.process.index.btree_miss_ratio"`
MongodbatlasProcessIndexCounters MetricConfig `mapstructure:"mongodbatlas.process.index.counters"`
MongodbatlasProcessJournalingCommits MetricConfig `mapstructure:"mongodbatlas.process.journaling.commits"`
MongodbatlasProcessJournalingDataFiles MetricConfig `mapstructure:"mongodbatlas.process.journaling.data_files"`
MongodbatlasProcessJournalingWritten MetricConfig `mapstructure:"mongodbatlas.process.journaling.written"`
MongodbatlasProcessMemoryUsage MetricConfig `mapstructure:"mongodbatlas.process.memory.usage"`
MongodbatlasProcessNetworkIo MetricConfig `mapstructure:"mongodbatlas.process.network.io"`
MongodbatlasProcessNetworkRequests MetricConfig `mapstructure:"mongodbatlas.process.network.requests"`
MongodbatlasProcessOplogRate MetricConfig `mapstructure:"mongodbatlas.process.oplog.rate"`
MongodbatlasProcessOplogTime MetricConfig `mapstructure:"mongodbatlas.process.oplog.time"`
MongodbatlasProcessPageFaults MetricConfig `mapstructure:"mongodbatlas.process.page_faults"`
MongodbatlasProcessRestarts MetricConfig `mapstructure:"mongodbatlas.process.restarts"`
MongodbatlasProcessTickets MetricConfig `mapstructure:"mongodbatlas.process.tickets"`
MongodbatlasSystemCPUNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.cpu.normalized.usage.average"`
MongodbatlasSystemCPUNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.system.cpu.normalized.usage.max"`
MongodbatlasSystemCPUUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.cpu.usage.average"`
MongodbatlasSystemCPUUsageMax MetricConfig `mapstructure:"mongodbatlas.system.cpu.usage.max"`
MongodbatlasSystemFtsCPUNormalizedUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.cpu.normalized.usage"`
MongodbatlasSystemFtsCPUUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.cpu.usage"`
MongodbatlasSystemFtsDiskUsed MetricConfig `mapstructure:"mongodbatlas.system.fts.disk.used"`
MongodbatlasSystemFtsMemoryUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.memory.usage"`
MongodbatlasSystemMemoryUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.memory.usage.average"`
MongodbatlasSystemMemoryUsageMax MetricConfig `mapstructure:"mongodbatlas.system.memory.usage.max"`
MongodbatlasSystemNetworkIoAverage MetricConfig `mapstructure:"mongodbatlas.system.network.io.average"`
MongodbatlasSystemNetworkIoMax MetricConfig `mapstructure:"mongodbatlas.system.network.io.max"`
MongodbatlasSystemPagingIoAverage MetricConfig `mapstructure:"mongodbatlas.system.paging.io.average"`
MongodbatlasSystemPagingIoMax MetricConfig `mapstructure:"mongodbatlas.system.paging.io.max"`
MongodbatlasSystemPagingUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.paging.usage.average"`
MongodbatlasSystemPagingUsageMax MetricConfig `mapstructure:"mongodbatlas.system.paging.usage.max"`
}
func DefaultMetricsConfig() MetricsConfig {
return MetricsConfig{
MongodbatlasDbCounts: MetricConfig{
Enabled: true,
},
MongodbatlasDbSize: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionIopsAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionIopsMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionLatencyAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionLatencyMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionQueueDepth: MetricConfig{
Enabled: false,
},
MongodbatlasDiskPartitionSpaceAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionSpaceMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionThroughput: MetricConfig{
Enabled: false,
},
MongodbatlasDiskPartitionUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUtilizationMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessAsserts: MetricConfig{
Enabled: true,
},
MongodbatlasProcessBackgroundFlush: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCacheIo: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCacheRatio: MetricConfig{
Enabled: false,
},
MongodbatlasProcessCacheSize: MetricConfig{
Enabled: true,
},
MongodbatlasProcessConnections: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCursors: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbDocumentRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbOperationsRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbOperationsTime: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbQueryExecutorScanned: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbQueryTargetingScannedPerReturned: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbStorage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessGlobalLock: MetricConfig{
Enabled: true,
},
MongodbatlasProcessIndexBtreeMissRatio: MetricConfig{
Enabled: true,
},
MongodbatlasProcessIndexCounters: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingCommits: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingDataFiles: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingWritten: MetricConfig{
Enabled: true,
},
MongodbatlasProcessMemoryUsage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessNetworkIo: MetricConfig{
Enabled: true,
},
MongodbatlasProcessNetworkRequests: MetricConfig{
Enabled: true,
},
MongodbatlasProcessOplogRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessOplogTime: MetricConfig{
Enabled: true,
},
MongodbatlasProcessPageFaults: MetricConfig{
Enabled: true,
},
MongodbatlasProcessRestarts: MetricConfig{
Enabled: true,
},
MongodbatlasProcessTickets: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsCPUNormalizedUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsCPUUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsDiskUsed: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsMemoryUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemMemoryUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemMemoryUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemNetworkIoAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemNetworkIoMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingIoAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingIoMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingUsageMax: MetricConfig{
Enabled: true,
},
}
}
// ResourceAttributeConfig provides common config for a particular resource attribute.
type ResourceAttributeConfig struct {
Enabled bool `mapstructure:"enabled"`
// Experimental: MetricsInclude defines a list of filters for attribute values.
// If the list is not empty, only metrics with matching resource attribute values will be emitted.
MetricsInclude []filter.Config `mapstructure:"metrics_include"`
// Experimental: MetricsExclude defines a list of filters for attribute values.
// If the list is not empty, metrics with matching resource attribute values will not be emitted.
// MetricsInclude has higher priority than MetricsExclude.
MetricsExclude []filter.Config `mapstructure:"metrics_exclude"`
enabledSetByUser bool
}
func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error {
if parser == nil {
return nil
}
err := parser.Unmarshal(rac)
if err != nil {
return err
}
rac.enabledSetByUser = parser.IsSet("enabled")
return nil
}
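// Illustrative use of the experimental include/exclude filters (a sketch: the
// nested "strict" matcher key is an assumption about the collector's
// filter.Config, and the attribute key follows the mapstructure tags defined
// below):
//
//	resource_attributes:
//	  mongodb_atlas.db.name:
//	    enabled: true
//	    metrics_include:
//	      - strict: admin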
// ResourceAttributesConfig provides config for mongodbatlas resource attributes.
type ResourceAttributesConfig struct {
MongodbAtlasClusterName ResourceAttributeConfig `mapstructure:"mongodb_atlas.cluster.name"`
MongodbAtlasDbName ResourceAttributeConfig `mapstructure:"mongodb_atlas.db.name"`
MongodbAtlasDiskPartition ResourceAttributeConfig `mapstructure:"mongodb_atlas.disk.partition"`
MongodbAtlasHostName ResourceAttributeConfig `mapstructure:"mongodb_atlas.host.name"`
MongodbAtlasOrgName ResourceAttributeConfig `mapstructure:"mongodb_atlas.org_name"`
MongodbAtlasProcessID ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.id"`
MongodbAtlasProcessPort ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.port"`
MongodbAtlasProcessTypeName ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.type_name"`
MongodbAtlasProjectID ResourceAttributeConfig `mapstructure:"mongodb_atlas.project.id"`
MongodbAtlasProjectName ResourceAttributeConfig `mapstructure:"mongodb_atlas.project.name"`
MongodbAtlasProviderName ResourceAttributeConfig `mapstructure:"mongodb_atlas.provider.name"`
MongodbAtlasRegionName ResourceAttributeConfig `mapstructure:"mongodb_atlas.region.name"`
MongodbAtlasUserAlias ResourceAttributeConfig `mapstructure:"mongodb_atlas.user.alias"`
}
func DefaultResourceAttributesConfig() ResourceAttributesConfig {
return ResourceAttributesConfig{
MongodbAtlasClusterName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasDbName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasDiskPartition: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasHostName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasOrgName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessID: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessPort: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessTypeName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProjectID: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProjectName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProviderName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasRegionName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasUserAlias: ResourceAttributeConfig{
Enabled: false,
},
}
}
// MetricsBuilderConfig is a configuration for mongodbatlas metrics builder.
type MetricsBuilderConfig struct {
Metrics MetricsConfig `mapstructure:"metrics"`
ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"`
}
func DefaultMetricsBuilderConfig() MetricsBuilderConfig {
return MetricsBuilderConfig{
Metrics: DefaultMetricsConfig(),
ResourceAttributes: DefaultResourceAttributesConfig(),
}
}
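// A minimal sketch (hypothetical function name) of tailoring these defaults
// before handing them to a metrics builder: start from
// DefaultMetricsBuilderConfig and flip individual toggles.
func exampleCustomMetricsBuilderConfig() MetricsBuilderConfig {
	cfg := DefaultMetricsBuilderConfig()
	cfg.Metrics.MongodbatlasDiskPartitionQueueDepth.Enabled = true // disabled by default
	cfg.ResourceAttributes.MongodbAtlasClusterName.Enabled = true  // disabled by default
	return cfg
}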
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeAssertType specifies the value assert_type attribute.
type AttributeAssertType int
const (
_ AttributeAssertType = iota
AttributeAssertTypeRegular
AttributeAssertTypeWarning
AttributeAssertTypeMsg
AttributeAssertTypeUser
)
// String returns the string representation of the AttributeAssertType.
func (av AttributeAssertType) String() string {
switch av {
case AttributeAssertTypeRegular:
return "regular"
case AttributeAssertTypeWarning:
return "warning"
case AttributeAssertTypeMsg:
return "msg"
case AttributeAssertTypeUser:
return "user"
}
return ""
}
// MapAttributeAssertType is a helper map of string to AttributeAssertType attribute value.
var MapAttributeAssertType = map[string]AttributeAssertType{
"regular": AttributeAssertTypeRegular,
"warning": AttributeAssertTypeWarning,
"msg": AttributeAssertTypeMsg,
"user": AttributeAssertTypeUser,
}
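// Every attribute in this file follows the same pattern, so one sketch
// (hypothetical function name, not generated code) suffices: look up the
// typed constant for an incoming string via the Map variable and render it
// back with String(). Strings absent from the map yield the zero value,
// whose String() is "".
func exampleAssertTypeRoundTrip(s string) (AttributeAssertType, bool) {
	at, ok := MapAttributeAssertType[s]
	// For s == "warning": at == AttributeAssertTypeWarning and
	// at.String() == "warning".
	return at, ok
}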
// AttributeBtreeCounterType specifies the value btree_counter_type attribute.
type AttributeBtreeCounterType int
const (
_ AttributeBtreeCounterType = iota
AttributeBtreeCounterTypeAccesses
AttributeBtreeCounterTypeHits
AttributeBtreeCounterTypeMisses
)
// String returns the string representation of the AttributeBtreeCounterType.
func (av AttributeBtreeCounterType) String() string {
switch av {
case AttributeBtreeCounterTypeAccesses:
return "accesses"
case AttributeBtreeCounterTypeHits:
return "hits"
case AttributeBtreeCounterTypeMisses:
return "misses"
}
return ""
}
// MapAttributeBtreeCounterType is a helper map of string to AttributeBtreeCounterType attribute value.
var MapAttributeBtreeCounterType = map[string]AttributeBtreeCounterType{
"accesses": AttributeBtreeCounterTypeAccesses,
"hits": AttributeBtreeCounterTypeHits,
"misses": AttributeBtreeCounterTypeMisses,
}
// AttributeCacheDirection specifies the value cache_direction attribute.
type AttributeCacheDirection int
const (
_ AttributeCacheDirection = iota
AttributeCacheDirectionReadInto
AttributeCacheDirectionWrittenFrom
)
// String returns the string representation of the AttributeCacheDirection.
func (av AttributeCacheDirection) String() string {
switch av {
case AttributeCacheDirectionReadInto:
return "read_into"
case AttributeCacheDirectionWrittenFrom:
return "written_from"
}
return ""
}
// MapAttributeCacheDirection is a helper map of string to AttributeCacheDirection attribute value.
var MapAttributeCacheDirection = map[string]AttributeCacheDirection{
"read_into": AttributeCacheDirectionReadInto,
"written_from": AttributeCacheDirectionWrittenFrom,
}
// AttributeCacheRatioType specifies the value cache_ratio_type attribute.
type AttributeCacheRatioType int
const (
_ AttributeCacheRatioType = iota
AttributeCacheRatioTypeCacheFill
AttributeCacheRatioTypeDirtyFill
)
// String returns the string representation of the AttributeCacheRatioType.
func (av AttributeCacheRatioType) String() string {
switch av {
case AttributeCacheRatioTypeCacheFill:
return "cache_fill"
case AttributeCacheRatioTypeDirtyFill:
return "dirty_fill"
}
return ""
}
// MapAttributeCacheRatioType is a helper map of string to AttributeCacheRatioType attribute value.
var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{
"cache_fill": AttributeCacheRatioTypeCacheFill,
"dirty_fill": AttributeCacheRatioTypeDirtyFill,
}
// AttributeCacheStatus specifies the value cache_status attribute.
type AttributeCacheStatus int
const (
_ AttributeCacheStatus = iota
AttributeCacheStatusDirty
AttributeCacheStatusUsed
)
// String returns the string representation of the AttributeCacheStatus.
func (av AttributeCacheStatus) String() string {
switch av {
case AttributeCacheStatusDirty:
return "dirty"
case AttributeCacheStatusUsed:
return "used"
}
return ""
}
// MapAttributeCacheStatus is a helper map of string to AttributeCacheStatus attribute value.
var MapAttributeCacheStatus = map[string]AttributeCacheStatus{
"dirty": AttributeCacheStatusDirty,
"used": AttributeCacheStatusUsed,
}
// AttributeClusterRole specifies the value cluster_role attribute.
type AttributeClusterRole int
const (
_ AttributeClusterRole = iota
AttributeClusterRolePrimary
AttributeClusterRoleReplica
)
// String returns the string representation of the AttributeClusterRole.
func (av AttributeClusterRole) String() string {
switch av {
case AttributeClusterRolePrimary:
return "primary"
case AttributeClusterRoleReplica:
return "replica"
}
return ""
}
// MapAttributeClusterRole is a helper map of string to AttributeClusterRole attribute value.
var MapAttributeClusterRole = map[string]AttributeClusterRole{
"primary": AttributeClusterRolePrimary,
"replica": AttributeClusterRoleReplica,
}
// AttributeCPUState specifies the value cpu_state attribute.
type AttributeCPUState int
const (
_ AttributeCPUState = iota
AttributeCPUStateKernel
AttributeCPUStateUser
AttributeCPUStateNice
AttributeCPUStateIowait
AttributeCPUStateIrq
AttributeCPUStateSoftirq
AttributeCPUStateGuest
AttributeCPUStateSteal
)
// String returns the string representation of the AttributeCPUState.
func (av AttributeCPUState) String() string {
switch av {
case AttributeCPUStateKernel:
return "kernel"
case AttributeCPUStateUser:
return "user"
case AttributeCPUStateNice:
return "nice"
case AttributeCPUStateIowait:
return "iowait"
case AttributeCPUStateIrq:
return "irq"
case AttributeCPUStateSoftirq:
return "softirq"
case AttributeCPUStateGuest:
return "guest"
case AttributeCPUStateSteal:
return "steal"
}
return ""
}
// MapAttributeCPUState is a helper map of string to AttributeCPUState attribute value.
var MapAttributeCPUState = map[string]AttributeCPUState{
"kernel": AttributeCPUStateKernel,
"user": AttributeCPUStateUser,
"nice": AttributeCPUStateNice,
"iowait": AttributeCPUStateIowait,
"irq": AttributeCPUStateIrq,
"softirq": AttributeCPUStateSoftirq,
"guest": AttributeCPUStateGuest,
"steal": AttributeCPUStateSteal,
}
// AttributeCursorState specifies the value cursor_state attribute.
type AttributeCursorState int
const (
_ AttributeCursorState = iota
AttributeCursorStateTimedOut
AttributeCursorStateOpen
)
// String returns the string representation of the AttributeCursorState.
func (av AttributeCursorState) String() string {
switch av {
case AttributeCursorStateTimedOut:
return "timed_out"
case AttributeCursorStateOpen:
return "open"
}
return ""
}
// MapAttributeCursorState is a helper map of string to AttributeCursorState attribute value.
var MapAttributeCursorState = map[string]AttributeCursorState{
"timed_out": AttributeCursorStateTimedOut,
"open": AttributeCursorStateOpen,
}
// AttributeDirection specifies the value direction attribute.
type AttributeDirection int
const (
_ AttributeDirection = iota
AttributeDirectionReceive
AttributeDirectionTransmit
)
// String returns the string representation of the AttributeDirection.
func (av AttributeDirection) String() string {
switch av {
case AttributeDirectionReceive:
return "receive"
case AttributeDirectionTransmit:
return "transmit"
}
return ""
}
// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
var MapAttributeDirection = map[string]AttributeDirection{
"receive": AttributeDirectionReceive,
"transmit": AttributeDirectionTransmit,
}
// AttributeDiskDirection specifies the value disk_direction attribute.
type AttributeDiskDirection int
const (
_ AttributeDiskDirection = iota
AttributeDiskDirectionRead
AttributeDiskDirectionWrite
AttributeDiskDirectionTotal
)
// String returns the string representation of the AttributeDiskDirection.
func (av AttributeDiskDirection) String() string {
switch av {
case AttributeDiskDirectionRead:
return "read"
case AttributeDiskDirectionWrite:
return "write"
case AttributeDiskDirectionTotal:
return "total"
}
return ""
}
// MapAttributeDiskDirection is a helper map of string to AttributeDiskDirection attribute value.
var MapAttributeDiskDirection = map[string]AttributeDiskDirection{
"read": AttributeDiskDirectionRead,
"write": AttributeDiskDirectionWrite,
"total": AttributeDiskDirectionTotal,
}
// AttributeDiskStatus specifies the value disk_status attribute.
type AttributeDiskStatus int
const (
_ AttributeDiskStatus = iota
AttributeDiskStatusFree
AttributeDiskStatusUsed
)
// String returns the string representation of the AttributeDiskStatus.
func (av AttributeDiskStatus) String() string {
switch av {
case AttributeDiskStatusFree:
return "free"
case AttributeDiskStatusUsed:
return "used"
}
return ""
}
// MapAttributeDiskStatus is a helper map of string to AttributeDiskStatus attribute value.
var MapAttributeDiskStatus = map[string]AttributeDiskStatus{
"free": AttributeDiskStatusFree,
"used": AttributeDiskStatusUsed,
}
// AttributeDocumentStatus specifies the value document_status attribute.
type AttributeDocumentStatus int
const (
_ AttributeDocumentStatus = iota
AttributeDocumentStatusReturned
AttributeDocumentStatusInserted
AttributeDocumentStatusUpdated
AttributeDocumentStatusDeleted
)
// String returns the string representation of the AttributeDocumentStatus.
func (av AttributeDocumentStatus) String() string {
switch av {
case AttributeDocumentStatusReturned:
return "returned"
case AttributeDocumentStatusInserted:
return "inserted"
case AttributeDocumentStatusUpdated:
return "updated"
case AttributeDocumentStatusDeleted:
return "deleted"
}
return ""
}
// MapAttributeDocumentStatus is a helper map of string to AttributeDocumentStatus attribute value.
var MapAttributeDocumentStatus = map[string]AttributeDocumentStatus{
"returned": AttributeDocumentStatusReturned,
"inserted": AttributeDocumentStatusInserted,
"updated": AttributeDocumentStatusUpdated,
"deleted": AttributeDocumentStatusDeleted,
}
// AttributeExecutionType specifies the value execution_type attribute.
type AttributeExecutionType int
const (
_ AttributeExecutionType = iota
AttributeExecutionTypeReads
AttributeExecutionTypeWrites
AttributeExecutionTypeCommands
)
// String returns the string representation of the AttributeExecutionType.
func (av AttributeExecutionType) String() string {
switch av {
case AttributeExecutionTypeReads:
return "reads"
case AttributeExecutionTypeWrites:
return "writes"
case AttributeExecutionTypeCommands:
return "commands"
}
return ""
}
// MapAttributeExecutionType is a helper map of string to AttributeExecutionType attribute value.
var MapAttributeExecutionType = map[string]AttributeExecutionType{
"reads": AttributeExecutionTypeReads,
"writes": AttributeExecutionTypeWrites,
"commands": AttributeExecutionTypeCommands,
}
// AttributeGlobalLockState specifies the value global_lock_state attribute.
type AttributeGlobalLockState int
const (
_ AttributeGlobalLockState = iota
AttributeGlobalLockStateCurrentQueueTotal
AttributeGlobalLockStateCurrentQueueReaders
AttributeGlobalLockStateCurrentQueueWriters
)
// String returns the string representation of the AttributeGlobalLockState.
func (av AttributeGlobalLockState) String() string {
switch av {
case AttributeGlobalLockStateCurrentQueueTotal:
return "current_queue_total"
case AttributeGlobalLockStateCurrentQueueReaders:
return "current_queue_readers"
case AttributeGlobalLockStateCurrentQueueWriters:
return "current_queue_writers"
}
return ""
}
// MapAttributeGlobalLockState is a helper map of string to AttributeGlobalLockState attribute value.
var MapAttributeGlobalLockState = map[string]AttributeGlobalLockState{
"current_queue_total": AttributeGlobalLockStateCurrentQueueTotal,
"current_queue_readers": AttributeGlobalLockStateCurrentQueueReaders,
"current_queue_writers": AttributeGlobalLockStateCurrentQueueWriters,
}
// AttributeMemoryIssueType specifies the value memory_issue_type attribute.
type AttributeMemoryIssueType int
const (
_ AttributeMemoryIssueType = iota
AttributeMemoryIssueTypeExtraInfo
AttributeMemoryIssueTypeGlobalAccessesNotInMemory
AttributeMemoryIssueTypeExceptionsThrown
)
// String returns the string representation of the AttributeMemoryIssueType.
func (av AttributeMemoryIssueType) String() string {
switch av {
case AttributeMemoryIssueTypeExtraInfo:
return "extra_info"
case AttributeMemoryIssueTypeGlobalAccessesNotInMemory:
return "global_accesses_not_in_memory"
case AttributeMemoryIssueTypeExceptionsThrown:
return "exceptions_thrown"
}
return ""
}
// MapAttributeMemoryIssueType is a helper map of string to AttributeMemoryIssueType attribute value.
var MapAttributeMemoryIssueType = map[string]AttributeMemoryIssueType{
"extra_info": AttributeMemoryIssueTypeExtraInfo,
"global_accesses_not_in_memory": AttributeMemoryIssueTypeGlobalAccessesNotInMemory,
"exceptions_thrown": AttributeMemoryIssueTypeExceptionsThrown,
}
// AttributeMemoryState specifies the value memory_state attribute.
type AttributeMemoryState int
const (
_ AttributeMemoryState = iota
AttributeMemoryStateResident
AttributeMemoryStateVirtual
AttributeMemoryStateMapped
AttributeMemoryStateComputed
AttributeMemoryStateShared
AttributeMemoryStateFree
AttributeMemoryStateUsed
)
// String returns the string representation of the AttributeMemoryState.
func (av AttributeMemoryState) String() string {
switch av {
case AttributeMemoryStateResident:
return "resident"
case AttributeMemoryStateVirtual:
return "virtual"
case AttributeMemoryStateMapped:
return "mapped"
case AttributeMemoryStateComputed:
return "computed"
case AttributeMemoryStateShared:
return "shared"
case AttributeMemoryStateFree:
return "free"
case AttributeMemoryStateUsed:
return "used"
}
return ""
}
// MapAttributeMemoryState is a helper map of string to AttributeMemoryState attribute value.
var MapAttributeMemoryState = map[string]AttributeMemoryState{
"resident": AttributeMemoryStateResident,
"virtual": AttributeMemoryStateVirtual,
"mapped": AttributeMemoryStateMapped,
"computed": AttributeMemoryStateComputed,
"shared": AttributeMemoryStateShared,
"free": AttributeMemoryStateFree,
"used": AttributeMemoryStateUsed,
}
// AttributeMemoryStatus specifies the value memory_status attribute.
type AttributeMemoryStatus int
const (
_ AttributeMemoryStatus = iota
AttributeMemoryStatusAvailable
AttributeMemoryStatusBuffers
AttributeMemoryStatusCached
AttributeMemoryStatusFree
AttributeMemoryStatusShared
AttributeMemoryStatusUsed
)
// String returns the string representation of the AttributeMemoryStatus.
func (av AttributeMemoryStatus) String() string {
switch av {
case AttributeMemoryStatusAvailable:
return "available"
case AttributeMemoryStatusBuffers:
return "buffers"
case AttributeMemoryStatusCached:
return "cached"
case AttributeMemoryStatusFree:
return "free"
case AttributeMemoryStatusShared:
return "shared"
case AttributeMemoryStatusUsed:
return "used"
}
return ""
}
// MapAttributeMemoryStatus is a helper map of string to AttributeMemoryStatus attribute value.
var MapAttributeMemoryStatus = map[string]AttributeMemoryStatus{
"available": AttributeMemoryStatusAvailable,
"buffers": AttributeMemoryStatusBuffers,
"cached": AttributeMemoryStatusCached,
"free": AttributeMemoryStatusFree,
"shared": AttributeMemoryStatusShared,
"used": AttributeMemoryStatusUsed,
}
// AttributeObjectType specifies the value object_type attribute.
type AttributeObjectType int
const (
_ AttributeObjectType = iota
AttributeObjectTypeCollection
AttributeObjectTypeIndex
AttributeObjectTypeExtent
AttributeObjectTypeObject
AttributeObjectTypeView
AttributeObjectTypeStorage
AttributeObjectTypeData
)
// String returns the string representation of the AttributeObjectType.
func (av AttributeObjectType) String() string {
switch av {
case AttributeObjectTypeCollection:
return "collection"
case AttributeObjectTypeIndex:
return "index"
case AttributeObjectTypeExtent:
return "extent"
case AttributeObjectTypeObject:
return "object"
case AttributeObjectTypeView:
return "view"
case AttributeObjectTypeStorage:
return "storage"
case AttributeObjectTypeData:
return "data"
}
return ""
}
// MapAttributeObjectType is a helper map of string to AttributeObjectType attribute value.
var MapAttributeObjectType = map[string]AttributeObjectType{
"collection": AttributeObjectTypeCollection,
"index": AttributeObjectTypeIndex,
"extent": AttributeObjectTypeExtent,
"object": AttributeObjectTypeObject,
"view": AttributeObjectTypeView,
"storage": AttributeObjectTypeStorage,
"data": AttributeObjectTypeData,
}
// AttributeOperation specifies the value operation attribute.
type AttributeOperation int
const (
_ AttributeOperation = iota
AttributeOperationCmd
AttributeOperationQuery
AttributeOperationUpdate
AttributeOperationDelete
AttributeOperationGetmore
AttributeOperationInsert
AttributeOperationScanAndOrder
AttributeOperationTTLDeleted
)
// String returns the string representation of the AttributeOperation.
func (av AttributeOperation) String() string {
switch av {
case AttributeOperationCmd:
return "cmd"
case AttributeOperationQuery:
return "query"
case AttributeOperationUpdate:
return "update"
case AttributeOperationDelete:
return "delete"
case AttributeOperationGetmore:
return "getmore"
case AttributeOperationInsert:
return "insert"
case AttributeOperationScanAndOrder:
return "scan_and_order"
case AttributeOperationTTLDeleted:
return "ttl_deleted"
}
return ""
}
// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
var MapAttributeOperation = map[string]AttributeOperation{
"cmd": AttributeOperationCmd,
"query": AttributeOperationQuery,
"update": AttributeOperationUpdate,
"delete": AttributeOperationDelete,
"getmore": AttributeOperationGetmore,
"insert": AttributeOperationInsert,
"scan_and_order": AttributeOperationScanAndOrder,
"ttl_deleted": AttributeOperationTTLDeleted,
}
// AttributeOplogType specifies the value oplog_type attribute.
type AttributeOplogType int
const (
_ AttributeOplogType = iota
AttributeOplogTypeSlaveLagMasterTime
AttributeOplogTypeMasterTime
AttributeOplogTypeMasterLagTimeDiff
)
// String returns the string representation of the AttributeOplogType.
func (av AttributeOplogType) String() string {
switch av {
case AttributeOplogTypeSlaveLagMasterTime:
return "slave_lag_master_time"
case AttributeOplogTypeMasterTime:
return "master_time"
case AttributeOplogTypeMasterLagTimeDiff:
return "master_lag_time_diff"
}
return ""
}
// MapAttributeOplogType is a helper map of string to AttributeOplogType attribute value.
var MapAttributeOplogType = map[string]AttributeOplogType{
"slave_lag_master_time": AttributeOplogTypeSlaveLagMasterTime,
"master_time": AttributeOplogTypeMasterTime,
"master_lag_time_diff": AttributeOplogTypeMasterLagTimeDiff,
}
// AttributeScannedType specifies the value scanned_type attribute.
type AttributeScannedType int
const (
_ AttributeScannedType = iota
AttributeScannedTypeIndexItems
AttributeScannedTypeObjects
)
// String returns the string representation of the AttributeScannedType.
func (av AttributeScannedType) String() string {
switch av {
case AttributeScannedTypeIndexItems:
return "index_items"
case AttributeScannedTypeObjects:
return "objects"
}
return ""
}
// MapAttributeScannedType is a helper map of string to AttributeScannedType attribute value.
var MapAttributeScannedType = map[string]AttributeScannedType{
"index_items": AttributeScannedTypeIndexItems,
"objects": AttributeScannedTypeObjects,
}
// AttributeStorageStatus specifies the value storage_status attribute.
type AttributeStorageStatus int
const (
_ AttributeStorageStatus = iota
AttributeStorageStatusTotal
AttributeStorageStatusDataSize
AttributeStorageStatusIndexSize
AttributeStorageStatusDataSizeWoSystem
)
// String returns the string representation of the AttributeStorageStatus.
func (av AttributeStorageStatus) String() string {
switch av {
case AttributeStorageStatusTotal:
return "total"
case AttributeStorageStatusDataSize:
return "data_size"
case AttributeStorageStatusIndexSize:
return "index_size"
case AttributeStorageStatusDataSizeWoSystem:
return "data_size_wo_system"
}
return ""
}
// MapAttributeStorageStatus is a helper map of string to AttributeStorageStatus attribute value.
var MapAttributeStorageStatus = map[string]AttributeStorageStatus{
"total": AttributeStorageStatusTotal,
"data_size": AttributeStorageStatusDataSize,
"index_size": AttributeStorageStatusIndexSize,
"data_size_wo_system": AttributeStorageStatusDataSizeWoSystem,
}
// AttributeTicketType specifies the value ticket_type attribute.
type AttributeTicketType int
const (
_ AttributeTicketType = iota
AttributeTicketTypeAvailableReads
AttributeTicketTypeAvailableWrites
)
// String returns the string representation of the AttributeTicketType.
func (av AttributeTicketType) String() string {
switch av {
case AttributeTicketTypeAvailableReads:
return "available_reads"
case AttributeTicketTypeAvailableWrites:
return "available_writes"
}
return ""
}
// MapAttributeTicketType is a helper map of string to AttributeTicketType attribute value.
var MapAttributeTicketType = map[string]AttributeTicketType{
"available_reads": AttributeTicketTypeAvailableReads,
"available_writes": AttributeTicketTypeAvailableWrites,
}
type metricMongodbatlasDbCounts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.db.counts metric with initial data.
func (m *metricMongodbatlasDbCounts) init() {
m.data.SetName("mongodbatlas.db.counts")
m.data.SetDescription("Database feature count")
m.data.SetUnit("{objects}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDbCounts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("object_type", objectTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDbCounts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDbCounts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDbCounts(cfg MetricConfig) metricMongodbatlasDbCounts {
m := metricMongodbatlasDbCounts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
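// Sketch of the record/emit cycle shared by every metric type in this file
// (the function name is hypothetical, not generated code): construct with a
// config, record data points for one scrape, then emit into a
// pmetric.MetricSlice. emit moves the buffered metric out and re-initializes
// the buffer for the next scrape.
func exampleDbCountsCycle() pmetric.MetricSlice {
	now := pcommon.NewTimestampFromTime(time.Now())
	m := newMetricMongodbatlasDbCounts(MetricConfig{Enabled: true})
	m.recordDataPoint(now, now, 42, AttributeObjectTypeCollection.String())
	ms := pmetric.NewMetricSlice()
	m.emit(ms) // ms now holds one "mongodbatlas.db.counts" gauge
	return ms
}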
type metricMongodbatlasDbSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.db.size metric with initial data.
func (m *metricMongodbatlasDbSize) init() {
m.data.SetName("mongodbatlas.db.size")
m.data.SetDescription("Database feature size")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("object_type", objectTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDbSize) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDbSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDbSize(cfg MetricConfig) metricMongodbatlasDbSize {
m := metricMongodbatlasDbSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionIopsAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.iops.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionIopsAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.iops.average")
m.data.SetDescription("Disk partition iops")
m.data.SetUnit("{ops}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionIopsAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionIopsAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionIopsAverage {
m := metricMongodbatlasDiskPartitionIopsAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionIopsMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.iops.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionIopsMax) init() {
m.data.SetName("mongodbatlas.disk.partition.iops.max")
m.data.SetDescription("Disk partition iops")
m.data.SetUnit("{ops}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionIopsMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionIopsMax(cfg MetricConfig) metricMongodbatlasDiskPartitionIopsMax {
m := metricMongodbatlasDiskPartitionIopsMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionLatencyAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.latency.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.latency.average")
m.data.SetDescription("Disk partition latency")
m.data.SetUnit("ms")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionLatencyAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionLatencyAverage {
m := metricMongodbatlasDiskPartitionLatencyAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionLatencyMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.latency.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionLatencyMax) init() {
m.data.SetName("mongodbatlas.disk.partition.latency.max")
m.data.SetDescription("Disk partition latency")
m.data.SetUnit("ms")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionLatencyMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionLatencyMax(cfg MetricConfig) metricMongodbatlasDiskPartitionLatencyMax {
m := metricMongodbatlasDiskPartitionLatencyMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionQueueDepth struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.queue.depth metric with initial data.
func (m *metricMongodbatlasDiskPartitionQueueDepth) init() {
m.data.SetName("mongodbatlas.disk.partition.queue.depth")
m.data.SetDescription("Disk queue depth")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasDiskPartitionQueueDepth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionQueueDepth) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionQueueDepth) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionQueueDepth(cfg MetricConfig) metricMongodbatlasDiskPartitionQueueDepth {
m := metricMongodbatlasDiskPartitionQueueDepth{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionSpaceAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.space.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.space.average")
m.data.SetDescription("Disk partition space")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionSpaceAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionSpaceAverage {
m := metricMongodbatlasDiskPartitionSpaceAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionSpaceMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.space.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionSpaceMax) init() {
m.data.SetName("mongodbatlas.disk.partition.space.max")
m.data.SetDescription("Disk partition space")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionSpaceMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionSpaceMax(cfg MetricConfig) metricMongodbatlasDiskPartitionSpaceMax {
m := metricMongodbatlasDiskPartitionSpaceMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionThroughput struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.throughput metric with initial data.
func (m *metricMongodbatlasDiskPartitionThroughput) init() {
m.data.SetName("mongodbatlas.disk.partition.throughput")
m.data.SetDescription("Disk throughput")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionThroughput) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionThroughput) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionThroughput(cfg MetricConfig) metricMongodbatlasDiskPartitionThroughput {
m := metricMongodbatlasDiskPartitionThroughput{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.usage.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionUsageAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.usage.average")
m.data.SetDescription("Disk partition usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUsageAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionUsageAverage {
m := metricMongodbatlasDiskPartitionUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.usage.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionUsageMax) init() {
m.data.SetName("mongodbatlas.disk.partition.usage.max")
m.data.SetDescription("Disk partition usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUsageMax(cfg MetricConfig) metricMongodbatlasDiskPartitionUsageMax {
m := metricMongodbatlasDiskPartitionUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUtilizationAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.utilization.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.utilization.average")
m.data.SetDescription("The percentage of time during which requests are being issued to and serviced by the partition.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUtilizationAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionUtilizationAverage {
m := metricMongodbatlasDiskPartitionUtilizationAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUtilizationMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.utilization.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) init() {
m.data.SetName("mongodbatlas.disk.partition.utilization.max")
m.data.SetDescription("The maximum percentage of time during which requests are being issued to and serviced by the partition.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUtilizationMax(cfg MetricConfig) metricMongodbatlasDiskPartitionUtilizationMax {
m := metricMongodbatlasDiskPartitionUtilizationMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessAsserts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.asserts metric with initial data.
func (m *metricMongodbatlasProcessAsserts) init() {
m.data.SetName("mongodbatlas.process.asserts")
m.data.SetDescription("Number of assertions per second")
m.data.SetUnit("{assertions}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, assertTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("assert_type", assertTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessAsserts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessAsserts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessAsserts(cfg MetricConfig) metricMongodbatlasProcessAsserts {
m := metricMongodbatlasProcessAsserts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
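// exampleCapacityReuse sketches (editor's illustration, not generated code)
// why updateCapacity exists: emit records the largest number of data points
// seen so far, and the re-init after emit passes that value to
// EnsureCapacity, so later scrapes append into a pre-sized slice instead of
// growing it point by point. The counts and attribute values here are
// hypothetical.
func exampleCapacityReuse(cfg MetricConfig, ts pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricMongodbatlasProcessAsserts(cfg)
	for _, t := range []string{"regular", "warning", "msg", "user"} {
		m.recordDataPoint(ts, ts, 1.0, t)
	}
	m.emit(dest) // capacity is now 4; the fresh buffer pre-allocates 4 slots
	m.recordDataPoint(ts, ts, 2.0, "regular")
	m.emit(dest) // no reallocation needed for up to 4 points
}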
type metricMongodbatlasProcessBackgroundFlush struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.background_flush metric with initial data.
func (m *metricMongodbatlasProcessBackgroundFlush) init() {
m.data.SetName("mongodbatlas.process.background_flush")
m.data.SetDescription("Amount of data flushed in the background")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessBackgroundFlush) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessBackgroundFlush) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessBackgroundFlush(cfg MetricConfig) metricMongodbatlasProcessBackgroundFlush {
m := metricMongodbatlasProcessBackgroundFlush{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
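// exampleDisabledMetric is an editor-added sketch of the Enabled guard that
// every builder shares: when a metric is disabled in user config, the
// constructor skips allocating the pmetric.Metric entirely, and
// recordDataPoint/emit short-circuit to no-ops, so disabled metrics cost
// almost nothing per scrape.
func exampleDisabledMetric(ts pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricMongodbatlasProcessBackgroundFlush(MetricConfig{Enabled: false})
	m.recordDataPoint(ts, ts, 3.2) // returns immediately: config is disabled
	m.emit(dest)                   // appends nothing to dest
}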
type metricMongodbatlasProcessCacheIo struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.io metric with initial data.
func (m *metricMongodbatlasProcessCacheIo) init() {
m.data.SetName("mongodbatlas.process.cache.io")
m.data.SetDescription("Cache throughput (per second)")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_direction", cacheDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheIo) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheIo) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheIo(cfg MetricConfig) metricMongodbatlasProcessCacheIo {
m := metricMongodbatlasProcessCacheIo{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCacheRatio struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.ratio metric with initial data.
func (m *metricMongodbatlasProcessCacheRatio) init() {
m.data.SetName("mongodbatlas.process.cache.ratio")
m.data.SetDescription("Cache ratios represented as (%)")
m.data.SetUnit("%")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_ratio_type", cacheRatioTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheRatio) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheRatio) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheRatio(cfg MetricConfig) metricMongodbatlasProcessCacheRatio {
m := metricMongodbatlasProcessCacheRatio{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCacheSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.size metric with initial data.
func (m *metricMongodbatlasProcessCacheSize) init() {
m.data.SetName("mongodbatlas.process.cache.size")
m.data.SetDescription("Cache sizes")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_status", cacheStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheSize(cfg MetricConfig) metricMongodbatlasProcessCacheSize {
m := metricMongodbatlasProcessCacheSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
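// exampleNonMonotonicSum is an illustrative note (editor-added) on the
// Sum-typed builders such as cache.size above: the metric is modeled as a
// cumulative, non-monotonic Sum (a value that can go up or down, unlike a
// counter), so its points live under Sum().DataPoints() rather than
// Gauge(). The attribute value below is hypothetical.
func exampleNonMonotonicSum(cfg MetricConfig, ts pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricMongodbatlasProcessCacheSize(cfg)
	m.recordDataPoint(ts, ts, 1024.0, "dirty") // appends to Sum().DataPoints()
	m.emit(dest)
}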
type metricMongodbatlasProcessConnections struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.connections metric with initial data.
func (m *metricMongodbatlasProcessConnections) init() {
m.data.SetName("mongodbatlas.process.connections")
m.data.SetDescription("Number of current connections")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricMongodbatlasProcessConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessConnections) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessConnections) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessConnections(cfg MetricConfig) metricMongodbatlasProcessConnections {
m := metricMongodbatlasProcessConnections{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.average")
m.data.SetDescription("CPU Usage for child processes, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage {
m := metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.max")
m.data.SetDescription("CPU Usage for child processes, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenNormalizedUsageMax {
m := metricMongodbatlasProcessCPUChildrenNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.children.usage.average")
m.data.SetDescription("CPU Usage for child processes (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenUsageAverage {
m := metricMongodbatlasProcessCPUChildrenUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.children.usage.max")
m.data.SetDescription("CPU Usage for child processes (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenUsageMax {
m := metricMongodbatlasProcessCPUChildrenUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.normalized.usage.average")
m.data.SetDescription("CPU Usage, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUNormalizedUsageAverage {
m := metricMongodbatlasProcessCPUNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.normalized.usage.max")
m.data.SetDescription("CPU Usage, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUNormalizedUsageMax {
m := metricMongodbatlasProcessCPUNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.usage.average")
m.data.SetDescription("CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUUsageAverage {
m := metricMongodbatlasProcessCPUUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.usage.max")
m.data.SetDescription("CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUUsageMax {
m := metricMongodbatlasProcessCPUUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCursors struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cursors metric with initial data.
func (m *metricMongodbatlasProcessCursors) init() {
m.data.SetName("mongodbatlas.process.cursors")
m.data.SetDescription("Number of cursors")
m.data.SetUnit("{cursors}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cursorStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cursor_state", cursorStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCursors) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCursors) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCursors(cfg MetricConfig) metricMongodbatlasProcessCursors {
m := metricMongodbatlasProcessCursors{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbDocumentRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.document.rate metric with initial data.
func (m *metricMongodbatlasProcessDbDocumentRate) init() {
m.data.SetName("mongodbatlas.process.db.document.rate")
m.data.SetDescription("Document access rates")
m.data.SetUnit("{documents}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, documentStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("document_status", documentStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbDocumentRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbDocumentRate(cfg MetricConfig) metricMongodbatlasProcessDbDocumentRate {
m := metricMongodbatlasProcessDbDocumentRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbOperationsRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.operations.rate metric with initial data.
func (m *metricMongodbatlasProcessDbOperationsRate) init() {
m.data.SetName("mongodbatlas.process.db.operations.rate")
m.data.SetDescription("DB Operation Rates")
m.data.SetUnit("{operations}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("operation", operationAttributeValue)
dp.Attributes().PutStr("cluster_role", clusterRoleAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbOperationsRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbOperationsRate(cfg MetricConfig) metricMongodbatlasProcessDbOperationsRate {
m := metricMongodbatlasProcessDbOperationsRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
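// exampleTwoAttributeRecord is an editor sketch for builders that stamp
// more than one attribute: db.operations.rate tags each point with both an
// operation and a cluster_role, so one scrape typically records a point per
// observed combination of the two. The literals below are hypothetical.
func exampleTwoAttributeRecord(cfg MetricConfig, ts pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricMongodbatlasProcessDbOperationsRate(cfg)
	m.recordDataPoint(ts, ts, 120.0, "query", "primary")
	m.recordDataPoint(ts, ts, 15.0, "insert", "primary")
	m.emit(dest)
}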
type metricMongodbatlasProcessDbOperationsTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.operations.time metric with initial data.
func (m *metricMongodbatlasProcessDbOperationsTime) init() {
m.data.SetName("mongodbatlas.process.db.operations.time")
m.data.SetDescription("DB Operation Times")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, executionTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("execution_type", executionTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbOperationsTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbOperationsTime(cfg MetricConfig) metricMongodbatlasProcessDbOperationsTime {
m := metricMongodbatlasProcessDbOperationsTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbQueryExecutorScanned struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.query_executor.scanned metric with initial data.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) init() {
m.data.SetName("mongodbatlas.process.db.query_executor.scanned")
m.data.SetDescription("Scanned objects")
m.data.SetUnit("{objects}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("scanned_type", scannedTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbQueryExecutorScanned(cfg MetricConfig) metricMongodbatlasProcessDbQueryExecutorScanned {
m := metricMongodbatlasProcessDbQueryExecutorScanned{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbQueryTargetingScannedPerReturned struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.query_targeting.scanned_per_returned metric with initial data.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) init() {
m.data.SetName("mongodbatlas.process.db.query_targeting.scanned_per_returned")
m.data.SetDescription("Scanned objects per returned")
m.data.SetUnit("{scanned}/{returned}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("scanned_type", scannedTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(cfg MetricConfig) metricMongodbatlasProcessDbQueryTargetingScannedPerReturned {
m := metricMongodbatlasProcessDbQueryTargetingScannedPerReturned{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbStorage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.storage metric with initial data.
func (m *metricMongodbatlasProcessDbStorage) init() {
m.data.SetName("mongodbatlas.process.db.storage")
m.data.SetDescription("Storage used by the database")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, storageStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("storage_status", storageStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbStorage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbStorage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbStorage(cfg MetricConfig) metricMongodbatlasProcessDbStorage {
m := metricMongodbatlasProcessDbStorage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessGlobalLock struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.global_lock metric with initial data.
func (m *metricMongodbatlasProcessGlobalLock) init() {
m.data.SetName("mongodbatlas.process.global_lock")
m.data.SetDescription("Number and status of locks")
m.data.SetUnit("{locks}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, globalLockStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("global_lock_state", globalLockStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessGlobalLock) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessGlobalLock(cfg MetricConfig) metricMongodbatlasProcessGlobalLock {
m := metricMongodbatlasProcessGlobalLock{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessIndexBtreeMissRatio struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.index.btree_miss_ratio metric with initial data.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) init() {
m.data.SetName("mongodbatlas.process.index.btree_miss_ratio")
m.data.SetDescription("Index miss ratio (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessIndexBtreeMissRatio(cfg MetricConfig) metricMongodbatlasProcessIndexBtreeMissRatio {
m := metricMongodbatlasProcessIndexBtreeMissRatio{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessIndexCounters struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.index.counters metric with initial data.
func (m *metricMongodbatlasProcessIndexCounters) init() {
m.data.SetName("mongodbatlas.process.index.counters")
m.data.SetDescription("Indexes")
m.data.SetUnit("{indexes}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("btree_counter_type", btreeCounterTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessIndexCounters) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessIndexCounters(cfg MetricConfig) metricMongodbatlasProcessIndexCounters {
m := metricMongodbatlasProcessIndexCounters{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingCommits struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.commits metric with initial data.
func (m *metricMongodbatlasProcessJournalingCommits) init() {
m.data.SetName("mongodbatlas.process.journaling.commits")
m.data.SetDescription("Journaling commits")
m.data.SetUnit("{commits}")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingCommits) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingCommits(cfg MetricConfig) metricMongodbatlasProcessJournalingCommits {
m := metricMongodbatlasProcessJournalingCommits{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingDataFiles struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.data_files metric with initial data.
func (m *metricMongodbatlasProcessJournalingDataFiles) init() {
m.data.SetName("mongodbatlas.process.journaling.data_files")
m.data.SetDescription("Data file sizes")
m.data.SetUnit("MiBy")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingDataFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingDataFiles) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingDataFiles(cfg MetricConfig) metricMongodbatlasProcessJournalingDataFiles {
m := metricMongodbatlasProcessJournalingDataFiles{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingWritten struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.written metric with initial data.
func (m *metricMongodbatlasProcessJournalingWritten) init() {
m.data.SetName("mongodbatlas.process.journaling.written")
m.data.SetDescription("Journals written")
m.data.SetUnit("MiBy")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingWritten) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingWritten) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingWritten(cfg MetricConfig) metricMongodbatlasProcessJournalingWritten {
m := metricMongodbatlasProcessJournalingWritten{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessMemoryUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.memory.usage metric with initial data.
func (m *metricMongodbatlasProcessMemoryUsage) init() {
m.data.SetName("mongodbatlas.process.memory.usage")
m.data.SetDescription("Memory Usage")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessMemoryUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessMemoryUsage(cfg MetricConfig) metricMongodbatlasProcessMemoryUsage {
m := metricMongodbatlasProcessMemoryUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessNetworkIo struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.network.io metric with initial data.
func (m *metricMongodbatlasProcessNetworkIo) init() {
m.data.SetName("mongodbatlas.process.network.io")
m.data.SetDescription("Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessNetworkIo) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessNetworkIo(cfg MetricConfig) metricMongodbatlasProcessNetworkIo {
m := metricMongodbatlasProcessNetworkIo{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessNetworkRequests struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.network.requests metric with initial data.
func (m *metricMongodbatlasProcessNetworkRequests) init() {
m.data.SetName("mongodbatlas.process.network.requests")
m.data.SetDescription("Network requests")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricMongodbatlasProcessNetworkRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessNetworkRequests) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessNetworkRequests(cfg MetricConfig) metricMongodbatlasProcessNetworkRequests {
m := metricMongodbatlasProcessNetworkRequests{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
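// exampleMonotonicCounter (editor's illustration, not generated code):
// network.requests is a cumulative, monotonic Sum, i.e. a counter that only
// increases, which is what SetIsMonotonic(true) in its init declares to
// downstream consumers. Contrast with cache.size above, whose Sum is
// non-monotonic. The value recorded here is hypothetical.
func exampleMonotonicCounter(cfg MetricConfig, start, ts pcommon.Timestamp, dest pmetric.MetricSlice) {
	m := newMetricMongodbatlasProcessNetworkRequests(cfg)
	m.recordDataPoint(start, ts, 4096.0) // cumulative total since start
	m.emit(dest)
}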
type metricMongodbatlasProcessOplogRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.oplog.rate metric with initial data.
func (m *metricMongodbatlasProcessOplogRate) init() {
m.data.SetName("mongodbatlas.process.oplog.rate")
m.data.SetDescription("Execution rate by operation")
m.data.SetUnit("GiBy/h")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessOplogRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessOplogRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessOplogRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessOplogRate(cfg MetricConfig) metricMongodbatlasProcessOplogRate {
m := metricMongodbatlasProcessOplogRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessOplogTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.oplog.time metric with initial data.
func (m *metricMongodbatlasProcessOplogTime) init() {
m.data.SetName("mongodbatlas.process.oplog.time")
m.data.SetDescription("Execution time by operation")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, oplogTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("oplog_type", oplogTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessOplogTime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessOplogTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessOplogTime(cfg MetricConfig) metricMongodbatlasProcessOplogTime {
m := metricMongodbatlasProcessOplogTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
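// Note on capacity handling: emit calls updateCapacity before MoveTo, so the
// largest data point count seen in any scrape is remembered, and the
// EnsureCapacity call in init pre-allocates that many slots for the next
// scrape, avoiding slice re-growth at steady state. Roughly (illustrative
// only; the attribute value is a placeholder):
//
//	m.recordDataPoint(start, ts, 1.0, "master_time") // first scrape: slice grows from zero
//	m.emit(ms)                                       // remembers capacity = 1, resets m.data
//	// on the next scrape, init() has already reserved room for one point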
type metricMongodbatlasProcessPageFaults struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.page_faults metric with initial data.
func (m *metricMongodbatlasProcessPageFaults) init() {
m.data.SetName("mongodbatlas.process.page_faults")
m.data.SetDescription("Page faults")
m.data.SetUnit("{faults}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_issue_type", memoryIssueTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessPageFaults) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessPageFaults) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessPageFaults(cfg MetricConfig) metricMongodbatlasProcessPageFaults {
m := metricMongodbatlasProcessPageFaults{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessRestarts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.restarts metric with initial data.
func (m *metricMongodbatlasProcessRestarts) init() {
m.data.SetName("mongodbatlas.process.restarts")
m.data.SetDescription("Restarts in last hour")
m.data.SetUnit("{restarts}/h")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessRestarts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessRestarts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessRestarts(cfg MetricConfig) metricMongodbatlasProcessRestarts {
m := metricMongodbatlasProcessRestarts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessTickets struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.tickets metric with initial data.
func (m *metricMongodbatlasProcessTickets) init() {
m.data.SetName("mongodbatlas.process.tickets")
m.data.SetDescription("Tickets")
m.data.SetUnit("{tickets}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, ticketTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("ticket_type", ticketTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessTickets) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessTickets) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessTickets(cfg MetricConfig) metricMongodbatlasProcessTickets {
m := metricMongodbatlasProcessTickets{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.system.cpu.normalized.usage.average")
m.data.SetDescription("System CPU Normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasSystemCPUNormalizedUsageAverage {
m := metricMongodbatlasSystemCPUNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.system.cpu.normalized.usage.max")
m.data.SetDescription("System CPU Normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasSystemCPUNormalizedUsageMax {
m := metricMongodbatlasSystemCPUNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.usage.average metric with initial data.
func (m *metricMongodbatlasSystemCPUUsageAverage) init() {
m.data.SetName("mongodbatlas.system.cpu.usage.average")
m.data.SetDescription("System CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemCPUUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUUsageAverage(cfg MetricConfig) metricMongodbatlasSystemCPUUsageAverage {
m := metricMongodbatlasSystemCPUUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.usage.max metric with initial data.
func (m *metricMongodbatlasSystemCPUUsageMax) init() {
m.data.SetName("mongodbatlas.system.cpu.usage.max")
m.data.SetDescription("System CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemCPUUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUUsageMax(cfg MetricConfig) metricMongodbatlasSystemCPUUsageMax {
m := metricMongodbatlasSystemCPUUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsCPUNormalizedUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.cpu.normalized.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) init() {
m.data.SetName("mongodbatlas.system.fts.cpu.normalized.usage")
m.data.SetDescription("Full text search disk usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsCPUNormalizedUsage(cfg MetricConfig) metricMongodbatlasSystemFtsCPUNormalizedUsage {
m := metricMongodbatlasSystemFtsCPUNormalizedUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsCPUUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.cpu.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsCPUUsage) init() {
m.data.SetName("mongodbatlas.system.fts.cpu.usage")
m.data.SetDescription("Full-text search (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemFtsCPUUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsCPUUsage(cfg MetricConfig) metricMongodbatlasSystemFtsCPUUsage {
m := metricMongodbatlasSystemFtsCPUUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsDiskUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.disk.used metric with initial data.
func (m *metricMongodbatlasSystemFtsDiskUsed) init() {
m.data.SetName("mongodbatlas.system.fts.disk.used")
m.data.SetDescription("Full text search disk usage")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasSystemFtsDiskUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemFtsDiskUsed) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsDiskUsed(cfg MetricConfig) metricMongodbatlasSystemFtsDiskUsed {
m := metricMongodbatlasSystemFtsDiskUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsMemoryUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.memory.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsMemoryUsage) init() {
m.data.SetName("mongodbatlas.system.fts.memory.usage")
m.data.SetDescription("Full-text search")
m.data.SetUnit("MiBy")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemFtsMemoryUsage) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsMemoryUsage(cfg MetricConfig) metricMongodbatlasSystemFtsMemoryUsage {
m := metricMongodbatlasSystemFtsMemoryUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemMemoryUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.memory.usage.average metric with initial data.
func (m *metricMongodbatlasSystemMemoryUsageAverage) init() {
m.data.SetName("mongodbatlas.system.memory.usage.average")
m.data.SetDescription("System Memory Usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_status", memoryStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemMemoryUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemMemoryUsageAverage(cfg MetricConfig) metricMongodbatlasSystemMemoryUsageAverage {
m := metricMongodbatlasSystemMemoryUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemMemoryUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.memory.usage.max metric with initial data.
func (m *metricMongodbatlasSystemMemoryUsageMax) init() {
m.data.SetName("mongodbatlas.system.memory.usage.max")
m.data.SetDescription("System Memory Usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_status", memoryStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemMemoryUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemMemoryUsageMax(cfg MetricConfig) metricMongodbatlasSystemMemoryUsageMax {
m := metricMongodbatlasSystemMemoryUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemNetworkIoAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.network.io.average metric with initial data.
func (m *metricMongodbatlasSystemNetworkIoAverage) init() {
m.data.SetName("mongodbatlas.system.network.io.average")
m.data.SetDescription("System Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemNetworkIoAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemNetworkIoAverage(cfg MetricConfig) metricMongodbatlasSystemNetworkIoAverage {
m := metricMongodbatlasSystemNetworkIoAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemNetworkIoMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.network.io.max metric with initial data.
func (m *metricMongodbatlasSystemNetworkIoMax) init() {
m.data.SetName("mongodbatlas.system.network.io.max")
m.data.SetDescription("System Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemNetworkIoMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemNetworkIoMax(cfg MetricConfig) metricMongodbatlasSystemNetworkIoMax {
m := metricMongodbatlasSystemNetworkIoMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingIoAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.io.average metric with initial data.
func (m *metricMongodbatlasSystemPagingIoAverage) init() {
m.data.SetName("mongodbatlas.system.paging.io.average")
m.data.SetDescription("Swap IO")
m.data.SetUnit("{pages}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemPagingIoAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingIoAverage(cfg MetricConfig) metricMongodbatlasSystemPagingIoAverage {
m := metricMongodbatlasSystemPagingIoAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingIoMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.io.max metric with initial data.
func (m *metricMongodbatlasSystemPagingIoMax) init() {
m.data.SetName("mongodbatlas.system.paging.io.max")
m.data.SetDescription("Swap IO")
m.data.SetUnit("{pages}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemPagingIoMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingIoMax(cfg MetricConfig) metricMongodbatlasSystemPagingIoMax {
m := metricMongodbatlasSystemPagingIoMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.usage.average metric with initial data.
func (m *metricMongodbatlasSystemPagingUsageAverage) init() {
m.data.SetName("mongodbatlas.system.paging.usage.average")
m.data.SetDescription("Swap usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemPagingUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingUsageAverage(cfg MetricConfig) metricMongodbatlasSystemPagingUsageAverage {
m := metricMongodbatlasSystemPagingUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.usage.max metric with initial data.
func (m *metricMongodbatlasSystemPagingUsageMax) init() {
m.data.SetName("mongodbatlas.system.paging.usage.max")
m.data.SetDescription("Swap usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasSystemPagingUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingUsageMax(cfg MetricConfig) metricMongodbatlasSystemPagingUsageMax {
m := metricMongodbatlasSystemPagingUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricMongodbatlasDbCounts metricMongodbatlasDbCounts
metricMongodbatlasDbSize metricMongodbatlasDbSize
metricMongodbatlasDiskPartitionIopsAverage metricMongodbatlasDiskPartitionIopsAverage
metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionIopsMax
metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyAverage
metricMongodbatlasDiskPartitionLatencyMax metricMongodbatlasDiskPartitionLatencyMax
metricMongodbatlasDiskPartitionQueueDepth metricMongodbatlasDiskPartitionQueueDepth
metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceAverage
metricMongodbatlasDiskPartitionSpaceMax metricMongodbatlasDiskPartitionSpaceMax
metricMongodbatlasDiskPartitionThroughput metricMongodbatlasDiskPartitionThroughput
metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageAverage
metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUsageMax
metricMongodbatlasDiskPartitionUtilizationAverage metricMongodbatlasDiskPartitionUtilizationAverage
metricMongodbatlasDiskPartitionUtilizationMax metricMongodbatlasDiskPartitionUtilizationMax
metricMongodbatlasProcessAsserts metricMongodbatlasProcessAsserts
metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessBackgroundFlush
metricMongodbatlasProcessCacheIo metricMongodbatlasProcessCacheIo
metricMongodbatlasProcessCacheRatio metricMongodbatlasProcessCacheRatio
metricMongodbatlasProcessCacheSize metricMongodbatlasProcessCacheSize
metricMongodbatlasProcessConnections metricMongodbatlasProcessConnections
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage
metricMongodbatlasProcessCPUChildrenNormalizedUsageMax metricMongodbatlasProcessCPUChildrenNormalizedUsageMax
metricMongodbatlasProcessCPUChildrenUsageAverage metricMongodbatlasProcessCPUChildrenUsageAverage
metricMongodbatlasProcessCPUChildrenUsageMax metricMongodbatlasProcessCPUChildrenUsageMax
metricMongodbatlasProcessCPUNormalizedUsageAverage metricMongodbatlasProcessCPUNormalizedUsageAverage
metricMongodbatlasProcessCPUNormalizedUsageMax metricMongodbatlasProcessCPUNormalizedUsageMax
metricMongodbatlasProcessCPUUsageAverage metricMongodbatlasProcessCPUUsageAverage
metricMongodbatlasProcessCPUUsageMax metricMongodbatlasProcessCPUUsageMax
metricMongodbatlasProcessCursors metricMongodbatlasProcessCursors
metricMongodbatlasProcessDbDocumentRate metricMongodbatlasProcessDbDocumentRate
metricMongodbatlasProcessDbOperationsRate metricMongodbatlasProcessDbOperationsRate
metricMongodbatlasProcessDbOperationsTime metricMongodbatlasProcessDbOperationsTime
metricMongodbatlasProcessDbQueryExecutorScanned metricMongodbatlasProcessDbQueryExecutorScanned
metricMongodbatlasProcessDbQueryTargetingScannedPerReturned metricMongodbatlasProcessDbQueryTargetingScannedPerReturned
metricMongodbatlasProcessDbStorage metricMongodbatlasProcessDbStorage
metricMongodbatlasProcessGlobalLock metricMongodbatlasProcessGlobalLock
metricMongodbatlasProcessIndexBtreeMissRatio metricMongodbatlasProcessIndexBtreeMissRatio
metricMongodbatlasProcessIndexCounters metricMongodbatlasProcessIndexCounters
metricMongodbatlasProcessJournalingCommits metricMongodbatlasProcessJournalingCommits
metricMongodbatlasProcessJournalingDataFiles metricMongodbatlasProcessJournalingDataFiles
metricMongodbatlasProcessJournalingWritten metricMongodbatlasProcessJournalingWritten
metricMongodbatlasProcessMemoryUsage metricMongodbatlasProcessMemoryUsage
metricMongodbatlasProcessNetworkIo metricMongodbatlasProcessNetworkIo
metricMongodbatlasProcessNetworkRequests metricMongodbatlasProcessNetworkRequests
metricMongodbatlasProcessOplogRate metricMongodbatlasProcessOplogRate
metricMongodbatlasProcessOplogTime metricMongodbatlasProcessOplogTime
metricMongodbatlasProcessPageFaults metricMongodbatlasProcessPageFaults
metricMongodbatlasProcessRestarts metricMongodbatlasProcessRestarts
metricMongodbatlasProcessTickets metricMongodbatlasProcessTickets
metricMongodbatlasSystemCPUNormalizedUsageAverage metricMongodbatlasSystemCPUNormalizedUsageAverage
metricMongodbatlasSystemCPUNormalizedUsageMax metricMongodbatlasSystemCPUNormalizedUsageMax
metricMongodbatlasSystemCPUUsageAverage metricMongodbatlasSystemCPUUsageAverage
metricMongodbatlasSystemCPUUsageMax metricMongodbatlasSystemCPUUsageMax
metricMongodbatlasSystemFtsCPUNormalizedUsage metricMongodbatlasSystemFtsCPUNormalizedUsage
metricMongodbatlasSystemFtsCPUUsage metricMongodbatlasSystemFtsCPUUsage
metricMongodbatlasSystemFtsDiskUsed metricMongodbatlasSystemFtsDiskUsed
metricMongodbatlasSystemFtsMemoryUsage metricMongodbatlasSystemFtsMemoryUsage
metricMongodbatlasSystemMemoryUsageAverage metricMongodbatlasSystemMemoryUsageAverage
metricMongodbatlasSystemMemoryUsageMax metricMongodbatlasSystemMemoryUsageMax
metricMongodbatlasSystemNetworkIoAverage metricMongodbatlasSystemNetworkIoAverage
metricMongodbatlasSystemNetworkIoMax metricMongodbatlasSystemNetworkIoMax
metricMongodbatlasSystemPagingIoAverage metricMongodbatlasSystemPagingIoAverage
metricMongodbatlasSystemPagingIoMax metricMongodbatlasSystemPagingIoMax
metricMongodbatlasSystemPagingUsageAverage metricMongodbatlasSystemPagingUsageAverage
metricMongodbatlasSystemPagingUsageMax metricMongodbatlasSystemPagingUsageMax
}
// MetricBuilderOption applies changes to the default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
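// A minimal construction sketch. DefaultMetricsBuilderConfig is the
// mdatagen-standard helper assumed to be defined in this package's config
// file, and receivertest.NewNopSettings comes from the collector's test
// helpers (its signature varies across collector versions):
//
//	mb := NewMetricsBuilder(
//		DefaultMetricsBuilderConfig(),
//		receivertest.NewNopSettings(),
//		WithStartTime(pcommon.NewTimestampFromTime(time.Unix(0, 0))),
//	)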
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricMongodbatlasDbCounts: newMetricMongodbatlasDbCounts(mbc.Metrics.MongodbatlasDbCounts),
metricMongodbatlasDbSize: newMetricMongodbatlasDbSize(mbc.Metrics.MongodbatlasDbSize),
metricMongodbatlasDiskPartitionIopsAverage: newMetricMongodbatlasDiskPartitionIopsAverage(mbc.Metrics.MongodbatlasDiskPartitionIopsAverage),
metricMongodbatlasDiskPartitionIopsMax: newMetricMongodbatlasDiskPartitionIopsMax(mbc.Metrics.MongodbatlasDiskPartitionIopsMax),
metricMongodbatlasDiskPartitionLatencyAverage: newMetricMongodbatlasDiskPartitionLatencyAverage(mbc.Metrics.MongodbatlasDiskPartitionLatencyAverage),
metricMongodbatlasDiskPartitionLatencyMax: newMetricMongodbatlasDiskPartitionLatencyMax(mbc.Metrics.MongodbatlasDiskPartitionLatencyMax),
metricMongodbatlasDiskPartitionQueueDepth: newMetricMongodbatlasDiskPartitionQueueDepth(mbc.Metrics.MongodbatlasDiskPartitionQueueDepth),
metricMongodbatlasDiskPartitionSpaceAverage: newMetricMongodbatlasDiskPartitionSpaceAverage(mbc.Metrics.MongodbatlasDiskPartitionSpaceAverage),
metricMongodbatlasDiskPartitionSpaceMax: newMetricMongodbatlasDiskPartitionSpaceMax(mbc.Metrics.MongodbatlasDiskPartitionSpaceMax),
metricMongodbatlasDiskPartitionThroughput: newMetricMongodbatlasDiskPartitionThroughput(mbc.Metrics.MongodbatlasDiskPartitionThroughput),
metricMongodbatlasDiskPartitionUsageAverage: newMetricMongodbatlasDiskPartitionUsageAverage(mbc.Metrics.MongodbatlasDiskPartitionUsageAverage),
metricMongodbatlasDiskPartitionUsageMax: newMetricMongodbatlasDiskPartitionUsageMax(mbc.Metrics.MongodbatlasDiskPartitionUsageMax),
metricMongodbatlasDiskPartitionUtilizationAverage: newMetricMongodbatlasDiskPartitionUtilizationAverage(mbc.Metrics.MongodbatlasDiskPartitionUtilizationAverage),
metricMongodbatlasDiskPartitionUtilizationMax: newMetricMongodbatlasDiskPartitionUtilizationMax(mbc.Metrics.MongodbatlasDiskPartitionUtilizationMax),
metricMongodbatlasProcessAsserts: newMetricMongodbatlasProcessAsserts(mbc.Metrics.MongodbatlasProcessAsserts),
metricMongodbatlasProcessBackgroundFlush: newMetricMongodbatlasProcessBackgroundFlush(mbc.Metrics.MongodbatlasProcessBackgroundFlush),
metricMongodbatlasProcessCacheIo: newMetricMongodbatlasProcessCacheIo(mbc.Metrics.MongodbatlasProcessCacheIo),
metricMongodbatlasProcessCacheRatio: newMetricMongodbatlasProcessCacheRatio(mbc.Metrics.MongodbatlasProcessCacheRatio),
metricMongodbatlasProcessCacheSize: newMetricMongodbatlasProcessCacheSize(mbc.Metrics.MongodbatlasProcessCacheSize),
metricMongodbatlasProcessConnections: newMetricMongodbatlasProcessConnections(mbc.Metrics.MongodbatlasProcessConnections),
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage),
metricMongodbatlasProcessCPUChildrenNormalizedUsageMax: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageMax),
metricMongodbatlasProcessCPUChildrenUsageAverage: newMetricMongodbatlasProcessCPUChildrenUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenUsageAverage),
metricMongodbatlasProcessCPUChildrenUsageMax: newMetricMongodbatlasProcessCPUChildrenUsageMax(mbc.Metrics.MongodbatlasProcessCPUChildrenUsageMax),
metricMongodbatlasProcessCPUNormalizedUsageAverage: newMetricMongodbatlasProcessCPUNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUNormalizedUsageAverage),
metricMongodbatlasProcessCPUNormalizedUsageMax: newMetricMongodbatlasProcessCPUNormalizedUsageMax(mbc.Metrics.MongodbatlasProcessCPUNormalizedUsageMax),
metricMongodbatlasProcessCPUUsageAverage: newMetricMongodbatlasProcessCPUUsageAverage(mbc.Metrics.MongodbatlasProcessCPUUsageAverage),
metricMongodbatlasProcessCPUUsageMax: newMetricMongodbatlasProcessCPUUsageMax(mbc.Metrics.MongodbatlasProcessCPUUsageMax),
metricMongodbatlasProcessCursors: newMetricMongodbatlasProcessCursors(mbc.Metrics.MongodbatlasProcessCursors),
metricMongodbatlasProcessDbDocumentRate: newMetricMongodbatlasProcessDbDocumentRate(mbc.Metrics.MongodbatlasProcessDbDocumentRate),
metricMongodbatlasProcessDbOperationsRate: newMetricMongodbatlasProcessDbOperationsRate(mbc.Metrics.MongodbatlasProcessDbOperationsRate),
metricMongodbatlasProcessDbOperationsTime: newMetricMongodbatlasProcessDbOperationsTime(mbc.Metrics.MongodbatlasProcessDbOperationsTime),
metricMongodbatlasProcessDbQueryExecutorScanned: newMetricMongodbatlasProcessDbQueryExecutorScanned(mbc.Metrics.MongodbatlasProcessDbQueryExecutorScanned),
metricMongodbatlasProcessDbQueryTargetingScannedPerReturned: newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(mbc.Metrics.MongodbatlasProcessDbQueryTargetingScannedPerReturned),
metricMongodbatlasProcessDbStorage: newMetricMongodbatlasProcessDbStorage(mbc.Metrics.MongodbatlasProcessDbStorage),
metricMongodbatlasProcessGlobalLock: newMetricMongodbatlasProcessGlobalLock(mbc.Metrics.MongodbatlasProcessGlobalLock),
metricMongodbatlasProcessIndexBtreeMissRatio: newMetricMongodbatlasProcessIndexBtreeMissRatio(mbc.Metrics.MongodbatlasProcessIndexBtreeMissRatio),
metricMongodbatlasProcessIndexCounters: newMetricMongodbatlasProcessIndexCounters(mbc.Metrics.MongodbatlasProcessIndexCounters),
metricMongodbatlasProcessJournalingCommits: newMetricMongodbatlasProcessJournalingCommits(mbc.Metrics.MongodbatlasProcessJournalingCommits),
metricMongodbatlasProcessJournalingDataFiles: newMetricMongodbatlasProcessJournalingDataFiles(mbc.Metrics.MongodbatlasProcessJournalingDataFiles),
metricMongodbatlasProcessJournalingWritten: newMetricMongodbatlasProcessJournalingWritten(mbc.Metrics.MongodbatlasProcessJournalingWritten),
metricMongodbatlasProcessMemoryUsage: newMetricMongodbatlasProcessMemoryUsage(mbc.Metrics.MongodbatlasProcessMemoryUsage),
metricMongodbatlasProcessNetworkIo: newMetricMongodbatlasProcessNetworkIo(mbc.Metrics.MongodbatlasProcessNetworkIo),
metricMongodbatlasProcessNetworkRequests: newMetricMongodbatlasProcessNetworkRequests(mbc.Metrics.MongodbatlasProcessNetworkRequests),
metricMongodbatlasProcessOplogRate: newMetricMongodbatlasProcessOplogRate(mbc.Metrics.MongodbatlasProcessOplogRate),
metricMongodbatlasProcessOplogTime: newMetricMongodbatlasProcessOplogTime(mbc.Metrics.MongodbatlasProcessOplogTime),
metricMongodbatlasProcessPageFaults: newMetricMongodbatlasProcessPageFaults(mbc.Metrics.MongodbatlasProcessPageFaults),
metricMongodbatlasProcessRestarts: newMetricMongodbatlasProcessRestarts(mbc.Metrics.MongodbatlasProcessRestarts),
metricMongodbatlasProcessTickets: newMetricMongodbatlasProcessTickets(mbc.Metrics.MongodbatlasProcessTickets),
metricMongodbatlasSystemCPUNormalizedUsageAverage: newMetricMongodbatlasSystemCPUNormalizedUsageAverage(mbc.Metrics.MongodbatlasSystemCPUNormalizedUsageAverage),
metricMongodbatlasSystemCPUNormalizedUsageMax: newMetricMongodbatlasSystemCPUNormalizedUsageMax(mbc.Metrics.MongodbatlasSystemCPUNormalizedUsageMax),
metricMongodbatlasSystemCPUUsageAverage: newMetricMongodbatlasSystemCPUUsageAverage(mbc.Metrics.MongodbatlasSystemCPUUsageAverage),
metricMongodbatlasSystemCPUUsageMax: newMetricMongodbatlasSystemCPUUsageMax(mbc.Metrics.MongodbatlasSystemCPUUsageMax),
metricMongodbatlasSystemFtsCPUNormalizedUsage: newMetricMongodbatlasSystemFtsCPUNormalizedUsage(mbc.Metrics.MongodbatlasSystemFtsCPUNormalizedUsage),
metricMongodbatlasSystemFtsCPUUsage: newMetricMongodbatlasSystemFtsCPUUsage(mbc.Metrics.MongodbatlasSystemFtsCPUUsage),
metricMongodbatlasSystemFtsDiskUsed: newMetricMongodbatlasSystemFtsDiskUsed(mbc.Metrics.MongodbatlasSystemFtsDiskUsed),
metricMongodbatlasSystemFtsMemoryUsage: newMetricMongodbatlasSystemFtsMemoryUsage(mbc.Metrics.MongodbatlasSystemFtsMemoryUsage),
metricMongodbatlasSystemMemoryUsageAverage: newMetricMongodbatlasSystemMemoryUsageAverage(mbc.Metrics.MongodbatlasSystemMemoryUsageAverage),
metricMongodbatlasSystemMemoryUsageMax: newMetricMongodbatlasSystemMemoryUsageMax(mbc.Metrics.MongodbatlasSystemMemoryUsageMax),
metricMongodbatlasSystemNetworkIoAverage: newMetricMongodbatlasSystemNetworkIoAverage(mbc.Metrics.MongodbatlasSystemNetworkIoAverage),
metricMongodbatlasSystemNetworkIoMax: newMetricMongodbatlasSystemNetworkIoMax(mbc.Metrics.MongodbatlasSystemNetworkIoMax),
metricMongodbatlasSystemPagingIoAverage: newMetricMongodbatlasSystemPagingIoAverage(mbc.Metrics.MongodbatlasSystemPagingIoAverage),
metricMongodbatlasSystemPagingIoMax: newMetricMongodbatlasSystemPagingIoMax(mbc.Metrics.MongodbatlasSystemPagingIoMax),
metricMongodbatlasSystemPagingUsageAverage: newMetricMongodbatlasSystemPagingUsageAverage(mbc.Metrics.MongodbatlasSystemPagingUsageAverage),
metricMongodbatlasSystemPagingUsageMax: newMetricMongodbatlasSystemPagingUsageMax(mbc.Metrics.MongodbatlasSystemPagingUsageMax),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasDbName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.db.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDbName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasDbName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.db.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDbName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.disk.partition"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.disk.partition"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasHostName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.host.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasHostName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasHostName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.host.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasHostName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.org_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.org_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.port"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.port"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.type_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.type_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.project.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.project.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.project.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.project.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.provider.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.provider.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.region.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.region.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.user.alias"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.user.alias"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
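// Illustrative sketch (not generated code): wiring an include filter so only
// resources whose "mongodb_atlas.project.name" matches a pattern are emitted.
// The filter.Config field names ("strict"/"regexp") and the settings value are
// assumptions about the surrounding packages, not part of this file.
//
//	mbc := DefaultMetricsBuilderConfig()
//	mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsInclude = []filter.Config{
//		{Regexp: "^prod-.*"},
//	}
//	mb := NewMetricsBuilder(mbc, settings) // settings: the receiver's creation settings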
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity updates the maximum number of metrics seen so far, which is used for the slice capacity.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
// WithStartTimeOverride overrides the start time for all data points in the resource metrics.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
var dps pmetric.NumberDataPointSlice
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
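// Illustrative sketch: the options above are typically combined when emitting
// for a resource. The locals "mb", "rb", and "start" are hypothetical; the
// functions used are the ones defined in this package.
//
//	rb := mb.NewResourceBuilder()
//	rb.SetMongodbAtlasProjectID("my-project-id")
//	mb.EmitForResource(WithResource(rb.Emit()), WithStartTimeOverride(start))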
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise calling this function is not required;
// calling `Emit` alone is sufficient.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver")
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricMongodbatlasDbCounts.emit(ils.Metrics())
mb.metricMongodbatlasDbSize.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionIopsAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionQueueDepth.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionThroughput.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUtilizationMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics())
mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheRatio.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics())
mb.metricMongodbatlasProcessConnections.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCursors.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbDocumentRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbOperationsRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbOperationsTime.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbQueryExecutorScanned.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbStorage.emit(ils.Metrics())
mb.metricMongodbatlasProcessGlobalLock.emit(ils.Metrics())
mb.metricMongodbatlasProcessIndexBtreeMissRatio.emit(ils.Metrics())
mb.metricMongodbatlasProcessIndexCounters.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingCommits.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingDataFiles.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingWritten.emit(ils.Metrics())
mb.metricMongodbatlasProcessMemoryUsage.emit(ils.Metrics())
mb.metricMongodbatlasProcessNetworkIo.emit(ils.Metrics())
mb.metricMongodbatlasProcessNetworkRequests.emit(ils.Metrics())
mb.metricMongodbatlasProcessOplogRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessOplogTime.emit(ils.Metrics())
mb.metricMongodbatlasProcessPageFaults.emit(ils.Metrics())
mb.metricMongodbatlasProcessRestarts.emit(ils.Metrics())
mb.metricMongodbatlasProcessTickets.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsCPUUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsDiskUsed.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsMemoryUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemMemoryUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemMemoryUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemNetworkIoAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemNetworkIoMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingIoAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingIoMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingUsageMax.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
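// Illustrative sketch of one scrape cycle, assuming "mb" was built with
// NewMetricsBuilder; the recorded value and attribute values are made up.
//
//	now := pcommon.NewTimestampFromTime(time.Now())
//	mb.RecordMongodbatlasProcessConnectionsDataPoint(now, 42)
//	rb := mb.NewResourceBuilder()
//	rb.SetMongodbAtlasHostName("cluster0-shard-00-00")
//	metrics := mb.Emit(WithResource(rb.Emit()))
//	// metrics holds the accumulated ResourceMetrics; mb is ready for the next cycle.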
// RecordMongodbatlasDbCountsDataPoint adds a data point to mongodbatlas.db.counts metric.
func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDbSizeDataPoint adds a data point to mongodbatlas.db.size metric.
func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsAverageDataPoint adds a data point to mongodbatlas.disk.partition.iops.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsMaxDataPoint adds a data point to mongodbatlas.disk.partition.iops.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyAverageDataPoint adds a data point to mongodbatlas.disk.partition.latency.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyMaxDataPoint adds a data point to mongodbatlas.disk.partition.latency.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionQueueDepthDataPoint adds a data point to mongodbatlas.disk.partition.queue.depth metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionQueueDepth.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionSpaceMaxDataPoint adds a data point to mongodbatlas.disk.partition.space.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionThroughputDataPoint adds a data point to mongodbatlas.disk.partition.throughput metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionThroughputDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionThroughput.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageMaxDataPoint adds a data point to mongodbatlas.disk.partition.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint adds a data point to mongodbatlas.disk.partition.utilization.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint adds a data point to mongodbatlas.disk.partition.utilization.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessAssertsDataPoint adds a data point to mongodbatlas.process.asserts metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pcommon.Timestamp, val float64, assertTypeAttributeValue AttributeAssertType) {
mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue.String())
}
// RecordMongodbatlasProcessBackgroundFlushDataPoint adds a data point to mongodbatlas.process.background_flush metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessBackgroundFlush.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessCacheIoDataPoint adds a data point to mongodbatlas.process.cache.io metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue AttributeCacheDirection) {
mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String())
}
// RecordMongodbatlasProcessCacheRatioDataPoint adds a data point to mongodbatlas.process.cache.ratio metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheRatioDataPoint(ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue AttributeCacheRatioType) {
mb.metricMongodbatlasProcessCacheRatio.recordDataPoint(mb.startTime, ts, val, cacheRatioTypeAttributeValue.String())
}
// RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) {
mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String())
}
// RecordMongodbatlasProcessConnectionsDataPoint adds a data point to mongodbatlas.process.connections metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessConnections.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCursorsDataPoint adds a data point to mongodbatlas.process.cursors metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pcommon.Timestamp, val float64, cursorStateAttributeValue AttributeCursorState) {
mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue.String())
}
// RecordMongodbatlasProcessDbDocumentRateDataPoint adds a data point to mongodbatlas.process.db.document.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pcommon.Timestamp, val float64, documentStatusAttributeValue AttributeDocumentStatus) {
mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsRateDataPoint adds a data point to mongodbatlas.process.db.operations.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue AttributeOperation, clusterRoleAttributeValue AttributeClusterRole) {
mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), clusterRoleAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsTimeDataPoint adds a data point to mongodbatlas.process.db.operations.time metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pcommon.Timestamp, val float64, executionTypeAttributeValue AttributeExecutionType) {
mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint adds a data point to mongodbatlas.process.db.query_executor.scanned metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint adds a data point to mongodbatlas.process.db.query_targeting.scanned_per_returned metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbStorageDataPoint adds a data point to mongodbatlas.process.db.storage metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pcommon.Timestamp, val float64, storageStatusAttributeValue AttributeStorageStatus) {
mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue.String())
}
// RecordMongodbatlasProcessGlobalLockDataPoint adds a data point to mongodbatlas.process.global_lock metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pcommon.Timestamp, val float64, globalLockStateAttributeValue AttributeGlobalLockState) {
mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue.String())
}
// RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint adds a data point to mongodbatlas.process.index.btree_miss_ratio metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessIndexBtreeMissRatio.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessIndexCountersDataPoint adds a data point to mongodbatlas.process.index.counters metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue AttributeBtreeCounterType) {
mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue.String())
}
// RecordMongodbatlasProcessJournalingCommitsDataPoint adds a data point to mongodbatlas.process.journaling.commits metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingCommitsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingCommits.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessJournalingDataFilesDataPoint adds a data point to mongodbatlas.process.journaling.data_files metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingDataFiles.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessJournalingWrittenDataPoint adds a data point to mongodbatlas.process.journaling.written metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingWritten.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessMemoryUsageDataPoint adds a data point to mongodbatlas.process.memory.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkIoDataPoint adds a data point to mongodbatlas.process.network.io metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkRequestsDataPoint adds a data point to mongodbatlas.process.network.requests metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkRequestsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessNetworkRequests.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessOplogRateDataPoint adds a data point to mongodbatlas.process.oplog.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessOplogRate.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessOplogTimeDataPoint adds a data point to mongodbatlas.process.oplog.time metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pcommon.Timestamp, val float64, oplogTypeAttributeValue AttributeOplogType) {
mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue.String())
}
// RecordMongodbatlasProcessPageFaultsDataPoint adds a data point to mongodbatlas.process.page_faults metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue AttributeMemoryIssueType) {
mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue.String())
}
// RecordMongodbatlasProcessRestartsDataPoint adds a data point to mongodbatlas.process.restarts metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessRestarts.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessTicketsDataPoint adds a data point to mongodbatlas.process.tickets metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pcommon.Timestamp, val float64, ticketTypeAttributeValue AttributeTicketType) {
mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.normalized.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsDiskUsedDataPoint adds a data point to mongodbatlas.system.fts.disk.used metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasSystemFtsDiskUsed.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasSystemFtsMemoryUsageDataPoint adds a data point to mongodbatlas.system.fts.memory.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageAverageDataPoint adds a data point to mongodbatlas.system.memory.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageMaxDataPoint adds a data point to mongodbatlas.system.memory.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoAverageDataPoint adds a data point to mongodbatlas.system.network.io.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoMaxDataPoint adds a data point to mongodbatlas.system.network.io.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoAverageDataPoint adds a data point to mongodbatlas.system.paging.io.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoMaxDataPoint adds a data point to mongodbatlas.system.paging.io.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageAverageDataPoint adds a data point to mongodbatlas.system.paging.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageMaxDataPoint adds a data point to mongodbatlas.system.paging.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// and the metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
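// Illustrative sketch: after the monitored source restarts, begin a new
// cumulative window. WithStartTime is assumed to be the generated
// MetricBuilderOption (defined elsewhere in this package) for pinning the
// window start explicitly; "restartedAt" is a hypothetical time.Time.
//
//	mb.Reset()
//	// or, with an explicit start:
//	mb.Reset(WithStartTime(pcommon.NewTimestampFromTime(restartedAt)))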
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml.
// The ResourceBuilder is not thread-safe and must not be used from multiple goroutines.
type ResourceBuilder struct {
config ResourceAttributesConfig
res pcommon.Resource
}
// NewResourceBuilder creates a new ResourceBuilder. This function should be called at the start of the application.
func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder {
return &ResourceBuilder{
config: rac,
res: pcommon.NewResource(),
}
}
// SetMongodbAtlasClusterName sets provided value as "mongodb_atlas.cluster.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasClusterName(val string) {
if rb.config.MongodbAtlasClusterName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.cluster.name", val)
}
}
// SetMongodbAtlasDbName sets provided value as "mongodb_atlas.db.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasDbName(val string) {
if rb.config.MongodbAtlasDbName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.db.name", val)
}
}
// SetMongodbAtlasDiskPartition sets provided value as "mongodb_atlas.disk.partition" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasDiskPartition(val string) {
if rb.config.MongodbAtlasDiskPartition.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.disk.partition", val)
}
}
// SetMongodbAtlasHostName sets provided value as "mongodb_atlas.host.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasHostName(val string) {
if rb.config.MongodbAtlasHostName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.host.name", val)
}
}
// SetMongodbAtlasOrgName sets provided value as "mongodb_atlas.org_name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasOrgName(val string) {
if rb.config.MongodbAtlasOrgName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.org_name", val)
}
}
// SetMongodbAtlasProcessID sets provided value as "mongodb_atlas.process.id" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessID(val string) {
if rb.config.MongodbAtlasProcessID.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.id", val)
}
}
// SetMongodbAtlasProcessPort sets provided value as "mongodb_atlas.process.port" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessPort(val string) {
if rb.config.MongodbAtlasProcessPort.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.port", val)
}
}
// SetMongodbAtlasProcessTypeName sets provided value as "mongodb_atlas.process.type_name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessTypeName(val string) {
if rb.config.MongodbAtlasProcessTypeName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.type_name", val)
}
}
// SetMongodbAtlasProjectID sets provided value as "mongodb_atlas.project.id" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProjectID(val string) {
if rb.config.MongodbAtlasProjectID.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.project.id", val)
}
}
// SetMongodbAtlasProjectName sets provided value as "mongodb_atlas.project.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProjectName(val string) {
if rb.config.MongodbAtlasProjectName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.project.name", val)
}
}
// SetMongodbAtlasProviderName sets provided value as "mongodb_atlas.provider.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProviderName(val string) {
if rb.config.MongodbAtlasProviderName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.provider.name", val)
}
}
// SetMongodbAtlasRegionName sets provided value as "mongodb_atlas.region.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasRegionName(val string) {
if rb.config.MongodbAtlasRegionName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.region.name", val)
}
}
// SetMongodbAtlasUserAlias sets provided value as "mongodb_atlas.user.alias" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasUserAlias(val string) {
if rb.config.MongodbAtlasUserAlias.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.user.alias", val)
}
}
// Emit returns the built resource and resets the internal builder state.
func (rb *ResourceBuilder) Emit() pcommon.Resource {
r := rb.res
rb.res = pcommon.NewResource()
return r
}
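// Illustrative sketch: building a resource and attaching it to emitted
// metrics. DefaultResourceAttributesConfig is assumed to come from this
// package's generated config; "mb" is a hypothetical MetricsBuilder.
//
//	rb := NewResourceBuilder(DefaultResourceAttributesConfig())
//	rb.SetMongodbAtlasProjectName("my-project")
//	rb.SetMongodbAtlasHostName("cluster0-shard-00-00")
//	res := rb.Emit() // rb is reset and can be reused
//	mb.EmitForResource(WithResource(res))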
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
import (
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// metricRecordFunc records a data point to the metric builder at the supplied timestamp.
type metricRecordFunc func(*MetricsBuilder, *mongodbatlas.DataPoints, pcommon.Timestamp)
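// Illustrative sketch of how these record functions are dispatched, assuming
// "meas" is a *mongodbatlas.Measurements with Name and DataPoints fields
// (field names per the go.mongodb.org/atlas client, stated here as an
// assumption) and "mb" is the receiver's MetricsBuilder:
//
//	record := getRecordFunc(meas.Name)
//	if record == nil {
//		return // measurement not mapped to any metric
//	}
//	for _, dp := range meas.DataPoints {
//		if dp.Value == nil {
//			continue // Atlas reports gaps as nil values
//		}
//		t, _ := time.Parse(time.RFC3339, dp.Timestamp)
//		record(mb, dp, pcommon.NewTimestampFromTime(t))
//	}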
// getRecordFunc returns the metricRecordFunc that matches the metric name, or nil if none is found.
func getRecordFunc(metricName string) metricRecordFunc {
switch metricName {
// MongoDB CPU usage. For hosts with more than one CPU core, these values can exceed 100%.
case "PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// MongoDB CPU usage scaled to a range of 0% to 100%. Atlas computes this value by dividing by the number of CPU cores.
case "PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Rate of asserts for a MongoDB process found in the asserts document that the serverStatus command generates.
case "ASSERT_REGULAR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeRegular)
}
case "ASSERT_WARNING":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeWarning)
}
case "ASSERT_MSG":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeMsg)
}
case "ASSERT_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeUser)
}
// Amount of data flushed in the background.
case "BACKGROUND_FLUSH_AVG":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessBackgroundFlushDataPoint(ts, float64(*dp.Value))
}
// Amount of bytes in the WiredTiger storage engine cache and tickets found in the wiredTiger.cache and wiredTiger.concurrentTransactions documents that the serverStatus command generates.
case "CACHE_BYTES_READ_INTO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionReadInto)
}
case "CACHE_BYTES_WRITTEN_FROM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionWrittenFrom)
}
case "CACHE_DIRTY_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusDirty)
}
case "CACHE_USED_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed)
}
case "CACHE_FILL_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeCacheFill)
}
case "DIRTY_FILL_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeDirtyFill)
}
case "TICKETS_AVAILABLE_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads)
}
case "TICKETS_AVAILABLE_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableWrites)
}
// Number of connections to a MongoDB process found in the connections document that the serverStatus command generates.
case "CONNECTIONS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessConnectionsDataPoint(ts, float64(*dp.Value))
}
// Number of cursors for a MongoDB process found in the metrics.cursor document that the serverStatus command generates.
case "CURSORS_TOTAL_OPEN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateOpen)
}
case "CURSORS_TOTAL_TIMED_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateTimedOut)
}
// Number of memory issues and page faults for a MongoDB process.
case "EXTRA_INFO_PAGE_FAULTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExtraInfo)
}
case "GLOBAL_ACCESSES_NOT_IN_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeGlobalAccessesNotInMemory)
}
case "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExceptionsThrown)
}
// Number of operations waiting on locks for the MongoDB process that the serverStatus command generates. Cloud Manager computes these values based on the type of storage engine.
case "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueTotal)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_READERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueReaders)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueWriters)
}
// Number of index btree operations.
case "INDEX_COUNTERS_BTREE_ACCESSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeAccesses)
}
case "INDEX_COUNTERS_BTREE_HITS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeHits)
}
case "INDEX_COUNTERS_BTREE_MISSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeMisses)
}
case "INDEX_COUNTERS_BTREE_MISS_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts, float64(*dp.Value))
}
// Number of journaling operations.
case "JOURNALING_COMMITS_IN_WRITE_LOCK":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingCommitsDataPoint(ts, float64(*dp.Value))
}
case "JOURNALING_MB":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingWrittenDataPoint(ts, float64(*dp.Value))
}
case "JOURNALING_WRITE_DATA_FILES_MB":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts, float64(*dp.Value))
}
// Amount of memory for a MongoDB process found in the mem document that the serverStatus command collects.
case "MEMORY_RESIDENT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "MEMORY_VIRTUAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
case "COMPUTED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateComputed)
}
// Network throughput for a MongoDB process, found in the network document that the serverStatus command collects.
case "NETWORK_BYTES_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "NETWORK_BYTES_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "NETWORK_NUM_REQUESTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkRequestsDataPoint(ts, float64(*dp.Value))
}
// Durations and throughput of the MongoDB process' oplog.
case "OPLOG_SLAVE_LAG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeSlaveLagMasterTime)
}
case "OPLOG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterTime)
}
case "OPLOG_MASTER_LAG_TIME_DIFF":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterLagTimeDiff)
}
case "OPLOG_RATE_GB_PER_HOUR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogRateDataPoint(ts, float64(*dp.Value))
}
// Amount of storage space used by the database on a MongoDB process, broken down by storage status.
case "DB_STORAGE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusTotal)
}
case "DB_DATA_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSize)
}
case "DB_INDEX_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusIndexSize)
}
case "DB_DATA_SIZE_TOTAL_WO_SYSTEM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSizeWoSystem)
}
// Rate of database operations on a MongoDB process since the process last started, found in the opcounters document that the serverStatus command collects.
case "OPCOUNTER_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRolePrimary)
}
case "OPCOUNTER_QUERY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationQuery, AttributeClusterRolePrimary)
}
case "OPCOUNTER_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRolePrimary)
}
case "OPCOUNTER_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRolePrimary)
}
case "OPCOUNTER_GETMORE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationGetmore, AttributeClusterRolePrimary)
}
case "OPCOUNTER_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary)
}
case "OPCOUNTER_TTL_DELETED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationTTLDeleted, AttributeClusterRolePrimary)
}
// Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects.
case "OPCOUNTER_REPL_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRoleReplica)
}
// Average rate of documents returned, inserted, updated, or deleted per second during a selected time period.
case "DOCUMENT_METRICS_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusReturned)
}
case "DOCUMENT_METRICS_INSERTED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusInserted)
}
case "DOCUMENT_METRICS_UPDATED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusUpdated)
}
case "DOCUMENT_METRICS_DELETED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusDeleted)
}
// Average rate for operations per second during a selected time period that perform a sort but cannot perform the sort using an index.
case "OPERATIONS_SCAN_AND_ORDER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationScanAndOrder, AttributeClusterRolePrimary)
}
// Average execution time in milliseconds per read, write, or command operation during a selected time period.
case "OP_EXECUTION_TIME_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeReads)
}
case "OP_EXECUTION_TIME_WRITES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeWrites)
}
case "OP_EXECUTION_TIME_COMMANDS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeCommands)
}
// Number of times the host restarted within the previous hour.
case "RESTARTS_IN_LAST_HOUR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessRestartsDataPoint(ts, float64(*dp.Value))
}
// Average rate per second to scan index items during queries and query-plan evaluations found in the value of totalKeysExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Average rate of documents scanned per second during queries and query-plan evaluations found in the value of totalDocsExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED_OBJECTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// Ratio of the number of index items scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Ratio of the number of documents scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// CPU usage of processes on the host. For hosts with more than one CPU core, this value can exceed 100%.
case "SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "MAX_SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// CPU usage of processes on the host scaled to a range of 0 to 100% by dividing by the number of CPU cores.
case "SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// Physical memory usage, in bytes, that the host uses.
case "SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "MAX_SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "MAX_SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "MAX_SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "MAX_SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "MAX_SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
case "MAX_SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
// Average rate of physical bytes per second that the eth0 network interface received and transmitted.
case "SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Total amount of memory that swap uses.
case "SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "MAX_SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
case "MAX_SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
// Total amount of memory written and read from swap.
case "SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Memory usage, in bytes, that Atlas Search processes use.
case "FTS_PROCESS_RESIDENT_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "FTS_PROCESS_VIRTUAL_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "FTS_PROCESS_SHARED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateShared)
}
case "FTS_MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
// Disk space, in bytes, that Atlas Search indexes use.
// FTS_DISK_UTILIZATION is the documented field name, but FTS_DISK_USAGE is what is returned from the API.
// Including both so if the API changes to match the documentation this metric is still collected.
case "FTS_DISK_USAGE", "FTS_DISK_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts, float64(*dp.Value))
}
// Percentage of CPU that Atlas Search processes use.
case "FTS_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "FTS_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Process Disk Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/)
// Measures throughput of I/O operations for the disk partition used for MongoDB.
case "DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
case "MAX_DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
// Measures throughput of data read and written to the disk partition (not cache) used by MongoDB.
case "DISK_PARTITION_THROUGHPUT_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_THROUGHPUT_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
// This is a calculated metric that is the sum of the read and write throughput.
case "DISK_PARTITION_THROUGHPUT_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
// Measures the queue depth of the disk partition used by MongoDB.
case "DISK_QUEUE_DEPTH":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, float64(*dp.Value))
}
// Measures latency per operation type of the disk partition used by MongoDB.
case "DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
// The percentage of time during which requests are being issued to and serviced by the partition.
// This includes requests from any process, not just MongoDB processes.
case "DISK_PARTITION_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value))
}
case "MAX_DISK_PARTITION_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value))
}
// Measures the free disk space and used disk space on the disk partition used by MongoDB.
case "DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
// Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-databases-measurements/)
case "DATABASE_COLLECTION_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeCollection)
}
case "DATABASE_INDEX_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_EXTENT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeExtent)
}
case "DATABASE_OBJECT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_VIEW_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeView)
}
case "DATABASE_AVERAGE_OBJECT_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_STORAGE_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeStorage)
}
case "DATABASE_INDEX_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_DATA_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeData)
}
default:
return nil
}
}
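// MeasurementsToMetric maps a single Atlas measurement onto its record function
// and emits one data point per non-nil value; measurements with no known
// mapping are silently skipped.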
func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements) error {
recordFunc := getRecordFunc(meas.Name)
if recordFunc == nil {
return nil
}
return addDataPoint(mb, meas, recordFunc)
}
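// addDataPoint parses each data point's RFC3339 timestamp and records every non-nil value.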
func addDataPoint(mb *MetricsBuilder, meas *mongodbatlas.Measurements, recordFunc metricRecordFunc) error {
for _, point := range meas.DataPoints {
if point.Value != nil {
curTime, err := time.Parse(time.RFC3339, point.Timestamp)
if err != nil {
return err
}
recordFunc(mb, point, pcommon.NewTimestampFromTime(curTime))
}
}
return nil
}
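// recordMeasurementExample is a minimal illustrative sketch (not called anywhere
// in the receiver) of the mapping above: MeasurementsToMetric looks up the record
// function for the measurement name, then addDataPoint parses each RFC3339
// timestamp and records every non-nil value. The measurement contents here are
// fabricated for illustration.
func recordMeasurementExample(mb *MetricsBuilder) error {
value := float32(2)
meas := &mongodbatlas.Measurements{
Name: "RESTARTS_IN_LAST_HOUR",
DataPoints: []*mongodbatlas.DataPoints{
{Timestamp: "2024-01-01T00:00:00Z", Value: &value},
},
}
// Records a single process-restarts data point with value 2.
return MeasurementsToMetric(mb, meas)
}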
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
import (
"fmt"
"go.mongodb.org/atlas/mongodbatlas"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
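// processMeasurements records each measurement and then derives any calculated
// totals (currently disk partition throughput), accumulating all errors.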
func processMeasurements(
mb *metadata.MetricsBuilder,
measurements []*mongodbatlas.Measurements,
) error {
var errs error
for _, meas := range measurements {
err := metadata.MeasurementsToMetric(mb, meas)
if err != nil {
errs = multierr.Append(errs, err)
}
}
err := calculateTotalMetrics(mb, measurements)
if err != nil {
errs = multierr.Append(errs, err)
}
if errs != nil {
return fmt.Errorf("errors occurred while processing measurements: %w", errs)
}
return nil
}
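// calculateTotalMetrics derives DISK_PARTITION_THROUGHPUT_TOTAL by summing the
// read and write throughput series at matching timestamps.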
func calculateTotalMetrics(
mb *metadata.MetricsBuilder,
measurements []*mongodbatlas.Measurements,
) error {
var err error
dptTotalMeasCombined := false
var dptTotalMeas *mongodbatlas.Measurements
for _, meas := range measurements {
switch meas.Name {
case "DISK_PARTITION_THROUGHPUT_READ", "DISK_PARTITION_THROUGHPUT_WRITE":
if dptTotalMeas == nil {
dptTotalMeas = cloneMeasurement(meas)
dptTotalMeas.Name = "DISK_PARTITION_THROUGHPUT_TOTAL"
continue
}
// Combine data point values with matching timestamps
for j, totalMeas := range dptTotalMeas.DataPoints {
if totalMeas.Timestamp != meas.DataPoints[j].Timestamp ||
(totalMeas.Value == nil && meas.DataPoints[j].Value == nil) {
continue
}
if totalMeas.Value == nil {
totalMeas.Value = new(float32)
}
var addValue float32
if meas.DataPoints[j].Value != nil {
addValue = *meas.DataPoints[j].Value
}
*totalMeas.Value += addValue
dptTotalMeasCombined = true
}
default:
}
}
if dptTotalMeasCombined {
err = metadata.MeasurementsToMetric(mb, dptTotalMeas)
}
return err
}
func cloneMeasurement(meas *mongodbatlas.Measurements) *mongodbatlas.Measurements {
clone := &mongodbatlas.Measurements{
Name: meas.Name,
Units: meas.Units,
DataPoints: make([]*mongodbatlas.DataPoints, len(meas.DataPoints)),
}
for i, dp := range meas.DataPoints {
if dp != nil {
newDP := *dp
clone.DataPoints[i] = &newDP
}
}
return clone
}
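// combineThroughputExample is an illustrative sketch (not used by the receiver)
// of the derivation above: calculateTotalMetrics clones the first read or write
// throughput series it sees, renames it DISK_PARTITION_THROUGHPUT_TOTAL, and sums
// values at matching timestamps. The timestamps and values here are fabricated.
func combineThroughputExample(mb *metadata.MetricsBuilder) error {
read, write := float32(100), float32(50)
measurements := []*mongodbatlas.Measurements{
{Name: "DISK_PARTITION_THROUGHPUT_READ",
DataPoints: []*mongodbatlas.DataPoints{{Timestamp: "2024-01-01T00:00:00Z", Value: &read}}},
{Name: "DISK_PARTITION_THROUGHPUT_WRITE",
DataPoints: []*mongodbatlas.DataPoints{{Timestamp: "2024-01-01T00:00:00Z", Value: &write}}},
}
// Records one DISK_PARTITION_THROUGHPUT_TOTAL point with value 150.
return calculateTotalMetrics(mb, measurements)
}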
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package model // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
)
// LogEntry represents a MongoDB Atlas JSON log entry
type LogEntry struct {
Timestamp LogTimestamp `json:"t"`
Severity string `json:"s"`
Component string `json:"c"`
ID int64 `json:"id"`
Context string `json:"ctx"`
Message string `json:"msg"`
Attributes map[string]any `json:"attr"`
// Raw is the original log line. It is not a part of the payload, but transient data added during decoding.
Raw string `json:"-"`
}
// AuditLog represents a MongoDB Atlas JSON audit log entry
type AuditLog struct {
Type string `json:"atype"`
Timestamp LogTimestamp `json:"ts"`
ID *ID `json:"uuid,omitempty"`
Local Address `json:"local"`
Remote Address `json:"remote"`
Users []AuditUser `json:"users"`
Roles []AuditRole `json:"roles"`
Result int `json:"result"`
Param map[string]any `json:"param"`
// Raw is the original log line. It is not a part of the payload, but transient data added during decoding.
Raw string `json:"-"`
}
// LogTimestamp represents the timestamp structure of a log entry
type LogTimestamp struct {
Date string `json:"$date"`
}
type ID struct {
Binary string `json:"$binary"`
Type string `json:"$type"`
}
type Address struct {
IP *string `json:"ip,omitempty"`
Port *int `json:"port,omitempty"`
SystemUser *bool `json:"isSystemUser,omitempty"`
UnixSocket *string `json:"unix,omitempty"`
}
type AuditRole struct {
Role string `json:"role"`
Database string `json:"db"`
}
func (ar AuditRole) Pdata() pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(2)
m.PutStr("role", ar.Role)
m.PutStr("db", ar.Database)
return m
}
type AuditUser struct {
User string `json:"user"`
Database string `json:"db"`
}
func (ar AuditUser) Pdata() pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(2)
m.PutStr("user", ar.User)
m.PutStr("db", ar.Database)
return m
}
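// pdataExample is an illustrative sketch (not part of the receiver's decoding
// path) of the attribute maps the Pdata helpers produce; each role or user
// becomes a two-entry map. The values here are fabricated.
func pdataExample() (pcommon.Map, pcommon.Map) {
role := AuditRole{Role: "readWrite", Database: "admin"}
user := AuditUser{User: "alice", Database: "admin"}
// role.Pdata() => {"role": "readWrite", "db": "admin"}
// user.Pdata() => {"user": "alice", "db": "admin"}
return role.Pdata(), user.Pdata()
}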
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"strconv"
"sync"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/mongodb-forks/digest"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/config/configretry"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
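// clientRoundTripper wraps an http.RoundTripper to retry requests rejected with
// HTTP 429 (rate limiting) using exponential backoff, and to refuse new requests
// once the receiver has been shut down.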
type clientRoundTripper struct {
originalTransport http.RoundTripper
log *zap.Logger
backoffConfig configretry.BackOffConfig
stopped bool
mutex sync.Mutex
shutdownChan chan struct{}
}
func newClientRoundTripper(
originalTransport http.RoundTripper,
log *zap.Logger,
backoffConfig configretry.BackOffConfig,
) *clientRoundTripper {
return &clientRoundTripper{
originalTransport: originalTransport,
log: log,
backoffConfig: backoffConfig,
shutdownChan: make(chan struct{}, 1),
}
}
func (rt *clientRoundTripper) isStopped() bool {
rt.mutex.Lock()
defer rt.mutex.Unlock()
return rt.stopped
}
func (rt *clientRoundTripper) stop() {
rt.mutex.Lock()
defer rt.mutex.Unlock()
rt.stopped = true
}
func (rt *clientRoundTripper) Shutdown() error {
if rt.isStopped() {
return nil
}
rt.stop()
rt.shutdownChan <- struct{}{}
close(rt.shutdownChan)
return nil
}
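// RoundTrip performs the request, retrying on HTTP 429 with exponential backoff
// until the backoff gives up, the request context is done, or the receiver shuts down.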
func (rt *clientRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
if rt.isStopped() {
return nil, errors.New("request cancelled due to shutdown")
}
resp, err := rt.originalTransport.RoundTrip(r)
if err != nil {
return nil, err // Can't do anything
}
if resp.StatusCode == http.StatusTooManyRequests {
expBackoff := &backoff.ExponentialBackOff{
InitialInterval: rt.backoffConfig.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: rt.backoffConfig.MaxInterval,
MaxElapsedTime: rt.backoffConfig.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
expBackoff.Reset()
attempts := 0
for {
attempts++
delay := expBackoff.NextBackOff()
if delay == backoff.Stop {
return resp, err
}
rt.log.Warn("server busy, retrying request",
zap.Int("attempts", attempts),
zap.Duration("delay", delay))
select {
case <-r.Context().Done():
return resp, errors.New("request was cancelled or timed out")
case <-rt.shutdownChan:
return resp, errors.New("request is cancelled due to server shutdown")
case <-time.After(delay):
}
resp, err = rt.originalTransport.RoundTrip(r)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusTooManyRequests {
break
}
}
}
return resp, err
}
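// backoffDelaysExample is a small illustrative helper (not used by the round
// tripper) that previews the first few jittered delays an ExponentialBackOff
// produces for a given retry config: roughly InitialInterval, then growing by
// Multiplier per attempt and capped at MaxInterval, until MaxElapsedTime is
// spent and NextBackOff returns backoff.Stop.
func backoffDelaysExample(cfg configretry.BackOffConfig) []time.Duration {
expBackoff := &backoff.ExponentialBackOff{
InitialInterval: cfg.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: cfg.MaxInterval,
MaxElapsedTime: cfg.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
expBackoff.Reset()
delays := make([]time.Duration, 0, 5)
for len(delays) < 5 {
d := expBackoff.NextBackOff()
if d == backoff.Stop {
break
}
delays = append(delays, d)
}
return delays
}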
// MongoDBAtlasClient wraps the official MongoDB Atlas client to manage pagination
// and mapping to OpenTelemetry metric and log structures.
type MongoDBAtlasClient struct {
log *zap.Logger
client *mongodbatlas.Client
transport *http.Transport
roundTripper *clientRoundTripper
}
// NewMongoDBAtlasClient creates a new MongoDB Atlas client wrapper
func NewMongoDBAtlasClient(
publicKey string,
privateKey string,
backoffConfig configretry.BackOffConfig,
log *zap.Logger,
) *MongoDBAtlasClient {
defaultTransporter := http.DefaultTransport.(*http.Transport)
t := digest.NewTransportWithHTTPTransport(publicKey, privateKey, defaultTransporter)
roundTripper := newClientRoundTripper(t, log, backoffConfig)
tc := &http.Client{Transport: roundTripper}
client := mongodbatlas.NewClient(tc)
return &MongoDBAtlasClient{
log,
client,
defaultTransporter,
roundTripper,
}
}
func (s *MongoDBAtlasClient) Shutdown() error {
s.transport.CloseIdleConnections()
return s.roundTripper.Shutdown()
}
// Check both the returned error and the status of the HTTP response
func checkMongoDBClientErr(err error, response *mongodbatlas.Response) error {
if err != nil {
return err
}
if response != nil {
return response.CheckResponse(response.Body)
}
return nil
}
func hasNext(links []*mongodbatlas.Link) bool {
for _, link := range links {
if link.Rel == "next" {
return true
}
}
return false
}
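// paginateExample is an illustrative sketch (not used by the client) of the
// pagination contract shared by the page helpers below: request pages starting
// at 1, accumulate results, and stop once a page reports no rel="next" link.
// The fetch callback stands in for any of the per-page helpers.
func paginateExample(fetch func(pageNum int) (results []string, hasMore bool, err error)) ([]string, error) {
var all []string
for page := 1; ; page++ {
results, hasMore, err := fetch(page)
if err != nil {
return all, err
}
all = append(all, results...)
if !hasMore {
return all, nil
}
}
}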
// Organizations returns a list of all organizations available with the supplied credentials
func (s *MongoDBAtlasClient) Organizations(ctx context.Context) ([]*mongodbatlas.Organization, error) {
var allOrgs []*mongodbatlas.Organization
page := 1
for {
orgs, hasNext, err := s.getOrganizationsPage(ctx, page)
page++
if err != nil {
// TODO: Add error to a metric
// Stop, returning what we have (probably empty slice)
return allOrgs, fmt.Errorf("error retrieving organizations from MongoDB Atlas API: %w", err)
}
allOrgs = append(allOrgs, orgs...)
if !hasNext {
break
}
}
return allOrgs, nil
}
func (s *MongoDBAtlasClient) getOrganizationsPage(
ctx context.Context,
pageNum int,
) ([]*mongodbatlas.Organization, bool, error) {
orgs, response, err := s.client.Organizations.List(ctx, &mongodbatlas.OrganizationsListOptions{
ListOptions: mongodbatlas.ListOptions{
PageNum: pageNum,
},
})
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, fmt.Errorf("error in retrieving organizations: %w", err)
}
return orgs.Results, hasNext(orgs.Links), nil
}
// GetOrganization retrieves a single organization specified by orgID
func (s *MongoDBAtlasClient) GetOrganization(ctx context.Context, orgID string) (*mongodbatlas.Organization, error) {
org, response, err := s.client.Organizations.Get(ctx, orgID)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving project page: %w", err)
}
return org, nil
}
// Projects returns a list of projects accessible within the provided organization
func (s *MongoDBAtlasClient) Projects(
ctx context.Context,
orgID string,
) ([]*mongodbatlas.Project, error) {
var allProjects []*mongodbatlas.Project
page := 1
for {
projects, hasNext, err := s.getProjectsPage(ctx, orgID, page)
page++
if err != nil {
return allProjects, fmt.Errorf("error retrieving list of projects from MongoDB Atlas API: %w", err)
}
allProjects = append(allProjects, projects...)
if !hasNext {
break
}
}
return allProjects, nil
}
// GetProject returns a single project specified by projectName
func (s *MongoDBAtlasClient) GetProject(ctx context.Context, projectName string) (*mongodbatlas.Project, error) {
project, response, err := s.client.Projects.GetOneProjectByName(ctx, projectName)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving project page: %w", err)
}
return project, nil
}
func (s *MongoDBAtlasClient) getProjectsPage(
ctx context.Context,
orgID string,
pageNum int,
) ([]*mongodbatlas.Project, bool, error) {
projects, response, err := s.client.Organizations.Projects(
ctx,
orgID,
&mongodbatlas.ProjectsListOptions{
ListOptions: mongodbatlas.ListOptions{PageNum: pageNum},
},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, fmt.Errorf("error retrieving project page: %w", err)
}
return projects.Results, hasNext(projects.Links), nil
}
// Processes returns the list of processes running for a given project.
func (s *MongoDBAtlasClient) Processes(
ctx context.Context,
projectID string,
) ([]*mongodbatlas.Process, error) {
// This is a paginated API, but the MongoDB client only returns the values from the first page.
// Note: MongoDB Atlas also has the notion of a Cluster; we can retrieve a list of clusters from
// the Project, but a Cluster does not link to its Process list and a Process does not link
// back to its Cluster (except through the hostname, which is not a documented relationship).
processes, response, err := s.client.Processes.List(
ctx,
projectID,
&mongodbatlas.ProcessesListOptions{
ListOptions: mongodbatlas.ListOptions{
PageNum: 0,
ItemsPerPage: 0,
IncludeCount: true,
},
},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving processes from MongoDB Atlas API: %w", err)
}
return processes, nil
}
func (s *MongoDBAtlasClient) getProcessDatabasesPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
) ([]*mongodbatlas.ProcessDatabase, bool, error) {
databases, response, err := s.client.ProcessDatabases.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ListOptions{PageNum: pageNum},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return databases.Results, hasNext(databases.Links), nil
}
// ProcessDatabases lists databases that are running in a given MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDatabases(
ctx context.Context,
projectID string,
host string,
port int,
) ([]*mongodbatlas.ProcessDatabase, error) {
var allProcessDatabases []*mongodbatlas.ProcessDatabase
pageNum := 1
for {
processes, hasMore, err := s.getProcessDatabasesPage(ctx, projectID, host, port, pageNum)
pageNum++
if err != nil {
return allProcessDatabases, err
}
allProcessDatabases = append(allProcessDatabases, processes...)
if !hasMore {
break
}
}
return allProcessDatabases, nil
}
// ProcessMetrics returns a set of metrics associated with the specified running process.
func (s *MongoDBAtlasClient) ProcessMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.getProcessMeasurementsPage(
ctx,
projectID,
host,
port,
pageNum,
start,
end,
resolution,
)
if err != nil {
s.log.Debug("Error retrieving process metrics from MongoDB Atlas API", zap.Error(err))
break // Return partial results
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) getProcessMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessMeasurements.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// ProcessDatabaseMetrics returns metrics about a particular database running within a MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDatabaseMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
dbname string,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.getProcessDatabaseMeasurementsPage(
ctx,
projectID,
host,
port,
dbname,
pageNum,
start,
end,
resolution,
)
if err != nil {
return err
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) getProcessDatabaseMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
dbname string,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessDatabaseMeasurements.List(
ctx,
projectID,
host,
port,
dbname,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// ProcessDisks enumerates the disks accessible to a specified MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDisks(
ctx context.Context,
projectID string,
host string,
port int,
) []*mongodbatlas.ProcessDisk {
var allDisks []*mongodbatlas.ProcessDisk
pageNum := 1
for {
disks, hasMore, err := s.getProcessDisksPage(ctx, projectID, host, port, pageNum)
if err != nil {
s.log.Debug("Error retrieving disk metrics from MongoDB Atlas API", zap.Error(err))
break // Return partial results
}
pageNum++
allDisks = append(allDisks, disks...)
if !hasMore {
break
}
}
return allDisks
}
func (s *MongoDBAtlasClient) getProcessDisksPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
) ([]*mongodbatlas.ProcessDisk, bool, error) {
disks, result, err := s.client.ProcessDisks.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ListOptions{PageNum: pageNum},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return disks.Results, hasNext(disks.Links), nil
}
// ProcessDiskMetrics returns metrics supplied for a particular disk partition used by a MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDiskMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
partitionName string,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.processDiskMeasurementsPage(
ctx,
projectID,
host,
port,
partitionName,
pageNum,
start,
end,
resolution,
)
if err != nil {
return err
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) processDiskMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
partitionName string,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessDiskMeasurements.List(
ctx,
projectID,
host,
port,
partitionName,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// GetLogs retrieves a compressed (.gz) log file for the given host via the Atlas Logs API: https://www.mongodb.com/docs/atlas/reference/api/logs/#syntax
func (s *MongoDBAtlasClient) GetLogs(ctx context.Context, groupID, hostname, logName string, start, end time.Time) (*bytes.Buffer, error) {
buf := bytes.NewBuffer([]byte{})
dateRange := &mongodbatlas.DateRangetOptions{StartDate: toUnixString(start), EndDate: toUnixString(end)}
resp, err := s.client.Logs.Get(ctx, groupID, hostname, logName, buf, dateRange)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("received status code: %d", resp.StatusCode)
}
return buf, nil
}
// GetClusters retrieves the project's clusters via the Atlas Clusters API: https://www.mongodb.com/docs/atlas/reference/api/clusters-get-all/#request
func (s *MongoDBAtlasClient) GetClusters(ctx context.Context, groupID string) ([]mongodbatlas.Cluster, error) {
options := mongodbatlas.ListOptions{}
clusters, _, err := s.client.Clusters.List(ctx, groupID, &options)
if err != nil {
return nil, err
}
return clusters, nil
}
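// AlertPollOptions controls pagination when polling alerts.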
type AlertPollOptions struct {
PageNum int
PageSize int
}
// GetAlerts returns the alerts specified for the set projects
func (s *MongoDBAtlasClient) GetAlerts(ctx context.Context, groupID string, opts *AlertPollOptions) (ret []mongodbatlas.Alert, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.AlertsListOptions{ListOptions: lo}
alerts, response, err := s.client.Alerts.List(ctx, groupID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return alerts.Results, hasNext(response.Links), nil
}
// GetEventsOptions are the options to use for making a request to get Project Events
type GetEventsOptions struct {
// Which page of the paginated events
PageNum int
// How large the Pages will be
PageSize int
// The list of Event Types https://www.mongodb.com/docs/atlas/reference/api/events-projects-get-all/#event-type-values
// to grab from the API
EventTypes []string
// The oldest date to look back for the events
MinDate time.Time
// The newest date to accept events
MaxDate time.Time
}
// GetProjectEvents returns the events specified for the set projects
func (s *MongoDBAtlasClient) GetProjectEvents(ctx context.Context, groupID string, opts *GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.EventListOptions{
ListOptions: lo,
// Earliest Timestamp in ISO 8601 date and time format in UTC from when Atlas should return events.
MinDate: opts.MinDate.Format(time.RFC3339),
}
if len(opts.EventTypes) > 0 {
options.EventType = opts.EventTypes
}
events, response, err := s.client.Events.ListProjectEvents(ctx, groupID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return events.Results, hasNext(response.Links), nil
}
// GetOrganizationEvents returns the events specified for the set organizations
func (s *MongoDBAtlasClient) GetOrganizationEvents(ctx context.Context, orgID string, opts *GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.EventListOptions{
ListOptions: lo,
// Earliest Timestamp in ISO 8601 date and time format in UTC from when Atlas should return events.
MinDate: opts.MinDate.Format(time.RFC3339),
}
if len(opts.EventTypes) > 0 {
options.EventType = opts.EventTypes
}
events, response, err := s.client.Events.ListOrganizationEvents(ctx, orgID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return events.Results, hasNext(response.Links), nil
}
// GetAccessLogsOptions are the options to use for making a request to get Access Logs
type GetAccessLogsOptions struct {
// The oldest date to look back for the events
MinDate time.Time
// The newest date to accept events
MaxDate time.Time
// If true, only return successful access attempts; if false, only return failed access attempts
// If nil, return both successful and failed access attempts
AuthResult *bool
// Maximum number of entries to return
NLogs int
}
// GetAccessLogs returns the access logs specified for the cluster requested
func (s *MongoDBAtlasClient) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) {
options := mongodbatlas.AccessLogOptions{
// Earliest timestamp, in epoch milliseconds, from when Atlas should return access log results
Start: strconv.FormatInt(opts.MinDate.UTC().UnixMilli(), 10),
// Latest timestamp, in epoch milliseconds, up to when Atlas should return access log results
End: strconv.FormatInt(opts.MaxDate.UTC().UnixMilli(), 10),
// If true, only return successful access attempts; if false, only return failed access attempts
// If nil, return both successful and failed access attempts
AuthResult: opts.AuthResult,
// Maximum number of entries to return (0-20000)
NLogs: opts.NLogs,
}
accessLogs, response, err := s.client.AccessTracking.ListByCluster(ctx, groupID, clusterName, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, err
}
return accessLogs.AccessLogs, nil
}
func toUnixString(t time.Time) string {
return strconv.Itoa(int(t.Unix()))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"bufio"
"compress/gzip"
"encoding/json"
"io"
"regexp"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
func decodeLogs(logger *zap.Logger, clusterMajorVersion string, r io.Reader) ([]model.LogEntry, error) {
switch clusterMajorVersion {
case mongoDBMajorVersion4_2:
// 4.2 clusters use a console log format
return decode4_2(logger.Named("console_decoder"), r)
default:
// All other versions use JSON logging
return decodeJSON(logger.Named("json_decoder"), r)
}
}
func decodeJSON(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.LogEntry
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other I/O error.
return entries, scanner.Err()
}
var entry model.LogEntry
if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil {
logger.Error("Failed to parse log entry as JSON", zap.String("entry", scanner.Text()))
continue
}
entry.Raw = scanner.Text()
entries = append(entries, entry)
}
}
var mongo4_2LogRegex = regexp.MustCompile(`^(?P<timestamp>\S+)\s+(?P<severity>\w+)\s+(?P<component>[\w-]+)\s+\[(?P<context>\S+)\]\s+(?P<message>.*)$`)
func decode4_2(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.LogEntry
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other I/O error.
return entries, scanner.Err()
}
submatches := mongo4_2LogRegex.FindStringSubmatch(scanner.Text())
if len(submatches) != 6 {
// Match failed for this line; skip it and continue processing the rest.
logger.Error("Entry did not match regex", zap.String("entry", scanner.Text()))
continue
}
entry := model.LogEntry{
Timestamp: model.LogTimestamp{
Date: submatches[1],
},
Severity: submatches[2],
Component: submatches[3],
Context: submatches[4],
Message: submatches[5],
Raw: submatches[0],
}
entries = append(entries, entry)
}
}
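// decode4_2Example is an illustrative sketch (not used by the receiver) of the
// console format the 4.2 regex parses. A line such as the fabricated one below
// yields the timestamp, severity ("I"), component ("NETWORK"), context ("conn1"),
// and message capture groups, in that order.
func decode4_2Example() model.LogEntry {
line := "2023-01-01T12:00:00.000+0000 I NETWORK [conn1] end connection 127.0.0.1:1234"
m := mongo4_2LogRegex.FindStringSubmatch(line)
if len(m) != 6 {
return model.LogEntry{}
}
return model.LogEntry{
Timestamp: model.LogTimestamp{Date: m[1]},
Severity: m[2],
Component: m[3],
Context: m[4],
Message: m[5],
Raw: m[0],
}
}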
func decodeAuditJSON(logger *zap.Logger, r io.Reader) ([]model.AuditLog, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.AuditLog
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other I/O error.
return entries, scanner.Err()
}
var entry model.AuditLog
if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil {
logger.Error("Failed to parse audit log entry as JSON", zap.String("entry", scanner.Text()))
continue
}
entry.Raw = scanner.Text()
entries = append(entries, entry)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"errors"
"io"
"net"
"strings"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
const mongoDBMajorVersion4_2 = "4.2"
type logsReceiver struct {
log *zap.Logger
cfg *Config
client *internal.MongoDBAtlasClient
consumer consumer.Logs
stopperChan chan struct{}
wg sync.WaitGroup
start time.Time
end time.Time
}
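// ProjectContext pairs a project with the name of the organization that owns it.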
type ProjectContext struct {
Project mongodbatlas.Project
orgName string
}
// MongoDB Atlas Documentation recommends a polling interval of 5 minutes: https://www.mongodb.com/docs/atlas/reference/api/logs/#logs
const collectionInterval = time.Minute * 5
func newMongoDBAtlasLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *logsReceiver {
client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger)
for _, p := range cfg.Logs.Projects {
p.populateIncludesAndExcludes()
}
return &logsReceiver{
log: settings.Logger,
cfg: cfg,
client: client,
stopperChan: make(chan struct{}),
consumer: consumer,
}
}
// Start launches the log collection loop; each iteration collects logs for the sliding [start, end) time window.
func (s *logsReceiver) Start(ctx context.Context, _ component.Host) error {
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.start = time.Now().Add(-collectionInterval)
s.end = time.Now()
for {
s.collect(ctx)
// Wait for shutdown, cancellation, or the next collection interval.
select {
case <-ctx.Done():
return
case <-s.stopperChan:
return
case <-time.After(collectionInterval):
s.start = s.end
s.end = time.Now()
}
}
}()
return nil
}
func (s *logsReceiver) Shutdown(_ context.Context) error {
close(s.stopperChan)
s.wg.Wait()
return s.client.Shutdown()
}
// parseHostNames extracts the hostnames from a comma-separated MongoDB connection string.
func parseHostNames(s string, logger *zap.Logger) []string {
var hostnames []string
if s == "" {
return []string{}
}
for _, t := range strings.Split(s, ",") {
// separate hostname from scheme and port
host, _, err := net.SplitHostPort(strings.TrimPrefix(t, "mongodb://"))
if err != nil {
logger.Error("Could not parse out hostname: " + host)
continue
}
hostnames = append(hostnames, host)
}
return hostnames
}
// collect performs a single collection pass over all configured projects.
func (s *logsReceiver) collect(ctx context.Context) {
for _, projectCfg := range s.cfg.Logs.Projects {
project, err := s.client.GetProject(ctx, projectCfg.Name)
if err != nil {
s.log.Error("Error retrieving project "+projectCfg.Name+":", zap.Error(err))
continue
}
pc := ProjectContext{Project: *project}
org, err := s.client.GetOrganization(ctx, project.OrgID)
if err != nil {
s.log.Error("Error retrieving organization", zap.Error(err))
pc.orgName = "unknown"
} else {
pc.orgName = org.Name
}
// get clusters for each of the projects
clusters, err := s.processClusters(ctx, *projectCfg, project.ID)
if err != nil {
s.log.Error("Failure to process Clusters", zap.Error(err))
}
s.collectClusterLogs(clusters, *projectCfg, pc)
}
}
func (s *logsReceiver) processClusters(ctx context.Context, projectCfg LogsProjectConfig, projectID string) ([]mongodbatlas.Cluster, error) {
clusters, err := s.client.GetClusters(ctx, projectID)
if err != nil {
s.log.Error("Failure to collect clusters from project: %w", zap.Error(err))
return nil, err
}
return filterClusters(clusters, projectCfg.ProjectConfig)
}
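// ClusterInfo holds the cluster metadata that is attached as resource attributes to emitted log records.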
type ClusterInfo struct {
ClusterName string
RegionName string
ProviderName string
MongoDBMajorVersion string
}
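// collectClusterLogs collects host logs and audit logs for every hostname of each cluster,
// honoring the per-project enable flags.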
func (s *logsReceiver) collectClusterLogs(clusters []mongodbatlas.Cluster, projectCfg LogsProjectConfig, pc ProjectContext) {
for _, cluster := range clusters {
clusterInfo := ClusterInfo{
ClusterName: cluster.Name,
RegionName: cluster.ProviderSettings.RegionName,
ProviderName: cluster.ProviderSettings.ProviderName,
MongoDBMajorVersion: cluster.MongoDBMajorVersion,
}
hostnames := parseHostNames(cluster.ConnectionStrings.Standard, s.log)
for _, hostname := range hostnames {
// Defaults to true if not specified
if projectCfg.EnableHostLogs == nil || *projectCfg.EnableHostLogs {
s.log.Debug("Collecting logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name))
s.collectLogs(pc, hostname, "mongodb.gz", clusterInfo)
s.collectLogs(pc, hostname, "mongos.gz", clusterInfo)
}
// Defaults to false if not specified
if projectCfg.EnableAuditLogs {
s.log.Debug("Collecting audit logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name))
s.collectAuditLogs(pc, hostname, "mongodb-audit-log.gz", clusterInfo)
s.collectAuditLogs(pc, hostname, "mongos-audit-log.gz", clusterInfo)
}
}
}
}
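// filterClusters returns the clusters allowed by the include/exclude configuration;
// configuring both lists is an error.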
func filterClusters(clusters []mongodbatlas.Cluster, projectCfg ProjectConfig) ([]mongodbatlas.Cluster, error) {
include, exclude := projectCfg.IncludeClusters, projectCfg.ExcludeClusters
var allowed bool
var clusterNameSet map[string]struct{}
// check to include or exclude clusters
switch {
// keep all clusters if include and exclude are not specified
case len(include) == 0 && len(exclude) == 0:
return clusters, nil
// include is initialized
case len(include) > 0 && len(exclude) == 0:
allowed = true
clusterNameSet = projectCfg.includesByClusterName
// exclude is initialized
case len(exclude) > 0 && len(include) == 0:
allowed = false
clusterNameSet = projectCfg.excludesByClusterName
// both are initialized
default:
return nil, errors.New("both Include and Exclude clusters configured")
}
var filtered []mongodbatlas.Cluster
for _, cluster := range clusters {
if _, ok := clusterNameSet[cluster.Name]; (!ok && !allowed) || (ok && allowed) {
filtered = append(filtered, cluster)
}
}
return filtered, nil
}
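// getHostLogs retrieves the gzipped log file for a host from the Atlas API and decodes it into log entries.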
func (s *logsReceiver) getHostLogs(groupID, hostname, logName string, clusterMajorVersion string) ([]model.LogEntry, error) {
// Get gzip bytes buffer from API
buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end)
if err != nil {
return nil, err
}
return decodeLogs(s.log, clusterMajorVersion, buf)
}
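// getHostAuditLogs retrieves the gzipped audit log file for a host from the Atlas API and decodes its JSON entries.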
func (s *logsReceiver) getHostAuditLogs(groupID, hostname, logName string) ([]model.AuditLog, error) {
// Get gzip bytes buffer from API
buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end)
if err != nil {
return nil, err
}
return decodeAuditJSON(s.log, buf)
}
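// collectLogs fetches host logs for the given hostname and forwards them to the next consumer.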
func (s *logsReceiver) collectLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) {
logs, err := s.getHostLogs(pc.Project.ID, hostname, logName, clusterInfo.MongoDBMajorVersion)
if err != nil && !errors.Is(err, io.EOF) {
s.log.Warn("Failed to retrieve host logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
if len(logs) == 0 {
s.log.Warn("Attempted to retrieve host logs but received 0 logs", zap.Error(err), zap.String("log", logName), zap.String("hostname", hostname), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
plog := mongodbEventToLogData(s.log,
logs,
pc,
hostname,
logName,
clusterInfo)
err = s.consumer.ConsumeLogs(context.Background(), plog)
if err != nil {
s.log.Error("Failed to consume logs", zap.Error(err))
}
}
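// collectAuditLogs fetches audit logs for the given hostname, translates them, and forwards them to the next consumer.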
func (s *logsReceiver) collectAuditLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) {
logs, err := s.getHostAuditLogs(
pc.Project.ID,
hostname,
logName,
)
if err != nil && !errors.Is(err, io.EOF) {
s.log.Warn("Failed to retrieve audit logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
if len(logs) == 0 {
s.log.Warn("Attempted to retrieve audit logs but received 0 logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
plog, err := mongodbAuditEventToLogData(s.log,
logs,
pc,
hostname,
logName,
clusterInfo)
if err != nil {
s.log.Warn("Failed to translate audit logs: "+logName, zap.Error(err))
return
}
err = s.consumer.ConsumeLogs(context.Background(), plog)
if err != nil {
s.log.Error("Failed to consume logs", zap.Error(err))
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/multierr"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
const (
// Number of log attributes to add to the plog.LogRecordSlice for host logs.
totalLogAttributes = 10
// Number of log attributes to add to the plog.LogRecordSlice for audit logs.
totalAuditLogAttributes = 16
// Number of resource attributes to add to the plog.ResourceLogs.
totalResourceAttributes = 4
)
// Timestamp layouts for MongoDB log entries; 4.2 logs use the console layout, later versions the JSON layout.
const (
jsonTimestampLayout = "2006-01-02T15:04:05.000-07:00"
consoleTimestampLayout = "2006-01-02T15:04:05.000-0700"
)
// severityMap maps MongoDB Atlas log severity levels to plog severity numbers.
var severityMap = map[string]plog.SeverityNumber{
"F": plog.SeverityNumberFatal,
"E": plog.SeverityNumberError,
"W": plog.SeverityNumberWarn,
"I": plog.SeverityNumberInfo,
"D": plog.SeverityNumberDebug,
"D1": plog.SeverityNumberDebug,
"D2": plog.SeverityNumberDebug2,
"D3": plog.SeverityNumberDebug3,
"D4": plog.SeverityNumberDebug4,
"D5": plog.SeverityNumberDebug4,
}
// mongodbAuditEventToLogData converts model.AuditLog events to plog.Logs and adds the resource attributes.
func mongodbAuditEventToLogData(logger *zap.Logger, logs []model.AuditLog, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) (plog.Logs, error) {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
resourceAttrs := rl.Resource().Attributes()
resourceAttrs.EnsureCapacity(totalResourceAttributes)
// Attributes related to the object causing the event.
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName)
resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name)
resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName)
resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName)
resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName)
resourceAttrs.PutStr("mongodb_atlas.host.name", hostname)
var errs []error
logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion)
for _, log := range logs {
lr := sl.LogRecords().AppendEmpty()
t, err := time.Parse(logTsFormat, log.Timestamp.Date)
if err != nil {
logger.Warn("Time failed to parse correctly", zap.Error(err))
}
lr.SetTimestamp(pcommon.NewTimestampFromTime(t))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
// Insert Raw Log message into Body of LogRecord
lr.Body().SetStr(log.Raw)
// Audit logs don't carry a severity/level, so set the
// "SeverityNumber" and "SeverityText" to INFO.
lr.SetSeverityNumber(plog.SeverityNumberInfo)
lr.SetSeverityText("INFO")
attrs := lr.Attributes()
attrs.EnsureCapacity(totalAuditLogAttributes)
attrs.PutStr("atype", log.Type)
if log.Local.IP != nil {
attrs.PutStr("local.ip", *log.Local.IP)
}
if log.Local.Port != nil {
attrs.PutInt("local.port", int64(*log.Local.Port))
}
if log.Local.SystemUser != nil {
attrs.PutBool("local.isSystemUser", *log.Local.SystemUser)
}
if log.Local.UnixSocket != nil {
attrs.PutStr("local.unix", *log.Local.UnixSocket)
}
if log.Remote.IP != nil {
attrs.PutStr("remote.ip", *log.Remote.IP)
}
if log.Remote.Port != nil {
attrs.PutInt("remote.port", int64(*log.Remote.Port))
}
if log.Remote.SystemUser != nil {
attrs.PutBool("remote.isSystemUser", *log.Remote.SystemUser)
}
if log.Remote.UnixSocket != nil {
attrs.PutStr("remote.unix", *log.Remote.UnixSocket)
}
if log.ID != nil {
attrs.PutStr("uuid.binary", log.ID.Binary)
attrs.PutStr("uuid.type", log.ID.Type)
}
attrs.PutInt("result", int64(log.Result))
if err = attrs.PutEmptyMap("param").FromRaw(log.Param); err != nil {
errs = append(errs, err)
}
usersSlice := attrs.PutEmptySlice("users")
usersSlice.EnsureCapacity(len(log.Users))
for _, user := range log.Users {
user.Pdata().CopyTo(usersSlice.AppendEmpty().SetEmptyMap())
}
rolesSlice := attrs.PutEmptySlice("roles")
rolesSlice.EnsureCapacity(len(log.Roles))
for _, roles := range log.Roles {
roles.Pdata().CopyTo(rolesSlice.AppendEmpty().SetEmptyMap())
}
attrs.PutStr("log_name", logName)
}
return ld, multierr.Combine(errs...)
}
// mongodbEventToLogData converts model.LogEntry events to plog.Logs and adds the resource attributes.
func mongodbEventToLogData(logger *zap.Logger, logs []model.LogEntry, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) plog.Logs {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
resourceAttrs := rl.Resource().Attributes()
resourceAttrs.EnsureCapacity(totalResourceAttributes)
// Attributes related to the object causing the event.
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName)
resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name)
resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName)
resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName)
resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName)
resourceAttrs.PutStr("mongodb_atlas.host.name", hostname)
logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion)
for _, log := range logs {
lr := sl.LogRecords().AppendEmpty()
t, err := time.Parse(logTsFormat, log.Timestamp.Date)
if err != nil {
logger.Warn("Time failed to parse correctly", zap.Error(err))
}
lr.SetTimestamp(pcommon.NewTimestampFromTime(t))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
// Insert Raw Log message into Body of LogRecord
lr.Body().SetStr(log.Raw)
// Set the "SeverityNumber" and "SeverityText" if a known type of
// severity is found.
if severityNumber, ok := severityMap[log.Severity]; ok {
lr.SetSeverityNumber(severityNumber)
lr.SetSeverityText(log.Severity)
} else {
logger.Debug("unknown severity type", zap.String("type", log.Severity))
}
attrs := lr.Attributes()
attrs.EnsureCapacity(totalLogAttributes)
//nolint:errcheck
attrs.FromRaw(log.Attributes)
attrs.PutStr("message", log.Message)
attrs.PutStr("component", log.Component)
attrs.PutStr("context", log.Context)
// log ID is not present on MongoDB 4.2 systems
if clusterInfo.MongoDBMajorVersion != mongoDBMajorVersion4_2 {
attrs.PutInt("id", log.ID)
}
attrs.PutStr("log_name", logName)
}
return ld
}
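// tsLayout returns the timestamp layout matching the given MongoDB major version:
// the console layout for 4.2 and the JSON layout otherwise.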
func tsLayout(clusterVersion string) string {
switch clusterVersion {
case mongoDBMajorVersion4_2:
return consoleTimestampLayout
default:
return jsonTimestampLayout
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/scraper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
type mongodbatlasreceiver struct {
log *zap.Logger
cfg *Config
client *internal.MongoDBAtlasClient
lastRun time.Time
mb *metadata.MetricsBuilder
stopperChan chan struct{}
}
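// timeconstraints holds the RFC3339-formatted start/end window and the resolution
// passed to the Atlas measurements API.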
type timeconstraints struct {
start string
end string
resolution string
}
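// newMongoDBAtlasReceiver creates a metrics receiver backed by a MongoDB Atlas client
// built from the receiver configuration.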
func newMongoDBAtlasReceiver(settings receiver.Settings, cfg *Config) *mongodbatlasreceiver {
client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger)
for _, p := range cfg.Projects {
p.populateIncludesAndExcludes()
}
return &mongodbatlasreceiver{
log: settings.Logger,
cfg: cfg,
client: client,
mb: metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings),
stopperChan: make(chan struct{}),
}
}
func newMongoDBAtlasScraper(recv *mongodbatlasreceiver) (scraper.Metrics, error) {
return scraper.NewMetrics(recv.scrape, scraper.WithShutdown(recv.shutdown))
}
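// scrape polls MongoDB Atlas for metrics covering the window since the last run and emits them.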
func (s *mongodbatlasreceiver) scrape(ctx context.Context) (pmetric.Metrics, error) {
now := time.Now()
if err := s.poll(ctx, s.timeConstraints(now)); err != nil {
return pmetric.Metrics{}, err
}
s.lastRun = now
return s.mb.Emit(), nil
}
func (s *mongodbatlasreceiver) timeConstraints(now time.Time) timeconstraints {
var start time.Time
if s.lastRun.IsZero() {
start = now.Add(s.cfg.CollectionInterval * -1)
} else {
start = s.lastRun
}
return timeconstraints{
start.UTC().Format(time.RFC3339),
now.UTC().Format(time.RFC3339),
s.cfg.Granularity,
}
}
func (s *mongodbatlasreceiver) shutdown(context.Context) error {
return s.client.Shutdown()
}
// poll decides whether to poll all projects or a specific project based on the configuration.
func (s *mongodbatlasreceiver) poll(ctx context.Context, time timeconstraints) error {
if len(s.cfg.Projects) == 0 {
return s.pollAllProjects(ctx, time)
}
return s.pollProjects(ctx, time)
}
// pollAllProjects handles polling across all projects within the organizations.
func (s *mongodbatlasreceiver) pollAllProjects(ctx context.Context, time timeconstraints) error {
orgs, err := s.client.Organizations(ctx)
if err != nil {
return fmt.Errorf("error retrieving organizations: %w", err)
}
for _, org := range orgs {
proj, err := s.client.Projects(ctx, org.ID)
if err != nil {
s.log.Error("error retrieving projects", zap.String("orgID", org.ID), zap.Error(err))
continue
}
for _, project := range proj {
// Since there is no specific ProjectConfig for these projects, pass nil.
if err := s.processProject(ctx, time, org.Name, project, nil); err != nil {
s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
}
}
}
return nil
}
// pollProjects handles polling for the specific projects listed in the configuration.
func (s *mongodbatlasreceiver) pollProjects(ctx context.Context, time timeconstraints) error {
for _, projectCfg := range s.cfg.Projects {
project, err := s.client.GetProject(ctx, projectCfg.Name)
if err != nil {
s.log.Error("error retrieving project", zap.String("projectName", projectCfg.Name), zap.Error(err))
continue
}
org, err := s.client.GetOrganization(ctx, project.OrgID)
if err != nil {
s.log.Error("error retrieving organization from project", zap.String("projectName", projectCfg.Name), zap.Error(err))
continue
}
if err := s.processProject(ctx, time, org.Name, project, projectCfg); err != nil {
s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
}
}
return nil
}
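// processProject collects metrics for every process in the project, filtered by the
// cluster include/exclude configuration.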
func (s *mongodbatlasreceiver) processProject(ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, projectCfg *ProjectConfig) error {
nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID)
if err != nil {
return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err)
}
processes, err := s.client.Processes(ctx, project.ID)
if err != nil {
return fmt.Errorf("error retrieving MongoDB Atlas processes for project %s: %w", project.ID, err)
}
for _, process := range processes {
clusterName := nodeClusterMap[process.UserAlias]
providerValues := providerMap[clusterName]
if !shouldProcessCluster(projectCfg, clusterName) {
// Skip processing for this cluster
continue
}
if err := s.extractProcessMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
}
return nil
}
// shouldProcessCluster checks whether a given cluster should be processed based on the project configuration.
func shouldProcessCluster(projectCfg *ProjectConfig, clusterName string) bool {
if projectCfg == nil {
// If there is no project config, process all clusters.
return true
}
_, isIncluded := projectCfg.includesByClusterName[clusterName]
_, isExcluded := projectCfg.excludesByClusterName[clusterName]
// Return false immediately if the cluster is excluded.
if isExcluded {
return false
}
// If IncludeClusters is empty, or the cluster is explicitly included, return true.
return len(projectCfg.IncludeClusters) == 0 || isIncluded
}
type providerValues struct {
RegionName string
ProviderName string
}
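// getNodeClusterNameMap returns a map from node hostname to cluster name and a map
// from cluster name to provider/region values.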
func (s *mongodbatlasreceiver) getNodeClusterNameMap(
ctx context.Context,
projectID string,
) (map[string]string, map[string]providerValues, error) {
providerMap := make(map[string]providerValues)
clusterMap := make(map[string]string)
clusters, err := s.client.GetClusters(ctx, projectID)
if err != nil {
return nil, nil, err
}
for _, cluster := range clusters {
// URI in the form mongodb://host1.mongodb.net:27017,host2.mongodb.net:27017,host3.mongodb.net:27017
nodes := strings.Split(strings.TrimPrefix(cluster.MongoURI, "mongodb://"), ",")
for _, node := range nodes {
// Remove the port from the node
n, _, _ := strings.Cut(node, ":")
clusterMap[n] = cluster.Name
}
providerMap[cluster.Name] = providerValues{
RegionName: cluster.ProviderSettings.RegionName,
ProviderName: cluster.ProviderSettings.ProviderName,
}
}
return clusterMap, providerMap, nil
}
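// extractProcessMetrics polls process-level metrics for a single process and emits them
// with the process resource attributes.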
func (s *mongodbatlasreceiver) extractProcessMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
if err := s.client.ProcessMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling process metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
return nil
}
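// extractProcessDatabaseMetrics polls per-database metrics for each database of the process
// and emits them with the database resource attribute.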
func (s *mongodbatlasreceiver) extractProcessDatabaseMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
processDatabases, err := s.client.ProcessDatabases(
ctx,
project.ID,
process.Hostname,
process.Port,
)
if err != nil {
return fmt.Errorf("error retrieving process databases: %w", err)
}
for _, db := range processDatabases {
if err := s.client.ProcessDatabaseMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
db.DatabaseName,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling database metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasDbName(db.DatabaseName)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
}
return nil
}
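// extractProcessDiskMetrics polls per-partition disk metrics for each disk of the process
// and emits them with the disk partition resource attribute.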
func (s *mongodbatlasreceiver) extractProcessDiskMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
for _, disk := range s.client.ProcessDisks(ctx, project.ID, process.Hostname, process.Port) {
if err := s.client.ProcessDiskMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
disk.PartitionName,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling disk metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasDiskPartition(disk.PartitionName)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver"
// This file implements the factory for the SAPM receiver.
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver/internal/metadata"
)
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:7276"
)
// NewFactory creates a factory for SAPM receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithTraces(createTracesReceiver, metadata.TracesStability))
}
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
}
}
// extractPortFromEndpoint extracts the port number from a string in "address:port" format.
// If the port number cannot be extracted, it returns an error.
// TODO make this a utility function
func extractPortFromEndpoint(endpoint string) (int, error) {
_, portStr, err := net.SplitHostPort(endpoint)
if err != nil {
return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
}
port, err := strconv.ParseInt(portStr, 10, 0)
if err != nil {
return 0, fmt.Errorf("endpoint port is not a number: %w", err)
}
if port < 1 || port > 65535 {
return 0, errors.New("port number must be between 1 and 65535")
}
return int(port), nil
}
// validate verifies that the configured endpoint contains a valid port.
func (rCfg *Config) validate() error {
_, err := extractPortFromEndpoint(rCfg.Endpoint)
if err != nil {
return err
}
return nil
}
// createTracesReceiver creates a traces receiver based on the provided config.
func createTracesReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
// assert config is SAPM config
rCfg := cfg.(*Config)
err := rCfg.validate()
if err != nil {
return nil, err
}
// Create the receiver.
return newReceiver(params, rCfg, nextConsumer)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver"
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"sync"
"github.com/gorilla/mux"
splunksapm "github.com/signalfx/sapm-proto/gen"
"github.com/signalfx/sapm-proto/sapmprotocol"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger"
)
var gzipWriterPool = &sync.Pool{
New: func() any {
return gzip.NewWriter(io.Discard)
},
}
// sapmReceiver receives spans in the Splunk SAPM format over HTTP
type sapmReceiver struct {
settings component.TelemetrySettings
config *Config
server *http.Server
shutdownWG sync.WaitGroup
nextConsumer consumer.Traces
// defaultResponse is a placeholder. For now this receiver returns an empty sapm response.
// This defaultResponse is an optimization so we don't have to proto.Marshal the response
// for every request. At some point this may be removed when there is actual content to return.
defaultResponse []byte
obsrecv *receiverhelper.ObsReport
}
// handleRequest parses an http request containing sapm and passes the trace data to the next consumer
func (sr *sapmReceiver) handleRequest(req *http.Request) error {
sapm, err := sapmprotocol.ParseTraceV2Request(req)
// errors processing the request should return http.StatusBadRequest
if err != nil {
return err
}
ctx := sr.obsrecv.StartTracesOp(req.Context())
td, err := jaeger.ProtoToTraces(sapm.Batches)
if err != nil {
return err
}
// pass the trace data to the next consumer
err = sr.nextConsumer.ConsumeTraces(ctx, td)
if err != nil {
err = fmt.Errorf("error passing trace data to next consumer: %w", err)
}
sr.obsrecv.EndTracesOp(ctx, "protobuf", td.SpanCount(), err)
return err
}
// HTTPHandlerFunc returns an http.HandlerFunc that handles SAPM requests
func (sr *sapmReceiver) HTTPHandlerFunc(rw http.ResponseWriter, req *http.Request) {
// handle the request payload
err := sr.handleRequest(req)
if err != nil {
errorutil.HTTPError(rw, err)
return
}
// respBytes are bytes to write to the http.Response
// build the response message
// NOTE currently the response is an empty struct. As an optimization this receiver will pass a
// byte array that was generated in the receiver's constructor. If this receiver needs to return
// more than an empty struct, then the sapm.PostSpansResponse{} struct will need to be marshaled
// and on error a http.StatusInternalServerError should be written to the http.ResponseWriter and
// this function should immediately return.
respBytes := sr.defaultResponse
rw.Header().Set(sapmprotocol.ContentTypeHeaderName, sapmprotocol.ContentTypeHeaderValue)
// write the response if client does not accept gzip encoding
if req.Header.Get(sapmprotocol.AcceptEncodingHeaderName) != sapmprotocol.GZipEncodingHeaderValue {
// write the response bytes
_, err = rw.Write(respBytes)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
}
return
}
// gzip the response
// get the gzip writer
writer := gzipWriterPool.Get().(*gzip.Writer)
defer gzipWriterPool.Put(writer)
var gzipBuffer bytes.Buffer
// reset the writer with the gzip buffer
writer.Reset(&gzipBuffer)
// gzip the responseBytes
_, err = writer.Write(respBytes)
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
return
}
// close the gzip writer and write gzip footer
err = writer.Close()
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
return
}
// write the successfully gzipped payload
rw.Header().Set(sapmprotocol.ContentEncodingHeaderName, sapmprotocol.GZipEncodingHeaderValue)
_, err = rw.Write(gzipBuffer.Bytes())
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
}
}
// Start starts the sapmReceiver's server.
func (sr *sapmReceiver) Start(ctx context.Context, host component.Host) error {
// server.Handler will be nil on initial call, otherwise noop.
if sr.server != nil && sr.server.Handler != nil {
return nil
}
// set up the listener
ln, err := sr.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", sr.config.Endpoint, err)
}
// use gorilla mux to create a router/handler
nr := mux.NewRouter()
nr.HandleFunc(sapmprotocol.TraceEndpointV2, sr.HTTPHandlerFunc)
// create a server with the handler
sr.server, err = sr.config.ServerConfig.ToServer(ctx, host, sr.settings, nr)
if err != nil {
return err
}
sr.shutdownWG.Add(1)
// run the server on a routine
go func() {
defer sr.shutdownWG.Done()
if errHTTP := sr.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown stops the sapmReceiver's server.
func (sr *sapmReceiver) Shutdown(context.Context) error {
if sr.server == nil {
return nil
}
err := sr.server.Close()
sr.shutdownWG.Wait()
return err
}
// this validates at compile time that sapmReceiver implements the receiver.Traces interface
var _ receiver.Traces = (*sapmReceiver)(nil)
// newReceiver creates a sapmReceiver that receives SAPM over http
func newReceiver(
params receiver.Settings,
config *Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
// build the response message
defaultResponse := &splunksapm.PostSpansResponse{}
defaultResponseBytes, err := defaultResponse.Marshal()
if err != nil {
return nil, fmt.Errorf("failed to marshal default response body for %v receiver: %w", params.ID, err)
}
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: params.ID,
Transport: transport,
ReceiverCreateSettings: params,
})
if err != nil {
return nil, err
}
return &sapmReceiver{
settings: params.TelemetrySettings,
config: config,
nextConsumer: nextConsumer,
defaultResponse: defaultResponseBytes,
obsrecv: obsrecv,
}, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"errors"
"go.opentelemetry.io/collector/config/confighttp"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
var errEmptyEndpoint = errors.New("empty endpoint")
// Config defines configuration for the SignalFx receiver.
type Config struct {
confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
// Deprecated: `access_token_passthrough` is deprecated.
// Please enable include_metadata in the receiver and add the following config to the batch processor:
// batch:
// metadata_keys: [X-Sf-Token]
splunk.AccessTokenPassthroughConfig `mapstructure:",squash"`
}
// Validate verifies that the endpoint is valid and the configured port is not 0
func (rCfg *Config) Validate() error {
if rCfg.ServerConfig.Endpoint == "" {
return errEmptyEndpoint
}
_, err := extractPortFromEndpoint(rCfg.ServerConfig.Endpoint)
if err != nil {
return err
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"sync"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata"
)
// This file implements the factory for the SignalFx receiver.
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:9943"
)
// NewFactory creates a factory for SignalFx receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
}
}
// extractPortFromEndpoint extracts the port number from a string in "address:port" format.
// If the port number cannot be extracted, it returns an error.
func extractPortFromEndpoint(endpoint string) (int, error) {
_, portStr, err := net.SplitHostPort(endpoint)
if err != nil {
return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
}
port, err := strconv.ParseInt(portStr, 10, 0)
if err != nil {
return 0, fmt.Errorf("endpoint port is not a number: %w", err)
}
if port < 1 || port > 65535 {
return 0, errors.New("port number must be between 1 and 65535")
}
return int(port), nil
}
// createMetricsReceiver creates a metrics receiver based on provided config.
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
rCfg := cfg.(*Config)
if rCfg.AccessTokenPassthrough {
params.Logger.Warn(
"access_token_passthrough is deprecated. " +
"Please enable include_metadata in the receiver and add " +
"`metadata_keys: [X-Sf-Token]` to the batch processor",
)
}
receiverLock.Lock()
r := receivers[rCfg]
if r == nil {
var err error
r, err = newReceiver(params, *rCfg)
if err != nil {
return nil, err
}
receivers[rCfg] = r
}
receiverLock.Unlock()
r.RegisterMetricsConsumer(consumer)
return r, nil
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
rCfg := cfg.(*Config)
if rCfg.AccessTokenPassthrough {
params.Logger.Warn(
"access_token_passthrough is deprecated. " +
"Please enable include_metadata in the receiver and add " +
"`metadata_keys: [X-Sf-Token]` to the batch processor",
)
}
receiverLock.Lock()
r := receivers[rCfg]
if r == nil {
var err error
r, err = newReceiver(params, *rCfg)
if err != nil {
return nil, err
}
receivers[rCfg] = r
}
receiverLock.Unlock()
r.RegisterLogsConsumer(consumer)
return r, nil
}
var (
receiverLock sync.Mutex
receivers = map[*Config]*sfxReceiver{}
)
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
"unsafe"
"github.com/gorilla/mux"
sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata"
)
const (
defaultServerTimeout = 20 * time.Second
responseOK = "OK"
responseInvalidMethod = "Only \"POST\" method is supported"
responseEventsInvalidContentType = "\"Content-Type\" must be \"application/x-protobuf\""
responseInvalidContentType = "\"Content-Type\" must be either \"application/x-protobuf\" or \"application/x-protobuf;format=otlp\""
responseInvalidEncoding = "\"Content-Encoding\" must be \"gzip\" or empty"
responseErrGzipReader = "Error on gzip body"
responseErrReadBody = "Failed to read message body"
responseErrUnmarshalBody = "Failed to unmarshal message body"
responseErrNextConsumer = "Internal Server Error"
responseErrLogsNotConfigured = "Log pipeline has not been configured to handle events"
responseErrMetricsNotConfigured = "Metric pipeline has not been configured to handle datapoints"
// Centralizing some HTTP and related string constants.
protobufContentType = "application/x-protobuf"
otlpProtobufContentType = "application/x-protobuf;format=otlp"
gzipEncoding = "gzip"
httpContentTypeHeader = "Content-Type"
httpContentEncodingHeader = "Content-Encoding"
)
var (
okRespBody = initJSONResponse(responseOK)
invalidMethodRespBody = initJSONResponse(responseInvalidMethod)
invalidContentRespBody = initJSONResponse(responseInvalidContentType)
invalidEventsContentRespBody = initJSONResponse(responseEventsInvalidContentType)
invalidEncodingRespBody = initJSONResponse(responseInvalidEncoding)
errGzipReaderRespBody = initJSONResponse(responseErrGzipReader)
errReadBodyRespBody = initJSONResponse(responseErrReadBody)
errUnmarshalBodyRespBody = initJSONResponse(responseErrUnmarshalBody)
errNextConsumerRespBody = initJSONResponse(responseErrNextConsumer)
errLogsNotConfigured = initJSONResponse(responseErrLogsNotConfigured)
errMetricsNotConfigured = initJSONResponse(responseErrMetricsNotConfigured)
translator = &signalfx.ToTranslator{}
)
// sfxReceiver implements the receiver.Metrics for SignalFx metric protocol.
type sfxReceiver struct {
settings receiver.Settings
config *Config
metricsConsumer consumer.Metrics
logsConsumer consumer.Logs
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
}
var _ receiver.Metrics = (*sfxReceiver)(nil)
// newReceiver creates the SignalFx receiver with the given configuration.
func newReceiver(
settings receiver.Settings,
config Config,
) (*sfxReceiver, error) {
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: settings.ID,
Transport: transport,
ReceiverCreateSettings: settings,
})
if err != nil {
return nil, err
}
r := &sfxReceiver{
settings: settings,
config: &config,
obsrecv: obsrecv,
}
return r, nil
}
func (r *sfxReceiver) RegisterMetricsConsumer(mc consumer.Metrics) {
r.metricsConsumer = mc
}
func (r *sfxReceiver) RegisterLogsConsumer(lc consumer.Logs) {
r.logsConsumer = lc
}
// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *sfxReceiver) Start(ctx context.Context, host component.Host) error {
if r.server != nil {
return nil
}
// set up the listener
ln, err := r.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
}
mx := mux.NewRouter()
mx.HandleFunc("/v2/datapoint", r.handleDatapointReq)
mx.HandleFunc("/v2/event", r.handleEventReq)
r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
if err != nil {
return err
}
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
r.server.ReadHeaderTimeout = defaultServerTimeout
r.server.WriteTimeout = defaultServerTimeout
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown tells the receiver that it should stop reception,
// giving it a chance to perform any necessary clean-up.
func (r *sfxReceiver) Shutdown(context.Context) error {
if r.server == nil {
return nil
}
err := r.server.Close()
r.shutdownWG.Wait()
return err
}
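// readBody reads the request body, transparently decompressing gzip-encoded payloads;
// it fails the request and returns false on error.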
func (r *sfxReceiver) readBody(ctx context.Context, resp http.ResponseWriter, req *http.Request) ([]byte, bool) {
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, nil)
return nil, false
}
bodyReader := req.Body
if encoding == gzipEncoding {
var err error
bodyReader, err = gzip.NewReader(bodyReader)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errGzipReaderRespBody, err)
return nil, false
}
}
body, err := io.ReadAll(bodyReader)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errReadBodyRespBody, err)
return nil, false
}
return body, true
}
func (r *sfxReceiver) writeResponse(ctx context.Context, resp http.ResponseWriter, err error) {
if err != nil {
r.failRequest(ctx, resp, errorutil.GetHTTPStatusCodeFromError(err), errNextConsumerRespBody, err)
return
}
resp.WriteHeader(http.StatusOK)
_, err = resp.Write(okRespBody)
if err != nil {
r.failRequest(ctx, resp, http.StatusInternalServerError, errNextConsumerRespBody, err)
}
}
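// handleDatapointReq handles datapoint requests in either SignalFx protobuf or OTLP protobuf
// format and passes the resulting metrics to the consumer.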
func (r *sfxReceiver) handleDatapointReq(resp http.ResponseWriter, req *http.Request) {
ctx := r.obsrecv.StartMetricsOp(req.Context())
if r.metricsConsumer == nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errMetricsNotConfigured, nil)
return
}
if req.Method != http.MethodPost {
r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil)
return
}
otlpFormat := false
switch req.Header.Get(httpContentTypeHeader) {
case protobufContentType:
case otlpProtobufContentType:
otlpFormat = true
default:
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidContentRespBody, nil)
return
}
body, ok := r.readBody(ctx, resp, req)
if !ok {
return
}
r.settings.Logger.Debug("Handling metrics data")
var md pmetric.Metrics
if otlpFormat {
r.settings.Logger.Debug("Received request is in OTLP format")
otlpreq := pmetricotlp.NewExportRequest()
if err := otlpreq.UnmarshalProto(body); err != nil {
r.settings.Logger.Debug("OTLP data unmarshalling failed", zap.Error(err))
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
md = otlpreq.Metrics()
} else {
msg := &sfxpb.DataPointUploadMessage{}
err := msg.Unmarshal(body)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
md, err = translator.ToMetrics(msg.Datapoints)
if err != nil {
r.settings.Logger.Debug("SignalFx conversion error", zap.Error(err))
}
}
dataPointCount := md.DataPointCount()
if dataPointCount == 0 {
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil)
_, _ = resp.Write(okRespBody)
return
}
r.addAccessTokenLabel(md, req)
err := r.metricsConsumer.ConsumeMetrics(ctx, md)
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), dataPointCount, err)
r.writeResponse(ctx, resp, err)
}
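// handleEventReq handles SignalFx protobuf event requests and passes the resulting logs to the consumer.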
func (r *sfxReceiver) handleEventReq(resp http.ResponseWriter, req *http.Request) {
ctx := r.obsrecv.StartMetricsOp(req.Context())
if r.logsConsumer == nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errLogsNotConfigured, nil)
return
}
if req.Method != http.MethodPost {
r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil)
return
}
if req.Header.Get(httpContentTypeHeader) != protobufContentType {
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEventsContentRespBody, nil)
return
}
body, ok := r.readBody(ctx, resp, req)
if !ok {
return
}
msg := &sfxpb.EventUploadMessage{}
if err := msg.Unmarshal(body); err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
if len(msg.Events) == 0 {
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil)
_, _ = resp.Write(okRespBody)
return
}
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
signalFxV2EventsToLogRecords(msg.Events, sl.LogRecords())
if r.config.AccessTokenPassthrough {
if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" {
rl.Resource().Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken)
}
}
err := r.logsConsumer.ConsumeLogs(ctx, ld)
r.obsrecv.EndMetricsOp(
ctx,
metadata.Type.String(),
len(msg.Events),
err)
r.writeResponse(ctx, resp, err)
}
func (r *sfxReceiver) failRequest(
ctx context.Context,
resp http.ResponseWriter,
httpStatusCode int,
jsonResponse []byte,
err error,
) {
resp.WriteHeader(httpStatusCode)
if len(jsonResponse) > 0 {
// The response needs to be written as a JSON string.
_, writeErr := resp.Write(jsonResponse)
if writeErr != nil {
r.settings.Logger.Warn(
"Error writing HTTP response message",
zap.Error(writeErr),
zap.String("receiver", r.settings.ID.String()))
}
}
// Use the same pattern as strings.Builder String().
msg := *(*string)(unsafe.Pointer(&jsonResponse))
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, err)
r.settings.Logger.Debug(
"SignalFx receiver request failed",
zap.Int("http_status_code", httpStatusCode),
zap.String("msg", msg),
zap.Error(err), // It handles nil error
)
}
func (r *sfxReceiver) addAccessTokenLabel(md pmetric.Metrics, req *http.Request) {
if r.config.AccessTokenPassthrough {
if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" {
for i := 0; i < md.ResourceMetrics().Len(); i++ {
rm := md.ResourceMetrics().At(i)
res := rm.Resource()
res.Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken)
}
}
}
}
func initJSONResponse(s string) []byte {
respBody, err := json.Marshal(s)
if err != nil {
// This is to be used in initialization so panic here is fine.
panic(err)
}
return respBody
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
// signalFxV2EventsToLogRecords converts SignalFx event protos into log
// records appended to the given plog.LogRecordSlice.
func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs plog.LogRecordSlice) {
lrs.EnsureCapacity(len(events))
for _, event := range events {
lr := lrs.AppendEmpty()
attrs := lr.Attributes()
attrs.EnsureCapacity(2 + len(event.Dimensions) + len(event.Properties))
for _, dim := range event.Dimensions {
attrs.PutStr(dim.Key, dim.Value)
}
// The EventType field is stored as an attribute.
eventType := event.EventType
if eventType == "" {
eventType = "unknown"
}
attrs.PutStr(splunk.SFxEventType, eventType)
// SignalFx timestamps are in millis so convert to nanos by multiplying
// by 1 million.
lr.SetTimestamp(pcommon.Timestamp(event.Timestamp * 1e6))
if event.Category != nil {
attrs.PutInt(splunk.SFxEventCategoryKey, int64(*event.Category))
} else {
// This gives us an unambiguous way of determining that a log record
// represents a SignalFx event, even if category is missing from the
// event.
attrs.PutEmpty(splunk.SFxEventCategoryKey)
}
if len(event.Properties) > 0 {
propMap := attrs.PutEmptyMap(splunk.SFxEventPropertiesKey)
propMap.EnsureCapacity(len(event.Properties))
for _, prop := range event.Properties {
// No way to tell what the value type is without testing each
// individually.
switch {
case prop.Value.StrValue != nil:
propMap.PutStr(prop.Key, prop.Value.GetStrValue())
case prop.Value.IntValue != nil:
propMap.PutInt(prop.Key, prop.Value.GetIntValue())
case prop.Value.DoubleValue != nil:
propMap.PutDouble(prop.Key, prop.Value.GetDoubleValue())
case prop.Value.BoolValue != nil:
propMap.PutBool(prop.Key, prop.Value.GetBoolValue())
default:
// If there is no property value, just insert a null to
// record that the key was present.
propMap.PutEmpty(prop.Key)
}
}
}
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
conventions "go.opentelemetry.io/collector/semconv/v1.27.0"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata"
)
// This file implements the factory for the Splunk HEC receiver.
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:8088"
)
// NewFactory creates a factory for Splunk HEC receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}
// createDefaultConfig creates the default configuration for the Splunk HEC receiver.
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
AccessTokenPassthroughConfig: splunk.AccessTokenPassthroughConfig{},
HecToOtelAttrs: splunk.HecToOtelAttrs{
Source: splunk.DefaultSourceLabel,
SourceType: splunk.DefaultSourceTypeLabel,
Index: splunk.DefaultIndexLabel,
Host: conventions.AttributeHostName,
},
RawPath: splunk.DefaultRawPath,
HealthPath: splunk.DefaultHealthPath,
Ack: Ack{
Extension: nil,
Path: splunk.DefaultAckPath,
},
Splitting: SplittingStrategyLine,
}
}
// createMetricsReceiver creates a metrics receiver based on the provided config.
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
var err error
var recv receiver.Metrics
rCfg := cfg.(*Config)
r := receivers.GetOrAdd(cfg, func() component.Component {
recv, err = newReceiver(params, *rCfg)
return recv
})
if err != nil {
return nil, err
}
r.Unwrap().(*splunkReceiver).metricsConsumer = consumer
return r, nil
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
var err error
var recv receiver.Logs
rCfg := cfg.(*Config)
r := receivers.GetOrAdd(cfg, func() component.Component {
recv, err = newReceiver(params, *rCfg)
return recv
})
if err != nil {
return nil, err
}
r.Unwrap().(*splunkReceiver).logsConsumer = consumer
return r, nil
}
var receivers = sharedcomponent.NewSharedComponents()
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata"
)
const (
defaultServerTimeout = 20 * time.Second
ackResponse = `{"acks": %s}`
responseOK = `{"text": "Success", "code": 0}`
responseOKWithAckID = `{"text": "Success", "code": 0, "ackId": %d}`
responseHecHealthy = `{"text": "HEC is healthy", "code": 17}`
responseInvalidMethodPostOnly = `"Only \"POST\" method is supported"`
responseInvalidEncoding = `"\"Content-Encoding\" must be \"gzip\" or empty"`
responseInvalidDataFormat = `{"text":"Invalid data format","code":6}`
responseErrEventRequired = `{"text":"Event field is required","code":12}`
responseErrEventBlank = `{"text":"Event field cannot be blank","code":13}`
responseErrGzipReader = `"Error on gzip body"`
responseErrUnmarshalBody = `"Failed to unmarshal message body"`
responseErrInternalServerError = `"Internal Server Error"`
responseErrUnsupportedMetricEvent = `"Unsupported metric event"`
responseErrUnsupportedLogEvent = `"Unsupported log event"`
responseErrHandlingIndexedFields = `{"text":"Error in handling indexed fields","code":15,"invalid-event-number":%d}`
responseErrDataChannelMissing = `{"text": "Data channel is missing","code":10}`
responseErrInvalidDataChannel = `{"text": "Invalid data channel", "code": 11}`
responseNoData = `{"text":"No data","code":5}`
// Centralizing some HTTP and related string constants.
gzipEncoding = "gzip"
httpContentEncodingHeader = "Content-Encoding"
httpContentTypeHeader = "Content-Type"
httpJSONTypeHeader = "application/json"
)
var (
errEmptyEndpoint = errors.New("empty endpoint")
errInvalidMethod = errors.New("invalid http method")
errInvalidEncoding = errors.New("invalid encoding")
errExtensionMissing = errors.New("ack extension not found")
okRespBody = []byte(responseOK)
eventRequiredRespBody = []byte(responseErrEventRequired)
eventBlankRespBody = []byte(responseErrEventBlank)
requiredDataChannelHeader = []byte(responseErrDataChannelMissing)
invalidEncodingRespBody = []byte(responseInvalidEncoding)
invalidFormatRespBody = []byte(responseInvalidDataFormat)
invalidMethodRespBodyPostOnly = []byte(responseInvalidMethodPostOnly)
errGzipReaderRespBody = []byte(responseErrGzipReader)
errUnmarshalBodyRespBody = []byte(responseErrUnmarshalBody)
errInternalServerError = []byte(responseErrInternalServerError)
errUnsupportedMetricEvent = []byte(responseErrUnsupportedMetricEvent)
errUnsupportedLogEvent = []byte(responseErrUnsupportedLogEvent)
noDataRespBody = []byte(responseNoData)
)
// splunkReceiver implements the receiver.Metrics for Splunk HEC metric protocol.
type splunkReceiver struct {
settings receiver.Settings
config *Config
logsConsumer consumer.Logs
metricsConsumer consumer.Metrics
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
gzipReaderPool *sync.Pool
ackExt ackextension.AckExtension
}
var (
_ receiver.Metrics = (*splunkReceiver)(nil)
_ receiver.Logs = (*splunkReceiver)(nil)
)
// newReceiver creates the Splunk HEC receiver with the given configuration.
func newReceiver(settings receiver.Settings, config Config) (*splunkReceiver, error) {
if config.Endpoint == "" {
return nil, errEmptyEndpoint
}
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: settings.ID,
Transport: transport,
ReceiverCreateSettings: settings,
})
if err != nil {
return nil, err
}
r := &splunkReceiver{
settings: settings,
config: &config,
server: &http.Server{
Addr: config.Endpoint,
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
ReadHeaderTimeout: defaultServerTimeout,
WriteTimeout: defaultServerTimeout,
},
obsrecv: obsrecv,
gzipReaderPool: &sync.Pool{New: func() any { return new(gzip.Reader) }},
}
return r, nil
}
// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *splunkReceiver) Start(ctx context.Context, host component.Host) error {
// server.Handler is nil on the initial call; if it is already set, Start is a no-op.
if r.server != nil && r.server.Handler != nil {
return nil
}
mx := mux.NewRouter()
// set up the ack API handler if the ack extension is present
if r.config.Ack.Extension != nil {
if ext, found := host.GetExtensions()[*r.config.Ack.Extension]; found {
r.ackExt = ext.(ackextension.AckExtension)
mx.NewRoute().Path(r.config.Ack.Path).HandlerFunc(r.handleAck)
} else {
return fmt.Errorf("specified ack extension with id %q could not be found", *r.config.Ack.Extension)
}
}
mx.NewRoute().Path(r.config.HealthPath).HandlerFunc(r.handleHealthReq)
mx.NewRoute().Path(r.config.HealthPath + "/1.0").HandlerFunc(r.handleHealthReq).Methods(http.MethodGet)
if r.logsConsumer != nil {
mx.NewRoute().Path(r.config.RawPath).HandlerFunc(r.handleRawReq)
}
mx.NewRoute().HandlerFunc(r.handleReq)
// set up the listener
ln, err := r.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
}
r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
if err != nil {
return err
}
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
r.server.ReadHeaderTimeout = defaultServerTimeout
r.server.WriteTimeout = defaultServerTimeout
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return err
}
// Shutdown tells the receiver that it should stop reception,
// giving it a chance to perform any necessary clean-up.
func (r *splunkReceiver) Shutdown(context.Context) error {
err := r.server.Close()
r.shutdownWG.Wait()
return err
}
func (r *splunkReceiver) processSuccessResponseWithAck(resp http.ResponseWriter, channelID string) error {
if r.ackExt == nil {
panic("writing response with ack when ack extension is not configured")
}
ackID := r.ackExt.ProcessEvent(channelID)
r.ackExt.Ack(channelID, ackID)
return r.processSuccessResponse(resp, []byte(fmt.Sprintf(responseOKWithAckID, ackID)))
}
func (r *splunkReceiver) processSuccessResponse(resp http.ResponseWriter, bodyContent []byte) error {
resp.Header().Set(httpContentTypeHeader, httpJSONTypeHeader)
resp.WriteHeader(http.StatusOK)
_, err := resp.Write(bodyContent)
return err
}
func (r *splunkReceiver) handleAck(resp http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
// This case should be unreachable, since the handler is registered only when ackExt exists, but guard against it anyway.
if r.ackExt == nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, errExtensionMissing)
return
}
var channelID string
var extracted bool
if channelID, extracted = r.extractChannel(req); extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
} else {
r.failRequest(resp, http.StatusBadRequest, requiredDataChannelHeader, nil)
return
}
dec := json.NewDecoder(req.Body)
var ackRequest splunk.AckRequest
err := dec.Decode(&ackRequest)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
if len(ackRequest.Acks) == 0 {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, errors.New("request body must include at least one ackID to be queried"))
return
}
queriedAcks := r.ackExt.QueryAcks(channelID, ackRequest.Acks)
ackString, _ := json.Marshal(queriedAcks)
if err := r.processSuccessResponse(resp, []byte(fmt.Sprintf(ackResponse, ackString))); err != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err)
}
}
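// A minimal client-side sketch of the ack-query flow served above. The host,
// port, and ack path are illustrative assumptions; the real path comes from
// the receiver's Ack.Path config:
//
//	req, _ := http.NewRequest(http.MethodPost,
//		"http://localhost:8088/services/collector/ack",
//		strings.NewReader(`{"acks":[1,2,3]}`))
//	req.Header.Set("X-Splunk-Request-Channel", "fbd3036f-0f1c-4e98-b71c-d4cd61f1e8ea")
//	req.Header.Set("Content-Type", "application/json")
//	resp, err := http.DefaultClient.Do(req)
//	// On success the body has the form {"acks": {"1":true,"2":false,"3":false}}.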
func (r *splunkReceiver) handleRawReq(resp http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx = r.obsrecv.StartLogsOp(ctx)
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding)
return
}
var channelID string
var extracted bool
if channelID, extracted = r.extractChannel(req); extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
}
if req.ContentLength == 0 {
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil)
r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil)
return
}
bodyReader := req.Body
if encoding == gzipEncoding {
reader := r.gzipReaderPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err)
_, _ = io.ReadAll(req.Body)
_ = req.Body.Close()
return
}
bodyReader = reader
defer r.gzipReaderPool.Put(reader)
}
resourceCustomizer := r.createResourceCustomizer(req)
query := req.URL.Query()
var timestamp pcommon.Timestamp
if query.Has(queryTime) {
t, err := strconv.ParseInt(query.Get(queryTime), 10, 64)
if t < 0 {
err = errors.New("time cannot be less than 0")
}
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
timestamp = pcommon.NewTimestampFromTime(time.Unix(t, 0))
}
ld, slLen, err := splunkHecRawToLogData(bodyReader, query, resourceCustomizer, r.config, timestamp)
if err != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err)
return
}
consumerErr := r.logsConsumer.ConsumeLogs(ctx, ld)
_ = bodyReader.Close()
if consumerErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, consumerErr)
} else {
var ackErr error
if len(channelID) > 0 && r.ackExt != nil {
ackErr = r.processSuccessResponseWithAck(resp, channelID)
} else {
ackErr = r.processSuccessResponse(resp, okRespBody)
}
if ackErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
} else {
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), slLen, nil)
}
}
}
func (r *splunkReceiver) extractChannel(req *http.Request) (string, bool) {
// check header
for k, v := range req.Header {
if strings.EqualFold(k, splunk.HTTPSplunkChannelHeader) {
return strings.ToUpper(v[0]), true
}
}
// check query param
for k, v := range req.URL.Query() {
if strings.EqualFold(k, "channel") {
return strings.ToUpper(v[0]), true
}
}
return "", false
}
func (r *splunkReceiver) validateChannelHeader(channelID string) error {
if len(channelID) == 0 {
return errors.New(responseErrDataChannelMissing)
}
// channel id must be a valid uuid
// https://docs.splunk.com/Documentation/Splunk/9.2.1/Data/AboutHECIDXAck#:~:text=close%20the%20file.-,About%20channels%20and%20sending%20data,-Sending%20events%20to
_, err := uuid.Parse(channelID)
if err != nil {
return errors.New(responseErrInvalidDataChannel)
}
return nil
}
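// For illustration, how the two helpers above combine (values made up):
//
//	// Header X-Splunk-Request-Channel: fbd3036f-0f1c-4e98-b71c-d4cd61f1e8ea
//	//   extractChannel -> ("FBD3036F-0F1C-4E98-B71C-D4CD61F1E8EA", true)
//	//   validateChannelHeader -> nil (parses as a UUID)
//	// Query ?channel=not-a-uuid
//	//   extractChannel -> ("NOT-A-UUID", true)
//	//   validateChannelHeader -> error whose text is responseErrInvalidDataChannel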
func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request) {
ctx := req.Context()
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding)
return
}
channelID, extracted := r.extractChannel(req)
if extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
}
bodyReader := req.Body
if encoding == gzipEncoding {
reader := r.gzipReaderPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err)
return
}
bodyReader = reader
defer r.gzipReaderPool.Put(reader)
}
if req.ContentLength == 0 {
r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil)
return
}
dec := jsoniter.NewDecoder(bodyReader)
var events []*splunk.Event
var metricEvents []*splunk.Event
for dec.More() {
var msg splunk.Event
err := dec.Decode(&msg)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
if msg.Event == nil {
r.failRequest(resp, http.StatusBadRequest, eventRequiredRespBody, nil)
return
}
if msg.Event == "" {
r.failRequest(resp, http.StatusBadRequest, eventBlankRespBody, nil)
return
}
for _, v := range msg.Fields {
if !isFlatJSONField(v) {
r.failRequest(resp, http.StatusBadRequest, []byte(fmt.Sprintf(responseErrHandlingIndexedFields, len(events)+len(metricEvents))), nil)
return
}
}
if msg.IsMetric() {
if r.metricsConsumer == nil {
r.failRequest(resp, http.StatusBadRequest, errUnsupportedMetricEvent, nil)
return
}
metricEvents = append(metricEvents, &msg)
} else {
if r.logsConsumer == nil {
r.failRequest(resp, http.StatusBadRequest, errUnsupportedLogEvent, nil)
return
}
events = append(events, &msg)
}
}
resourceCustomizer := r.createResourceCustomizer(req)
if r.logsConsumer != nil && len(events) > 0 {
ld, err := splunkHecToLogData(r.settings.Logger, events, resourceCustomizer, r.config)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
ctx = r.obsrecv.StartLogsOp(ctx)
decodeErr := r.logsConsumer.ConsumeLogs(ctx, ld)
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), len(events), nil)
if decodeErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
return
}
}
if r.metricsConsumer != nil && len(metricEvents) > 0 {
md, _ := splunkHecToMetricsData(r.settings.Logger, metricEvents, resourceCustomizer, r.config)
ctx = r.obsrecv.StartMetricsOp(ctx)
decodeErr := r.metricsConsumer.ConsumeMetrics(ctx, md)
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), len(metricEvents), nil)
if decodeErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
return
}
}
var ackErr error
if len(channelID) > 0 && r.ackExt != nil {
ackErr = r.processSuccessResponseWithAck(resp, channelID)
} else {
ackErr = r.processSuccessResponse(resp, okRespBody)
}
if ackErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
}
}
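// A hedged sketch of the request shape handleReq expects: a stream of JSON
// events, possibly mixing log and metric payloads. Host, port, and token are
// assumptions for illustration:
//
//	body := strings.NewReader(
//		`{"event":"hello world","sourcetype":"my_app"}` + "\n" +
//		`{"event":"metric","fields":{"metric_name:cpu.idle":97.2}}`)
//	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8088/services/collector", body)
//	req.Header.Set("Authorization", "Splunk 00000000-0000-0000-0000-000000000000")
//	resp, _ := http.DefaultClient.Do(req)
//	// Body on success: {"text": "Success", "code": 0}, or the ackId variant
//	// when a channel header is supplied and the ack extension is configured.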
func (r *splunkReceiver) createResourceCustomizer(req *http.Request) func(resource pcommon.Resource) {
if r.config.AccessTokenPassthrough {
accessToken := req.Header.Get("Authorization")
if strings.HasPrefix(accessToken, splunk.HECTokenHeader+" ") {
accessTokenValue := accessToken[len(splunk.HECTokenHeader)+1:]
return func(resource pcommon.Resource) {
resource.Attributes().PutStr(splunk.HecTokenLabel, accessTokenValue)
}
}
}
return nil
}
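// For example, assuming access_token_passthrough is enabled, a request carrying
//
//	Authorization: Splunk 12345678-1234-1234-1234-123456789012
//
// yields a customizer that stamps each resource with the splunk.HecTokenLabel
// attribute set to "12345678-1234-1234-1234-123456789012".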
func (r *splunkReceiver) failRequest(
resp http.ResponseWriter,
httpStatusCode int,
jsonResponse []byte,
err error,
) {
if len(jsonResponse) > 0 {
// The response body is a JSON string; the header must be set before WriteHeader.
resp.Header().Set(httpContentTypeHeader, httpJSONTypeHeader)
}
resp.WriteHeader(httpStatusCode)
if len(jsonResponse) > 0 {
_, writeErr := resp.Write(jsonResponse)
if writeErr != nil {
r.settings.Logger.Warn("Error writing HTTP response message", zap.Error(writeErr))
}
}
if r.settings.Logger.Core().Enabled(zap.DebugLevel) {
msg := string(jsonResponse)
r.settings.Logger.Debug(
"Splunk HEC receiver request failed",
zap.Int("http_status_code", httpStatusCode),
zap.String("msg", msg),
zap.Error(err), // It handles nil error
)
}
}
func (r *splunkReceiver) handleHealthReq(writer http.ResponseWriter, _ *http.Request) {
writer.Header().Add("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write([]byte(responseHecHealthy))
}
func isFlatJSONField(field any) bool {
switch value := field.(type) {
case map[string]any:
return false
case []any:
for _, v := range value {
switch v.(type) {
case map[string]any, []any:
return false
}
}
}
return true
}
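// Examples of what isFlatJSONField accepts and rejects:
//
//	isFlatJSONField("value")                  // true: scalar
//	isFlatJSONField(42.0)                     // true: scalar
//	isFlatJSONField([]any{"a", 1.0})          // true: flat array
//	isFlatJSONField(map[string]any{"k": "v"}) // false: nested object
//	isFlatJSONField([]any{[]any{"x"}})        // false: array nested in array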
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"bufio"
"errors"
"io"
"net/url"
"sort"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
const (
// splunk metadata
index = "index"
source = "source"
sourcetype = "sourcetype"
host = "host"
queryTime = "time"
)
var errCannotConvertValue = errors.New("cannot convert field value to attribute")
// splunkHecToLogData transforms Splunk HEC events into logs
func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (plog.Logs, error) {
ld := plog.NewLogs()
scopeLogsMap := make(map[[4]string]plog.ScopeLogs)
for _, event := range events {
key := [4]string{event.Host, event.Source, event.SourceType, event.Index}
var sl plog.ScopeLogs
var found bool
if sl, found = scopeLogsMap[key]; !found {
rl := ld.ResourceLogs().AppendEmpty()
sl = rl.ScopeLogs().AppendEmpty()
scopeLogsMap[key] = sl
appendSplunkMetadata(rl, config.HecToOtelAttrs, event.Host, event.Source, event.SourceType, event.Index)
if resourceCustomizer != nil {
resourceCustomizer(rl.Resource())
}
}
// The SourceType field is the most logical "name" of the event.
logRecord := sl.LogRecords().AppendEmpty()
if err := convertToValue(logger, event.Event, logRecord.Body()); err != nil {
return ld, err
}
// Splunk timestamps are in seconds so convert to nanos by multiplying
// by 1 billion.
logRecord.SetTimestamp(pcommon.Timestamp(event.Time * 1e9))
// Set event fields first, so the specialized attributes overwrite them if needed.
keys := make([]string, 0, len(event.Fields))
for k := range event.Fields {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
val := event.Fields[key]
err := convertToValue(logger, val, logRecord.Attributes().PutEmpty(key))
if err != nil {
return ld, err
}
}
}
return ld, nil
}
// splunkHecRawToLogData transforms a raw Splunk HEC event into logs
func splunkHecRawToLogData(bodyReader io.Reader, query url.Values, resourceCustomizer func(pcommon.Resource), config *Config, timestamp pcommon.Timestamp) (plog.Logs, int, error) {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
appendSplunkMetadata(rl, config.HecToOtelAttrs, query.Get(host), query.Get(source), query.Get(sourcetype), query.Get(index))
if resourceCustomizer != nil {
resourceCustomizer(rl.Resource())
}
sl := rl.ScopeLogs().AppendEmpty()
if config.Splitting == SplittingStrategyNone {
b, err := io.ReadAll(bodyReader)
if err != nil {
return ld, 0, err
}
logRecord := sl.LogRecords().AppendEmpty()
logRecord.Body().SetStr(string(b))
logRecord.SetTimestamp(timestamp)
} else {
sc := bufio.NewScanner(bodyReader)
for sc.Scan() {
logRecord := sl.LogRecords().AppendEmpty()
logLine := sc.Text()
logRecord.Body().SetStr(logLine)
logRecord.SetTimestamp(timestamp)
}
}
return ld, sl.LogRecords().Len(), nil
}
func appendSplunkMetadata(rl plog.ResourceLogs, attrs splunk.HecToOtelAttrs, host, source, sourceType, index string) {
if host != "" {
rl.Resource().Attributes().PutStr(attrs.Host, host)
}
if source != "" {
rl.Resource().Attributes().PutStr(attrs.Source, source)
}
if sourceType != "" {
rl.Resource().Attributes().PutStr(attrs.SourceType, sourceType)
}
if index != "" {
rl.Resource().Attributes().PutStr(attrs.Index, index)
}
}
func convertToValue(logger *zap.Logger, src any, dest pcommon.Value) error {
switch value := src.(type) {
case nil:
case string:
dest.SetStr(value)
case int64:
dest.SetInt(value)
case float64:
dest.SetDouble(value)
case bool:
dest.SetBool(value)
case map[string]any:
return convertToAttributeMap(logger, value, dest)
case []any:
return convertToSliceVal(logger, value, dest)
default:
logger.Debug("Unsupported value conversion", zap.Any("value", src))
return errCannotConvertValue
}
return nil
}
func convertToSliceVal(logger *zap.Logger, value []any, dest pcommon.Value) error {
arr := dest.SetEmptySlice()
for _, elt := range value {
err := convertToValue(logger, elt, arr.AppendEmpty())
if err != nil {
return err
}
}
return nil
}
func convertToAttributeMap(logger *zap.Logger, value map[string]any, dest pcommon.Value) error {
attrMap := dest.SetEmptyMap()
keys := make([]string, 0, len(value))
for k := range value {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := value[k]
if err := convertToValue(logger, v, attrMap.PutEmpty(k)); err != nil {
return err
}
}
return nil
}
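// A small worked example of convertToValue on a nested payload (sketch only;
// assumes a no-op logger is acceptable for the caller):
//
//	dest := pcommon.NewValueEmpty()
//	_ = convertToValue(zap.NewNop(), map[string]any{
//		"user": "alice",
//		"tags": []any{"a", "b"},
//	}, dest)
//	// dest is now a map value {"tags":["a","b"],"user":"alice"}; keys are
//	// inserted in sorted order, so the output is deterministic.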
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"fmt"
"strconv"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
// splunkHecToMetricsData converts Splunk HEC metric points to
// pmetric.Metrics, returning the converted data and the number of
// dropped time series.
func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (pmetric.Metrics, int) {
numDroppedTimeSeries := 0
md := pmetric.NewMetrics()
scopeMetricsMap := make(map[[4]string]pmetric.ScopeMetrics)
for _, event := range events {
values := event.GetMetricValues()
labels := buildAttributes(event.Fields)
metrics := pmetric.NewMetricSlice()
for metricName, metricValue := range values {
pointTimestamp := convertTimestamp(event.Time)
switch v := metricValue.(type) {
case int64:
addIntGauge(metrics, metricName, v, pointTimestamp, labels)
case *int64:
addIntGauge(metrics, metricName, *v, pointTimestamp, labels)
case float64:
addDoubleGauge(metrics, metricName, v, pointTimestamp, labels)
case *float64:
addDoubleGauge(metrics, metricName, *v, pointTimestamp, labels)
case string:
convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, v, labels)
case *string:
convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, *v, labels)
default:
// drop this point as we do not know how to extract a value from it
numDroppedTimeSeries++
logger.Debug("Cannot convert metric, unknown input type",
zap.String("metric", metricName))
}
}
if metrics.Len() == 0 {
continue
}
key := [4]string{event.Host, event.Source, event.SourceType, event.Index}
var sm pmetric.ScopeMetrics
var found bool
if sm, found = scopeMetricsMap[key]; !found {
resourceMetrics := md.ResourceMetrics().AppendEmpty()
sm = resourceMetrics.ScopeMetrics().AppendEmpty()
scopeMetricsMap[key] = sm
attrs := resourceMetrics.Resource().Attributes()
if event.Host != "" {
attrs.PutStr(config.HecToOtelAttrs.Host, event.Host)
}
if event.Source != "" {
attrs.PutStr(config.HecToOtelAttrs.Source, event.Source)
}
if event.SourceType != "" {
attrs.PutStr(config.HecToOtelAttrs.SourceType, event.SourceType)
}
if event.Index != "" {
attrs.PutStr(config.HecToOtelAttrs.Index, event.Index)
}
if resourceCustomizer != nil {
resourceCustomizer(resourceMetrics.Resource())
}
}
metrics.MoveAndAppendTo(sm.Metrics())
}
return md, numDroppedTimeSeries
}
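// For orientation (assuming the default HecToOtelAttrs mapping), a single HEC
// metric event such as
//
//	{"time":1684000000,"host":"h1","fields":{"metric_name:cpu.idle":97.2,"region":"us-east"}}
//
// becomes one ResourceMetrics whose resource carries the host attribute "h1",
// containing a double gauge named "cpu.idle" with value 97.2, the event time as
// its timestamp, and the data point attribute region="us-east".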
func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pmetric.MetricSlice, metricName string, pointTimestamp pcommon.Timestamp, s string, attributes pcommon.Map) {
// best effort: try to parse the string value as a number
dbl, err := strconv.ParseFloat(s, 64)
if err != nil {
*numDroppedTimeSeries++
logger.Debug("Cannot convert metric value from string to number",
zap.String("metric", metricName))
} else {
addDoubleGauge(metrics, metricName, dbl, pointTimestamp, attributes)
}
}
func addIntGauge(metrics pmetric.MetricSlice, metricName string, value int64, ts pcommon.Timestamp, attributes pcommon.Map) {
metric := metrics.AppendEmpty()
metric.SetName(metricName)
intPt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
intPt.SetTimestamp(ts)
intPt.SetIntValue(value)
attributes.CopyTo(intPt.Attributes())
}
func addDoubleGauge(metrics pmetric.MetricSlice, metricName string, value float64, ts pcommon.Timestamp, attributes pcommon.Map) {
metric := metrics.AppendEmpty()
metric.SetName(metricName)
doublePt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
doublePt.SetTimestamp(ts)
doublePt.SetDoubleValue(value)
attributes.CopyTo(doublePt.Attributes())
}
func convertTimestamp(sec float64) pcommon.Timestamp {
return pcommon.Timestamp(sec * 1e9)
}
// Extract dimensions from the Splunk event fields to populate metric data point attributes.
func buildAttributes(dimensions map[string]any) pcommon.Map {
attributes := pcommon.NewMap()
attributes.EnsureCapacity(len(dimensions))
for key, val := range dimensions {
if strings.HasPrefix(key, "metric_name") || key == "_value" {
continue
}
if key == "" || val == nil {
// TODO: Log or metric for this odd ball?
continue
}
attributes.PutStr(key, fmt.Sprintf("%v", val))
}
return attributes
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"errors"
"time"
"go.opentelemetry.io/collector/config/confighttp"
"go.uber.org/multierr"
)
var (
errMissingEndpointFromConfig = errors.New("missing receiver server endpoint from config")
errReadTimeoutExceedsMaxValue = errors.New("the duration specified for read_timeout exceeds the maximum allowed value of 10s")
errWriteTimeoutExceedsMaxValue = errors.New("the duration specified for write_timeout exceeds the maximum allowed value of 10s")
errRequiredHeader = errors.New("both key and value are required to assign a required_header")
)
// Config defines configuration for the Generic Webhook receiver.
type Config struct {
confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
ReadTimeout string `mapstructure:"read_timeout"` // maximum duration for reading request headers, expressed as a duration string. Default is 500ms.
WriteTimeout string `mapstructure:"write_timeout"` // maximum duration for writing the request response, expressed as a duration string. Default is 500ms.
Path string `mapstructure:"path"` // path for data collection. Default is /events
HealthPath string `mapstructure:"health_path"` // path for health check api. Default is /health_check
RequiredHeader RequiredHeader `mapstructure:"required_header"` // optional setting to set a required header for all requests to have
}
type RequiredHeader struct {
Key string `mapstructure:"key"`
Value string `mapstructure:"value"`
}
func (cfg *Config) Validate() error {
var errs error
maxReadWriteTimeout, _ := time.ParseDuration("10s")
if cfg.ServerConfig.Endpoint == "" {
errs = multierr.Append(errs, errMissingEndpointFromConfig)
}
// If a user defines a custom read/write timeout there is a maximum value
// of 10s imposed here.
if cfg.ReadTimeout != "" {
readTimeout, err := time.ParseDuration(cfg.ReadTimeout)
if err != nil {
errs = multierr.Append(errs, err)
}
if readTimeout > maxReadWriteTimeout {
errs = multierr.Append(errs, errReadTimeoutExceedsMaxValue)
}
}
if cfg.WriteTimeout != "" {
writeTimeout, err := time.ParseDuration(cfg.WriteTimeout)
if err != nil {
errs = multierr.Append(errs, err)
}
if writeTimeout > maxReadWriteTimeout {
errs = multierr.Append(errs, errWriteTimeoutExceedsMaxValue)
}
}
if (cfg.RequiredHeader.Key != "" && cfg.RequiredHeader.Value == "") || (cfg.RequiredHeader.Value != "" && cfg.RequiredHeader.Key == "") {
errs = multierr.Append(errs, errRequiredHeader)
}
return errs
}
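// A minimal sketch of exercising Validate directly (values are illustrative):
//
//	cfg := &Config{
//		ReadTimeout:  "15s", // exceeds the 10s cap -> errReadTimeoutExceedsMaxValue
//		WriteTimeout: "500ms",
//	}
//	cfg.ServerConfig.Endpoint = "localhost:8080"
//	err := cfg.Validate() // a multierr holding only the read_timeout violation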
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
var scopeLogName = "otlp/" + metadata.Type.String()
const (
// A default endpoint might be added later; for now the user is
// required to declare a valid endpoint.
// Default endpoints to bind to.
// defaultEndpoint = "localhost:8080"
defaultReadTimeout = "500ms"
defaultWriteTimeout = "500ms"
defaultPath = "/events"
defaultHealthPath = "/health_check"
)
// NewFactory creates a factory for Generic Webhook Receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithLogs(createLogsReceiver, metadata.LogsStability),
)
}
// Default configuration for the generic webhook receiver
func createDefaultConfig() component.Config {
return &Config{
Path: defaultPath,
HealthPath: defaultHealthPath,
ReadTimeout: defaultReadTimeout,
WriteTimeout: defaultWriteTimeout,
}
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
conf := cfg.(*Config)
return newLogsReceiver(params, *conf, consumer)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"bufio"
"compress/gzip"
"context"
"errors"
"io"
"net/http"
"sync"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/julienschmidt/httprouter"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
var (
errNilLogsConsumer = errors.New("missing a logs consumer")
errMissingEndpoint = errors.New("missing a receiver endpoint")
errInvalidRequestMethod = errors.New("invalid method. Valid method is POST")
errInvalidEncodingType = errors.New("invalid encoding type")
errEmptyResponseBody = errors.New("request body content length is zero")
errMissingRequiredHeader = errors.New("request was missing required header or incorrect header value")
)
const healthyResponse = `{"text": "Webhookevent receiver is healthy"}`
type eventReceiver struct {
settings receiver.Settings
cfg *Config
logConsumer consumer.Logs
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
gzipPool *sync.Pool
}
func newLogsReceiver(params receiver.Settings, cfg Config, consumer consumer.Logs) (receiver.Logs, error) {
if consumer == nil {
return nil, errNilLogsConsumer
}
if cfg.Endpoint == "" {
return nil, errMissingEndpoint
}
transport := "http"
if cfg.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: params.ID,
Transport: transport,
ReceiverCreateSettings: params,
})
if err != nil {
return nil, err
}
// create eventReceiver instance
er := &eventReceiver{
settings: params,
cfg: &cfg,
logConsumer: consumer,
obsrecv: obsrecv,
gzipPool: &sync.Pool{New: func() any { return new(gzip.Reader) }},
}
return er, nil
}
// Start manages receiver startup tasks. Part of the receiver.Logs interface.
func (er *eventReceiver) Start(ctx context.Context, host component.Host) error {
// No-op if the handler is already set; if Start has not been called before, these values should be nil.
if er.server != nil && er.server.Handler != nil {
return nil
}
// create listener from config
ln, err := er.cfg.ServerConfig.ToListener(ctx)
if err != nil {
return err
}
// set up router.
router := httprouter.New()
router.POST(er.cfg.Path, er.handleReq)
router.GET(er.cfg.HealthPath, er.handleHealthCheck)
// webhook server standup and configuration
er.server, err = er.cfg.ServerConfig.ToServer(ctx, host, er.settings.TelemetrySettings, router)
if err != nil {
return err
}
readTimeout, err := time.ParseDuration(er.cfg.ReadTimeout)
if err != nil {
return err
}
writeTimeout, err := time.ParseDuration(er.cfg.WriteTimeout)
if err != nil {
return err
}
// set timeouts
er.server.ReadHeaderTimeout = readTimeout
er.server.WriteTimeout = writeTimeout
// serve in the background; the WaitGroup lets Shutdown block until the server exits.
er.shutdownWG.Add(1)
go func() {
defer er.shutdownWG.Done()
if errHTTP := er.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown manages receiver shutdown tasks. Part of the receiver.Logs interface.
func (er *eventReceiver) Shutdown(_ context.Context) error {
// server must exist to be closed.
if er.server == nil {
return nil
}
err := er.server.Close()
er.shutdownWG.Wait()
return err
}
// handleReq handles an incoming request from a webhook. On success it returns a 200 response code to the webhook.
func (er *eventReceiver) handleReq(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
ctx := r.Context()
ctx = er.obsrecv.StartLogsOp(ctx)
if r.Method != http.MethodPost {
er.failBadReq(ctx, w, http.StatusBadRequest, errInvalidRequestMethod)
return
}
if er.cfg.RequiredHeader.Key != "" {
requiredHeaderValue := r.Header.Get(er.cfg.RequiredHeader.Key)
if requiredHeaderValue != er.cfg.RequiredHeader.Value {
er.failBadReq(ctx, w, http.StatusUnauthorized, errMissingRequiredHeader)
return
}
}
encoding := r.Header.Get("Content-Encoding")
// only gzip (and its x-gzip alias) is supported when an encoding header is set.
if encoding != "" && encoding != "gzip" && encoding != "x-gzip" {
er.failBadReq(ctx, w, http.StatusUnsupportedMediaType, errInvalidEncodingType)
return
}
if r.ContentLength == 0 {
er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil)
er.failBadReq(ctx, w, http.StatusBadRequest, errEmptyResponseBody)
return
}
bodyReader := r.Body
// gzip encoded case
if encoding == "gzip" || encoding == "x-gzip" {
reader := er.gzipPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
er.failBadReq(ctx, w, http.StatusBadRequest, err)
_, _ = io.ReadAll(r.Body)
_ = r.Body.Close()
return
}
bodyReader = reader
defer er.gzipPool.Put(reader)
}
// send body into a scanner and then convert the request body into a log
sc := bufio.NewScanner(bodyReader)
ld, numLogs := reqToLog(sc, r.URL.Query(), er.cfg, er.settings)
consumerErr := er.logConsumer.ConsumeLogs(ctx, ld)
_ = bodyReader.Close()
if consumerErr != nil {
er.failBadReq(ctx, w, http.StatusInternalServerError, consumerErr)
} else {
w.WriteHeader(http.StatusOK)
}
er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), numLogs, consumerErr)
}
// Simple healthcheck endpoint.
func (er *eventReceiver) handleHealthCheck(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(healthyResponse))
}
// failBadReq writes a response for a failed/bad request. It generates a small JSON body from the error
// thrown by the handler func and the appropriate HTTP status code. Many webhooks will either log these
// responses or notify webhook users should a non-2xx code be detected.
func (er *eventReceiver) failBadReq(_ context.Context,
w http.ResponseWriter,
httpStatusCode int,
err error,
) {
// Marshal into a separate variable so the original error is preserved for the debug log below.
jsonResp, marshalErr := jsoniter.Marshal(err.Error())
if marshalErr != nil {
er.settings.Logger.Warn("failed to marshal error to json", zap.Error(marshalErr))
}
// write response to webhook; headers must be set before WriteHeader.
if len(jsonResp) > 0 {
w.Header().Add("Content-Type", "application/json")
}
w.WriteHeader(httpStatusCode)
if len(jsonResp) > 0 {
if _, writeErr := w.Write(jsonResp); writeErr != nil {
er.settings.Logger.Warn("failed to write json response", zap.Error(writeErr))
}
}
// log bad webhook request if debug is enabled
if er.settings.Logger.Core().Enabled(zap.DebugLevel) {
msg := string(jsonResp)
er.settings.Logger.Debug(msg, zap.Int("http_status_code", httpStatusCode), zap.Error(err))
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"bufio"
"net/url"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
func reqToLog(sc *bufio.Scanner,
query url.Values,
_ *Config,
settings receiver.Settings,
) (plog.Logs, int) {
// we simply don't split the data passed into scan (i.e. we scan the whole thing).
// The downside to this approach is that only one log per request can be handled.
// NOTE: logs will contain newline characters, which could have formatting
// consequences downstream.
split := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if !atEOF {
return 0, nil, nil
}
return 0, data, bufio.ErrFinalToken
}
sc.Split(split)
log := plog.NewLogs()
resourceLog := log.ResourceLogs().AppendEmpty()
appendMetadata(resourceLog, query)
scopeLog := resourceLog.ScopeLogs().AppendEmpty()
scopeLog.Scope().SetName(scopeLogName)
scopeLog.Scope().SetVersion(settings.BuildInfo.Version)
scopeLog.Scope().Attributes().PutStr("source", settings.ID.String())
scopeLog.Scope().Attributes().PutStr("receiver", metadata.Type.String())
for sc.Scan() {
logRecord := scopeLog.LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
line := sc.Text()
logRecord.Body().SetStr(line)
}
return log, scopeLog.LogRecords().Len()
}
// append query parameters and webhook source as resource attributes
func appendMetadata(resourceLog plog.ResourceLogs, query url.Values) {
for k := range query {
if query.Get(k) != "" {
resourceLog.Resource().Attributes().PutStr(k, query.Get(k))
}
}
}
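// End-to-end sketch: a POST to /events?env=prod&team= with body "a\nb" yields
// one ResourceLogs with resource attribute env="prod" (empty-valued parameters
// such as team are skipped) and a single log record whose body is "a\nb",
// since the custom split function above hands the scanner the whole payload.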
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package data // import "go.opentelemetry.io/collector/pdata/internal/data"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
// Called by Protobuf JSON deserialization.
func unmarshalJSON(dst []byte, iter *json.Iterator) {
src := iter.ReadStringAsSlice()
if len(src) == 0 {
return
}
if len(dst) != hex.DecodedLen(len(src)) {
iter.ReportError("ID.UnmarshalJSONIter", "length mismatch")
return
}
_, err := hex.Decode(dst, src)
if err != nil {
iter.ReportError("ID.UnmarshalJSONIter", err.Error())
return
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package data // import "go.opentelemetry.io/collector/pdata/internal/data"
import (
"encoding/hex"
"errors"
"github.com/gogo/protobuf/proto"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const profileIDSize = 16
var (
errMarshalProfileID = errors.New("marshal: invalid buffer length for ProfileID")
errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
)
// ProfileID is a custom data type that is used for all profile_id fields in OTLP
// Protobuf messages.
type ProfileID [profileIDSize]byte
var _ proto.Sizer = (*ProfileID)(nil)
// Size returns the size of the data to serialize.
func (tid ProfileID) Size() int {
if tid.IsEmpty() {
return 0
}
return profileIDSize
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (tid ProfileID) IsEmpty() bool {
return tid == [profileIDSize]byte{}
}
// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization.
func (tid ProfileID) MarshalTo(data []byte) (n int, err error) {
if tid.IsEmpty() {
return 0, nil
}
if len(data) < profileIDSize {
return 0, errMarshalProfileID
}
return copy(data, tid[:]), nil
}
// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization.
func (tid *ProfileID) Unmarshal(data []byte) error {
if len(data) == 0 {
*tid = [profileIDSize]byte{}
return nil
}
if len(data) != profileIDSize {
return errUnmarshalProfileID
}
copy(tid[:], data)
return nil
}
// MarshalJSONStream converts ProfileID into a hex string.
func (tid ProfileID) MarshalJSONStream(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(tid[:]))
}
// UnmarshalJSONIter decodes ProfileID from hex string.
func (tid *ProfileID) UnmarshalJSONIter(iter *json.Iterator) {
*tid = [profileIDSize]byte{}
unmarshalJSON(tid[:], iter)
}
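// Round-trip sketch for ProfileID's binary representation (independent of the
// Protobuf plumbing that normally drives these methods):
//
//	id := ProfileID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
//		0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10}
//	buf := make([]byte, id.Size())
//	_, _ = id.MarshalTo(buf)
//	var out ProfileID
//	_ = out.Unmarshal(buf) // out == id
//	// An all-zero ProfileID has Size() == 0 and marshals to nothing.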
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/collector/logs/v1/logs_service.proto
package v1
import (
context "context"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ExportLogsServiceRequest struct {
// An array of ResourceLogs.
// For data coming from a single resource this array will typically contain one
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
// data from multiple origins typically batch the data before forwarding further and
// in that case this array will contain multiple elements.
ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
}
func (m *ExportLogsServiceRequest) Reset() { *m = ExportLogsServiceRequest{} }
func (m *ExportLogsServiceRequest) String() string { return proto.CompactTextString(m) }
func (*ExportLogsServiceRequest) ProtoMessage() {}
func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8e3bf87aaa43acd4, []int{0}
}
func (m *ExportLogsServiceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportLogsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportLogsServiceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportLogsServiceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportLogsServiceRequest.Merge(m, src)
}
func (m *ExportLogsServiceRequest) XXX_Size() int {
return m.Size()
}
func (m *ExportLogsServiceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportLogsServiceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportLogsServiceRequest proto.InternalMessageInfo
func (m *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs {
if m != nil {
return m.ResourceLogs
}
return nil
}
type ExportLogsServiceResponse struct {
// The details of a partially successful export request.
//
// If the request is only partially accepted
// (i.e. when the server accepts only parts of the data and rejects the rest)
// the server MUST initialize the `partial_success` field and MUST
// set the `rejected_<signal>` with the number of items it rejected.
//
// Servers MAY also make use of the `partial_success` field to convey
// warnings/suggestions to senders even when the request was fully accepted.
// In such cases, the `rejected_<signal>` MUST have a value of `0` and
// the `error_message` MUST be non-empty.
//
// A `partial_success` message with an empty value (rejected_<signal> = 0 and
// `error_message` = "") is equivalent to it not being set/present. Senders
// SHOULD interpret it the same way as in the full success case.
PartialSuccess ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
}
func (m *ExportLogsServiceResponse) Reset() { *m = ExportLogsServiceResponse{} }
func (m *ExportLogsServiceResponse) String() string { return proto.CompactTextString(m) }
func (*ExportLogsServiceResponse) ProtoMessage() {}
func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_8e3bf87aaa43acd4, []int{1}
}
func (m *ExportLogsServiceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportLogsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportLogsServiceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportLogsServiceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportLogsServiceResponse.Merge(m, src)
}
func (m *ExportLogsServiceResponse) XXX_Size() int {
return m.Size()
}
func (m *ExportLogsServiceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExportLogsServiceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExportLogsServiceResponse proto.InternalMessageInfo
func (m *ExportLogsServiceResponse) GetPartialSuccess() ExportLogsPartialSuccess {
if m != nil {
return m.PartialSuccess
}
return ExportLogsPartialSuccess{}
}
type ExportLogsPartialSuccess struct {
// The number of rejected log records.
//
// A `rejected_<signal>` field holding a `0` value indicates that the
// request was fully accepted.
RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"`
// A developer-facing human-readable message in English. It should be used
// either to explain why the server rejected parts of the data during a partial
// success or to convey warnings/suggestions during a full success. The message
// should offer guidance on how users can address such issues.
//
// error_message is an optional field. An error_message with an empty value
// is equivalent to it not being set.
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
func (m *ExportLogsPartialSuccess) Reset() { *m = ExportLogsPartialSuccess{} }
func (m *ExportLogsPartialSuccess) String() string { return proto.CompactTextString(m) }
func (*ExportLogsPartialSuccess) ProtoMessage() {}
func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) {
return fileDescriptor_8e3bf87aaa43acd4, []int{2}
}
func (m *ExportLogsPartialSuccess) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportLogsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportLogsPartialSuccess.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportLogsPartialSuccess) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportLogsPartialSuccess.Merge(m, src)
}
func (m *ExportLogsPartialSuccess) XXX_Size() int {
return m.Size()
}
func (m *ExportLogsPartialSuccess) XXX_DiscardUnknown() {
xxx_messageInfo_ExportLogsPartialSuccess.DiscardUnknown(m)
}
var xxx_messageInfo_ExportLogsPartialSuccess proto.InternalMessageInfo
func (m *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 {
if m != nil {
return m.RejectedLogRecords
}
return 0
}
func (m *ExportLogsPartialSuccess) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
func init() {
proto.RegisterType((*ExportLogsServiceRequest)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest")
proto.RegisterType((*ExportLogsServiceResponse)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse")
proto.RegisterType((*ExportLogsPartialSuccess)(nil), "opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess")
}
func init() {
proto.RegisterFile("opentelemetry/proto/collector/logs/v1/logs_service.proto", fileDescriptor_8e3bf87aaa43acd4)
}
var fileDescriptor_8e3bf87aaa43acd4 = []byte{
// 430 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xc1, 0x6e, 0x13, 0x31,
0x10, 0x86, 0xd7, 0x2d, 0xaa, 0x84, 0xd3, 0x02, 0xb2, 0x7a, 0x08, 0x39, 0x2c, 0x55, 0x50, 0x51,
0xb8, 0x78, 0x49, 0xb8, 0x70, 0x03, 0x05, 0x71, 0x0b, 0x10, 0x6d, 0x11, 0x07, 0x2e, 0xab, 0xc5,
0x19, 0x59, 0x5b, 0x6d, 0x77, 0xdc, 0xb1, 0x13, 0xc1, 0x33, 0x20, 0x24, 0x5e, 0x80, 0x17, 0xe0,
0x49, 0x7a, 0xe0, 0xd0, 0x23, 0x27, 0x84, 0x92, 0x17, 0x41, 0x5e, 0x97, 0xb0, 0x0b, 0x39, 0x04,
0x4e, 0xbb, 0x1e, 0xcf, 0xff, 0xfd, 0xff, 0xd8, 0x32, 0x7f, 0x84, 0x06, 0x2a, 0x07, 0x25, 0x9c,
0x81, 0xa3, 0xf7, 0x89, 0x21, 0x74, 0x98, 0x28, 0x2c, 0x4b, 0x50, 0x0e, 0x29, 0x29, 0x51, 0xdb,
0x64, 0x31, 0xac, 0xbf, 0x99, 0x05, 0x5a, 0x14, 0x0a, 0x64, 0xdd, 0x24, 0x8e, 0x5b, 0xca, 0x50,
0x94, 0x6b, 0xa5, 0xf4, 0x0a, 0xb9, 0x18, 0xf6, 0x0e, 0x35, 0x6a, 0x0c, 0x58, 0xff, 0x17, 0xfa,
0x7a, 0xf7, 0x36, 0xd9, 0x36, 0xcd, 0x42, 0x5f, 0xff, 0x94, 0x77, 0x9f, 0xbd, 0x33, 0x48, 0x6e,
0x82, 0xda, 0x9e, 0x04, 0xff, 0x14, 0xce, 0xe7, 0x60, 0x9d, 0x78, 0xc1, 0x0f, 0x08, 0x2c, 0xce,
0x49, 0x41, 0xe6, 0x25, 0x5d, 0x76, 0xb4, 0x3b, 0xe8, 0x8c, 0xee, 0xcb, 0x4d, 0xc1, 0xae, 0xe2,
0xc8, 0xf4, 0x4a, 0xe1, 0x79, 0xe9, 0x3e, 0x35, 0x56, 0xfd, 0x0f, 0x8c, 0xdf, 0xde, 0x60, 0x66,
0x0d, 0x56, 0x16, 0x44, 0xc5, 0x6f, 0x9a, 0x9c, 0x5c, 0x91, 0x97, 0x99, 0x9d, 0x2b, 0x05, 0xd6,
0xfb, 0xb1, 0x41, 0x67, 0xf4, 0x58, 0x6e, 0x75, 0x10, 0xf2, 0x37, 0x7a, 0x1a, 0x38, 0x27, 0x01,
0x33, 0xbe, 0x76, 0xf1, 0xfd, 0x4e, 0x94, 0xde, 0x30, 0xad, 0x6a, 0xff, 0xbc, 0x39, 0x79, 0x5b,
0x21, 0x1e, 0xf0, 0x43, 0x82, 0x53, 0x50, 0x0e, 0x66, 0x7e, 0xf2, 0x8c, 0x40, 0x21, 0xcd, 0x42,
0xa0, 0xdd, 0x54, 0xfc, 0xda, 0x9b, 0xa0, 0x4e, 0xc3, 0x8e, 0xb8, 0xcb, 0x0f, 0x80, 0x08, 0x29,
0x3b, 0x03, 0x6b, 0x73, 0x0d, 0xdd, 0x9d, 0x23, 0x36, 0xb8, 0x9e, 0xee, 0xd7, 0xc5, 0xe7, 0xa1,
0x36, 0xfa, 0xcc, 0x78, 0xa7, 0x31, 0xba, 0xf8, 0xc8, 0xf8, 0x5e, 0xc8, 0x20, 0xfe, 0x7d, 0xc8,
0xf6, 0x65, 0xf5, 0x9e, 0xfc, 0x3f, 0x20, 0x5c, 0x40, 0x3f, 0x1a, 0x7f, 0x65, 0x17, 0xcb, 0x98,
0x5d, 0x2e, 0x63, 0xf6, 0x63, 0x19, 0xb3, 0x4f, 0xab, 0x38, 0xba, 0x5c, 0xc5, 0xd1, 0xb7, 0x55,
0x1c, 0xf1, 0x41, 0x81, 0xdb, 0x19, 0x8c, 0x6f, 0x35, 0xd8, 0x53, 0xdf, 0x33, 0x65, 0x6f, 0x26,
0xfa, 0x4f, 0x75, 0xd1, 0x7c, 0x04, 0x66, 0x96, 0xbb, 0x3c, 0x29, 0x2a, 0x07, 0x54, 0xe5, 0x65,
0x52, 0xaf, 0x6a, 0xbc, 0x86, 0xea, 0xef, 0xb7, 0xf2, 0x65, 0xe7, 0xf8, 0xa5, 0x81, 0xea, 0xd5,
0x9a, 0x55, 0xbb, 0xc8, 0xa7, 0xeb, 0x24, 0x3e, 0x80, 0x7c, 0x3d, 0x7c, 0xbb, 0x57, 0x33, 0x1e,
0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xaf, 0x6c, 0x7d, 0x83, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// LogsServiceClient is the client API for LogsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type LogsServiceClient interface {
Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error)
}
type logsServiceClient struct {
cc *grpc.ClientConn
}
func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient {
return &logsServiceClient{cc}
}
func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) {
out := new(ExportLogsServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
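// Hedged usage sketch for the generated client; the endpoint and insecure
// transport are assumptions for illustration, and error handling is elided:
//
//	conn, _ := grpc.Dial("localhost:4317", grpc.WithInsecure())
//	client := NewLogsServiceClient(conn)
//	resp, _ := client.Export(context.Background(), &ExportLogsServiceRequest{})
//	// resp.PartialSuccess.RejectedLogRecords == 0 on a fully accepted export.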
// LogsServiceServer is the server API for LogsService service.
type LogsServiceServer interface {
Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error)
}
// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
type UnimplementedLogsServiceServer struct {
}
func (*UnimplementedLogsServiceServer) Export(ctx context.Context, req *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
s.RegisterService(&_LogsService_serviceDesc, srv)
}
func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportLogsServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LogsServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _LogsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
HandlerType: (*LogsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: _LogsService_Export_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
}
func (m *ExportLogsServiceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportLogsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportLogsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceLogs) > 0 {
for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogsService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ExportLogsServiceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportLogsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportLogsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogsService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ExportLogsPartialSuccess) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportLogsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportLogsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ErrorMessage) > 0 {
i -= len(m.ErrorMessage)
copy(dAtA[i:], m.ErrorMessage)
i = encodeVarintLogsService(dAtA, i, uint64(len(m.ErrorMessage)))
i--
dAtA[i] = 0x12
}
if m.RejectedLogRecords != 0 {
i = encodeVarintLogsService(dAtA, i, uint64(m.RejectedLogRecords))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintLogsService(dAtA []byte, offset int, v uint64) int {
offset -= sovLogsService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *ExportLogsServiceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceLogs) > 0 {
for _, e := range m.ResourceLogs {
l = e.Size()
n += 1 + l + sovLogsService(uint64(l))
}
}
return n
}
func (m *ExportLogsServiceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.PartialSuccess.Size()
n += 1 + l + sovLogsService(uint64(l))
return n
}
func (m *ExportLogsPartialSuccess) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.RejectedLogRecords != 0 {
n += 1 + sovLogsService(uint64(m.RejectedLogRecords))
}
l = len(m.ErrorMessage)
if l > 0 {
n += 1 + l + sovLogsService(uint64(l))
}
return n
}
func sovLogsService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozLogsService(x uint64) (n int) {
return sovLogsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ExportLogsServiceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportLogsServiceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportLogsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogsService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceLogs = append(m.ResourceLogs, &v1.ResourceLogs{})
if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportLogsServiceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportLogsServiceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportLogsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogsService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportLogsPartialSuccess) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportLogsPartialSuccess: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportLogsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
}
m.RejectedLogRecords = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RejectedLogRecords |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogsService
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ErrorMessage = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// skipLogsService returns the number of bytes occupied by the complete field
// (tag plus payload) that begins at the start of dAtA, descending into any
// nested groups.
func skipLogsService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthLogsService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupLogsService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthLogsService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
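// skipExampleUnknownField is an illustrative sketch (not generated code) of
// how the Unmarshal methods above use skipLogsService: handed a buffer that
// starts at a field tag, it returns the total length of that field, tag and
// payload included, honoring nested groups via the depth counter.
func skipExampleUnknownField() {
// Field number 3, wire type 0 (varint), value 1: tag 0x18, payload 0x01.
n, err := skipLogsService([]byte{0x18, 0x01})
_ = err // nil here
_ = n // 2: both bytes belong to the skipped field
}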
var (
ErrInvalidLengthLogsService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowLogsService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupLogsService = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
package v1
import (
context "context"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ExportMetricsServiceRequest struct {
// An array of ResourceMetrics.
// For data coming from a single resource this array will typically contain one
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
// data from multiple origins typically batch the data before forwarding further and
// in that case this array will contain multiple elements.
ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
}
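// metricsRequestFromBatch is an illustrative sketch (not generated code). As
// the field comment above notes, an intermediary that batches data from
// several origins produces a request with one ResourceMetrics element per
// origin resource.
func metricsRequestFromBatch(batch []*v1.ResourceMetrics) *ExportMetricsServiceRequest {
return &ExportMetricsServiceRequest{ResourceMetrics: batch}
}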
func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
func (*ExportMetricsServiceRequest) ProtoMessage() {}
func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_75fb6015e6e64798, []int{0}
}
func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
}
func (m *ExportMetricsServiceRequest) XXX_Size() int {
return m.Size()
}
func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
func (m *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
if m != nil {
return m.ResourceMetrics
}
return nil
}
type ExportMetricsServiceResponse struct {
// The details of a partially successful export request.
//
// If the request is only partially accepted
// (i.e. when the server accepts only parts of the data and rejects the rest)
// the server MUST initialize the `partial_success` field and MUST
// set the `rejected_<signal>` with the number of items it rejected.
//
// Servers MAY also make use of the `partial_success` field to convey
// warnings/suggestions to senders even when the request was fully accepted.
// In such cases, the `rejected_<signal>` MUST have a value of `0` and
// the `error_message` MUST be non-empty.
//
// A `partial_success` message with an empty value (rejected_<signal> = 0 and
// `error_message` = "") is equivalent to it not being set/present. Senders
// SHOULD interpret it the same way as in the full success case.
PartialSuccess ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
}
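// partialSuccessResponses is an illustrative sketch (not generated code) of
// the two server behaviors described above: rejecting part of the payload,
// and accepting everything while still conveying a warning.
func partialSuccessResponses() (rejected, warning ExportMetricsServiceResponse) {
// Partial rejection: rejected_data_points counts the dropped items.
rejected = ExportMetricsServiceResponse{PartialSuccess: ExportMetricsPartialSuccess{
RejectedDataPoints: 7,
ErrorMessage: "7 data points dropped: out-of-range timestamps",
}}
// Full acceptance with a warning: rejected_data_points stays 0 and
// error_message is non-empty.
warning = ExportMetricsServiceResponse{PartialSuccess: ExportMetricsPartialSuccess{
ErrorMessage: "deprecated attribute keys in use",
}}
return rejected, warning
}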
func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
func (*ExportMetricsServiceResponse) ProtoMessage() {}
func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_75fb6015e6e64798, []int{1}
}
func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
}
func (m *ExportMetricsServiceResponse) XXX_Size() int {
return m.Size()
}
func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
func (m *ExportMetricsServiceResponse) GetPartialSuccess() ExportMetricsPartialSuccess {
if m != nil {
return m.PartialSuccess
}
return ExportMetricsPartialSuccess{}
}
type ExportMetricsPartialSuccess struct {
// The number of rejected data points.
//
// A `rejected_<signal>` field holding a `0` value indicates that the
// request was fully accepted.
RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
// A developer-facing human-readable message in English. It should be used
// either to explain why the server rejected parts of the data during a partial
// success or to convey warnings/suggestions during a full success. The message
// should offer guidance on how users can address such issues.
//
// error_message is an optional field. An error_message with an empty value
// is equivalent to it not being set.
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
func (m *ExportMetricsPartialSuccess) Reset() { *m = ExportMetricsPartialSuccess{} }
func (m *ExportMetricsPartialSuccess) String() string { return proto.CompactTextString(m) }
func (*ExportMetricsPartialSuccess) ProtoMessage() {}
func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
return fileDescriptor_75fb6015e6e64798, []int{2}
}
func (m *ExportMetricsPartialSuccess) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportMetricsPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportMetricsPartialSuccess.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportMetricsPartialSuccess) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportMetricsPartialSuccess.Merge(m, src)
}
func (m *ExportMetricsPartialSuccess) XXX_Size() int {
return m.Size()
}
func (m *ExportMetricsPartialSuccess) XXX_DiscardUnknown() {
xxx_messageInfo_ExportMetricsPartialSuccess.DiscardUnknown(m)
}
var xxx_messageInfo_ExportMetricsPartialSuccess proto.InternalMessageInfo
func (m *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
if m != nil {
return m.RejectedDataPoints
}
return 0
}
func (m *ExportMetricsPartialSuccess) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
func init() {
proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest")
proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse")
proto.RegisterType((*ExportMetricsPartialSuccess)(nil), "opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess")
}
func init() {
proto.RegisterFile("opentelemetry/proto/collector/metrics/v1/metrics_service.proto", fileDescriptor_75fb6015e6e64798)
}
var fileDescriptor_75fb6015e6e64798 = []byte{
// 427 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xbf, 0x8e, 0xd3, 0x30,
0x18, 0x8f, 0xef, 0xd0, 0x49, 0xf8, 0xe0, 0x0e, 0x99, 0x1b, 0x4e, 0x05, 0x85, 0x53, 0x58, 0x22,
0x81, 0x1c, 0x5a, 0x76, 0x86, 0xc2, 0xb1, 0x9d, 0x1a, 0xa5, 0x88, 0xa1, 0x4b, 0x64, 0xdc, 0x4f,
0x51, 0x50, 0x1a, 0x1b, 0xdb, 0xad, 0xe8, 0x5b, 0x30, 0xb0, 0xf0, 0x0a, 0x88, 0x07, 0xe9, 0xd8,
0xb1, 0x13, 0x42, 0xed, 0x8b, 0xa0, 0xc4, 0x69, 0xc1, 0x25, 0x43, 0xc5, 0x6d, 0xce, 0xcf, 0xdf,
0xef, 0x4f, 0x7e, 0xd6, 0x87, 0x5f, 0x09, 0x09, 0xa5, 0x81, 0x02, 0x26, 0x60, 0xd4, 0x3c, 0x92,
0x4a, 0x18, 0x11, 0x71, 0x51, 0x14, 0xc0, 0x8d, 0x50, 0x51, 0x85, 0xe6, 0x5c, 0x47, 0xb3, 0xee,
0xf6, 0x98, 0x6a, 0x50, 0xb3, 0x9c, 0x03, 0xad, 0x47, 0x49, 0xe8, 0xf0, 0x2d, 0x48, 0x77, 0x7c,
0xda, 0x90, 0xe8, 0xac, 0xdb, 0xb9, 0xc8, 0x44, 0x26, 0xac, 0x7e, 0x75, 0xb2, 0xa3, 0x9d, 0xe7,
0x6d, 0xfe, 0xff, 0xba, 0xda, 0xe9, 0x60, 0x8e, 0x1f, 0x5d, 0x7f, 0x96, 0x42, 0x99, 0x1b, 0x0b,
0x0f, 0x6d, 0x96, 0x04, 0x3e, 0x4d, 0x41, 0x1b, 0x32, 0xc2, 0x0f, 0x14, 0x68, 0x31, 0x55, 0x1c,
0xd2, 0x86, 0x78, 0x89, 0xae, 0x8e, 0xc3, 0xd3, 0x5e, 0x44, 0xdb, 0x72, 0xfe, 0x49, 0x47, 0x93,
0x86, 0xd7, 0x08, 0x27, 0xe7, 0xca, 0x05, 0x82, 0xaf, 0x08, 0x3f, 0x6e, 0xf7, 0xd6, 0x52, 0x94,
0x1a, 0x88, 0xc1, 0xe7, 0x92, 0x29, 0x93, 0xb3, 0x22, 0xd5, 0x53, 0xce, 0x41, 0x57, 0xde, 0x28,
0x3c, 0xed, 0x5d, 0xd3, 0x43, 0x3b, 0xa2, 0x8e, 0x41, 0x6c, 0xd5, 0x86, 0x56, 0xac, 0x7f, 0x67,
0xf1, 0xf3, 0x89, 0x97, 0x9c, 0x49, 0x07, 0x0d, 0xcc, 0x5e, 0x23, 0x2e, 0x89, 0xbc, 0xc0, 0x17,
0x0a, 0x3e, 0x02, 0x37, 0x30, 0x4e, 0xc7, 0xcc, 0xb0, 0x54, 0x8a, 0xbc, 0x34, 0x36, 0xd9, 0x71,
0x42, 0xb6, 0x77, 0x6f, 0x98, 0x61, 0x71, 0x7d, 0x43, 0x9e, 0xe2, 0xfb, 0xa0, 0x94, 0x50, 0xe9,
0x04, 0xb4, 0x66, 0x19, 0x5c, 0x1e, 0x5d, 0xa1, 0xf0, 0x6e, 0x72, 0xaf, 0x06, 0x6f, 0x2c, 0xd6,
0xfb, 0x81, 0xf0, 0x99, 0x5b, 0x03, 0xf9, 0x86, 0xf0, 0x89, 0x4d, 0x42, 0xfe, 0xf7, 0x87, 0xdd,
0xd7, 0xec, 0xbc, 0xbd, 0xad, 0x8c, 0x7d, 0x98, 0xc0, 0xeb, 0xaf, 0xd0, 0x62, 0xed, 0xa3, 0xe5,
0xda, 0x47, 0xbf, 0xd6, 0x3e, 0xfa, 0xb2, 0xf1, 0xbd, 0xe5, 0xc6, 0xf7, 0x56, 0x1b, 0xdf, 0xc3,
0xcf, 0x72, 0x71, 0xb0, 0x4d, 0xff, 0xa1, 0xeb, 0x10, 0x57, 0x93, 0x31, 0x1a, 0x0d, 0xb2, 0x7d,
0x8d, 0xfc, 0xef, 0x1d, 0x92, 0x55, 0xf1, 0x51, 0x5e, 0x1a, 0x50, 0x25, 0x2b, 0xa2, 0xfa, 0xab,
0x36, 0xc9, 0xa0, 0x6c, 0x5d, 0xb5, 0xef, 0x47, 0xe1, 0x40, 0x42, 0xf9, 0x6e, 0x27, 0x57, 0x1b,
0xd1, 0xd7, 0xbb, 0x48, 0x4d, 0x0c, 0xfa, 0xbe, 0xfb, 0xe1, 0xa4, 0x56, 0x7a, 0xf9, 0x3b, 0x00,
0x00, 0xff, 0xff, 0x47, 0xf2, 0x5f, 0x42, 0xc8, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// MetricsServiceClient is the client API for MetricsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type MetricsServiceClient interface {
Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
}
type metricsServiceClient struct {
cc *grpc.ClientConn
}
func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
return &metricsServiceClient{cc}
}
func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
out := new(ExportMetricsServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
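// exampleExport is an illustrative sketch (not generated code); the endpoint
// is an assumption (4317 is the conventional OTLP/gRPC port) and the plaintext
// dial option is for illustration only. It sends one empty request and checks
// the partial-success field.
func exampleExport(ctx context.Context) error {
conn, err := grpc.Dial("localhost:4317", grpc.WithInsecure())
if err != nil {
return err
}
defer conn.Close()
client := NewMetricsServiceClient(conn)
resp, err := client.Export(ctx, &ExportMetricsServiceRequest{})
if err != nil {
return err // transport- or server-side failure
}
if ps := resp.GetPartialSuccess(); ps.RejectedDataPoints > 0 || ps.ErrorMessage != "" {
_ = ps // partially accepted, or fully accepted with a warning
}
return nil
}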
// MetricsServiceServer is the server API for MetricsService service.
type MetricsServiceServer interface {
Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
}
// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
type UnimplementedMetricsServiceServer struct {
}
func (*UnimplementedMetricsServiceServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
s.RegisterService(&_MetricsService_serviceDesc, srv)
}
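// noopMetricsServer is an illustrative sketch (not generated code): embedding
// UnimplementedMetricsServiceServer keeps the type forward compatible, and the
// Export override accepts every request unconditionally.
type noopMetricsServer struct {
UnimplementedMetricsServiceServer
}
func (noopMetricsServer) Export(ctx context.Context, req *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
// An empty response (zero-valued partial_success) signals full success.
return &ExportMetricsServiceResponse{}, nil
}
// Wiring it up:
//
// s := grpc.NewServer()
// RegisterMetricsServiceServer(s, noopMetricsServer{})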
func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportMetricsServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MetricsServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _MetricsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
HandlerType: (*MetricsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: _MetricsService_Export_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
}
func (m *ExportMetricsServiceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportMetricsServiceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportMetricsServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceMetrics) > 0 {
for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetricsService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ExportMetricsServiceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportMetricsServiceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportMetricsServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetricsService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ExportMetricsPartialSuccess) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportMetricsPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportMetricsPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ErrorMessage) > 0 {
i -= len(m.ErrorMessage)
copy(dAtA[i:], m.ErrorMessage)
i = encodeVarintMetricsService(dAtA, i, uint64(len(m.ErrorMessage)))
i--
dAtA[i] = 0x12
}
if m.RejectedDataPoints != 0 {
i = encodeVarintMetricsService(dAtA, i, uint64(m.RejectedDataPoints))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintMetricsService(dAtA []byte, offset int, v uint64) int {
offset -= sovMetricsService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
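// encodeVarintExample is an illustrative sketch (not generated code) of the
// back-to-front convention used by encodeVarintMetricsService: the caller
// passes the index just past where the varint must end, the function steps
// back by the encoded size, writes the bytes forward, and returns the new
// start index.
func encodeVarintExample() {
buf := make([]byte, 4)
start := encodeVarintMetricsService(buf, 4, 300)
_ = start // 2: the varint occupies buf[2:4]
// buf is now [0x00 0x00 0xAC 0x02]: the low seven bits of 300 (0b0101100)
// with the continuation bit set give 0xAC, the remaining bits (0b10) give 0x02.
}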
func (m *ExportMetricsServiceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceMetrics) > 0 {
for _, e := range m.ResourceMetrics {
l = e.Size()
n += 1 + l + sovMetricsService(uint64(l))
}
}
return n
}
func (m *ExportMetricsServiceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.PartialSuccess.Size()
n += 1 + l + sovMetricsService(uint64(l))
return n
}
func (m *ExportMetricsPartialSuccess) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.RejectedDataPoints != 0 {
n += 1 + sovMetricsService(uint64(m.RejectedDataPoints))
}
l = len(m.ErrorMessage)
if l > 0 {
n += 1 + l + sovMetricsService(uint64(l))
}
return n
}
func sovMetricsService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozMetricsService(x uint64) (n int) {
return sovMetricsService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
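// roundTripExample is an illustrative sketch (not generated code) tying the
// pieces together: Size computes the exact buffer length, Marshal fills it
// back to front, and Unmarshal reproduces the original message.
func roundTripExample() error {
in := &ExportMetricsPartialSuccess{RejectedDataPoints: 7, ErrorMessage: "oops"}
data, err := in.Marshal() // len(data) == in.Size()
if err != nil {
return err
}
out := &ExportMetricsPartialSuccess{}
return out.Unmarshal(data) // out now matches in field for field
}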
func (m *ExportMetricsServiceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetricsService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetricsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceMetrics = append(m.ResourceMetrics, &v1.ResourceMetrics{})
if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetricsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetricsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportMetricsServiceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportMetricsServiceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportMetricsServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetricsService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetricsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetricsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetricsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportMetricsPartialSuccess) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportMetricsPartialSuccess: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportMetricsPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
}
m.RejectedDataPoints = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RejectedDataPoints |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetricsService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetricsService
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetricsService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ErrorMessage = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetricsService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetricsService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMetricsService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetricsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetricsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetricsService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthMetricsService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupMetricsService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthMetricsService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthMetricsService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMetricsService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupMetricsService = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto
package v1development
import (
context "context"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ExportProfilesServiceRequest struct {
// An array of ResourceProfiles.
// For data coming from a single resource this array will typically contain one
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
// data from multiple origins typically batch the data before forwarding further and
// in that case this array will contain multiple elements.
ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
// The reference table containing all data shared by profiles across the message being sent.
Dictionary v1development.ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
}
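// newProfilesRequest is an illustrative sketch (not generated code). Unlike
// the repeated resource_profiles field, Dictionary is a non-nullable value
// field: every request carries one, holding the data shared by all profiles
// in the message.
func newProfilesRequest(rps []*v1development.ResourceProfiles, dict v1development.ProfilesDictionary) *ExportProfilesServiceRequest {
return &ExportProfilesServiceRequest{
ResourceProfiles: rps,
Dictionary: dict,
}
}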
func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} }
func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) }
func (*ExportProfilesServiceRequest) ProtoMessage() {}
func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_ad3943ce836e7720, []int{0}
}
func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportProfilesServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportProfilesServiceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportProfilesServiceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportProfilesServiceRequest.Merge(m, src)
}
func (m *ExportProfilesServiceRequest) XXX_Size() int {
return m.Size()
}
func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportProfilesServiceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo
func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles {
if m != nil {
return m.ResourceProfiles
}
return nil
}
func (m *ExportProfilesServiceRequest) GetDictionary() v1development.ProfilesDictionary {
if m != nil {
return m.Dictionary
}
return v1development.ProfilesDictionary{}
}
type ExportProfilesServiceResponse struct {
// The details of a partially successful export request.
//
// If the request is only partially accepted
// (i.e. when the server accepts only parts of the data and rejects the rest)
// the server MUST initialize the `partial_success` field and MUST
// set the `rejected_<signal>` with the number of items it rejected.
//
// Servers MAY also make use of the `partial_success` field to convey
// warnings/suggestions to senders even when the request was fully accepted.
// In such cases, the `rejected_<signal>` MUST have a value of `0` and
// the `error_message` MUST be non-empty.
//
// A `partial_success` message with an empty value (rejected_<signal> = 0 and
// `error_message` = "") is equivalent to it not being set/present. Senders
// SHOULD interpret it the same way as in the full success case.
PartialSuccess ExportProfilesPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
}
func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesServiceResponse{} }
func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) }
func (*ExportProfilesServiceResponse) ProtoMessage() {}
func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_ad3943ce836e7720, []int{1}
}
func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportProfilesServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportProfilesServiceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportProfilesServiceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportProfilesServiceResponse.Merge(m, src)
}
func (m *ExportProfilesServiceResponse) XXX_Size() int {
return m.Size()
}
func (m *ExportProfilesServiceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExportProfilesServiceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExportProfilesServiceResponse proto.InternalMessageInfo
func (m *ExportProfilesServiceResponse) GetPartialSuccess() ExportProfilesPartialSuccess {
if m != nil {
return m.PartialSuccess
}
return ExportProfilesPartialSuccess{}
}
type ExportProfilesPartialSuccess struct {
// The number of rejected profiles.
//
// A `rejected_<signal>` field holding a `0` value indicates that the
// request was fully accepted.
RejectedProfiles int64 `protobuf:"varint,1,opt,name=rejected_profiles,json=rejectedProfiles,proto3" json:"rejected_profiles,omitempty"`
// A developer-facing human-readable message in English. It should be used
// either to explain why the server rejected parts of the data during a partial
// success or to convey warnings/suggestions during a full success. The message
// should offer guidance on how users can address such issues.
//
// error_message is an optional field. An error_message with an empty value
// is equivalent to it not being set.
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPartialSuccess{} }
func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) }
func (*ExportProfilesPartialSuccess) ProtoMessage() {}
func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) {
return fileDescriptor_ad3943ce836e7720, []int{2}
}
func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportProfilesPartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportProfilesPartialSuccess.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportProfilesPartialSuccess) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportProfilesPartialSuccess.Merge(m, src)
}
func (m *ExportProfilesPartialSuccess) XXX_Size() int {
return m.Size()
}
func (m *ExportProfilesPartialSuccess) XXX_DiscardUnknown() {
xxx_messageInfo_ExportProfilesPartialSuccess.DiscardUnknown(m)
}
var xxx_messageInfo_ExportProfilesPartialSuccess proto.InternalMessageInfo
func (m *ExportProfilesPartialSuccess) GetRejectedProfiles() int64 {
if m != nil {
return m.RejectedProfiles
}
return 0
}
func (m *ExportProfilesPartialSuccess) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
func init() {
proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest")
proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse")
proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess")
}
func init() {
proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720)
}
var fileDescriptor_ad3943ce836e7720 = []byte{
// 467 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x8a, 0xd3, 0x40,
0x18, 0xc7, 0x33, 0xbb, 0xb2, 0xe0, 0xac, 0xba, 0x1a, 0xf6, 0xb0, 0x14, 0x8d, 0x4b, 0xbc, 0x14,
0x84, 0x09, 0x5b, 0x17, 0x44, 0x10, 0x0f, 0x75, 0x3d, 0x89, 0x18, 0x52, 0xf1, 0xa0, 0x87, 0x10,
0x27, 0x9f, 0x61, 0x24, 0x9d, 0x19, 0x67, 0xa6, 0xc5, 0x1e, 0x7d, 0x03, 0xdf, 0xc1, 0x9b, 0x57,
0x1f, 0xc2, 0x1e, 0x7b, 0xf4, 0x24, 0xd2, 0x3e, 0x80, 0x4f, 0x20, 0x48, 0x32, 0x4d, 0x6c, 0x42,
0xa5, 0x58, 0x7a, 0xcb, 0x7c, 0xc3, 0xff, 0xf7, 0xff, 0x7f, 0xdf, 0x17, 0x06, 0x3f, 0x15, 0x12,
0xb8, 0x81, 0x1c, 0x86, 0x60, 0xd4, 0x24, 0x90, 0x4a, 0x18, 0x11, 0x50, 0x91, 0xe7, 0x40, 0x8d,
0x50, 0xc5, 0xf9, 0x2d, 0xcb, 0x41, 0x07, 0xe3, 0xb3, 0x14, 0xc6, 0x90, 0x0b, 0x39, 0x04, 0x6e,
0xea, 0x72, 0xac, 0x41, 0x8d, 0x19, 0x05, 0x52, 0xea, 0xdc, 0xf3, 0x06, 0xcc, 0x16, 0x49, 0x0d,
0x23, 0x95, 0x8a, 0x34, 0x60, 0x9d, 0xe3, 0x4c, 0x64, 0xc2, 0x1a, 0x17, 0x5f, 0x56, 0xd6, 0x79,
0xb0, 0x2e, 0xd8, 0x86, 0x38, 0x56, 0xea, 0xff, 0x42, 0xf8, 0xe6, 0x93, 0x0f, 0x52, 0x28, 0x13,
0x2e, 0x2f, 0x06, 0x36, 0x66, 0x04, 0xef, 0x47, 0xa0, 0x8d, 0xcb, 0xf0, 0x0d, 0x05, 0x5a, 0x8c,
0x14, 0x85, 0xb8, 0xd2, 0x9e, 0xa0, 0xd3, 0xfd, 0xee, 0x61, 0xef, 0x21, 0x59, 0xd7, 0xc3, 0xfa,
0xe4, 0x24, 0x5a, 0x42, 0x2a, 0x9b, 0xe8, 0xba, 0x6a, 0x55, 0xdc, 0x14, 0xe3, 0x94, 0x51, 0xc3,
0x04, 0x4f, 0xd4, 0xe4, 0x64, 0xef, 0x14, 0x75, 0x0f, 0x7b, 0x8f, 0xfe, 0xc7, 0xa3, 0x22, 0x5d,
0xd4, 0x94, 0xfe, 0xa5, 0xe9, 0x8f, 0xdb, 0x4e, 0xb4, 0xc2, 0xf5, 0x3f, 0x23, 0x7c, 0xeb, 0x1f,
0x1d, 0x6b, 0x29, 0xb8, 0x06, 0xf7, 0x23, 0xc2, 0x47, 0x32, 0x51, 0x86, 0x25, 0x79, 0xac, 0x47,
0x94, 0x82, 0x2e, 0x3a, 0x2e, 0xd2, 0x44, 0x64, 0x9b, 0xad, 0x91, 0xa6, 0x5d, 0x68, 0xd1, 0x03,
0x4b, 0x5e, 0x26, 0xbc, 0x26, 0x1b, 0x55, 0x5f, 0xb6, 0xd7, 0xd2, 0x54, 0xb9, 0x77, 0x8b, 0xb5,
0xbc, 0x03, 0x6a, 0x20, 0x5d, 0x5d, 0x0b, 0xea, 0xee, 0x17, 0x83, 0xb5, 0x17, 0xf5, 0x60, 0xef,
0xe0, 0xab, 0xa0, 0x94, 0x50, 0xf1, 0x10, 0xb4, 0x4e, 0x32, 0x28, 0x67, 0x7b, 0x39, 0xba, 0x52,
0x16, 0x9f, 0xd9, 0x5a, 0xef, 0x1b, 0xc2, 0x47, 0xad, 0x89, 0xb8, 0x5f, 0x11, 0x3e, 0xb0, 0x31,
0xdc, 0x9d, 0xb4, 0xde, 0xfc, 0xb7, 0x3a, 0x83, 0x9d, 0x32, 0xed, 0xf6, 0x7c, 0xa7, 0xff, 0x1b,
0x4d, 0xe7, 0x1e, 0x9a, 0xcd, 0x3d, 0xf4, 0x73, 0xee, 0xa1, 0x4f, 0x0b, 0xcf, 0x99, 0x2d, 0x3c,
0xe7, 0xfb, 0xc2, 0x73, 0xf0, 0x7d, 0x26, 0xb6, 0xf2, 0xec, 0x1f, 0xb7, 0xec, 0xc2, 0x42, 0x16,
0xa2, 0x57, 0xaf, 0xb3, 0x36, 0x90, 0x35, 0xde, 0x84, 0x34, 0x31, 0x49, 0xc0, 0xb8, 0x01, 0xc5,
0x93, 0x3c, 0x28, 0x4f, 0xa5, 0x63, 0x06, 0x7c, 0xe3, 0xd3, 0xf1, 0x65, 0xef, 0xfc, 0xb9, 0x04,
0xfe, 0xa2, 0x46, 0x97, 0xa6, 0xe4, 0x71, 0x9d, 0xb5, 0xca, 0x44, 0x5e, 0x9e, 0x5d, 0xfc, 0x95,
0xbd, 0x39, 0x28, 0x1d, 0xee, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb9, 0xb5, 0x6e, 0xb0,
0x04, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ProfilesServiceClient is the client API for ProfilesService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProfilesServiceClient interface {
Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error)
}
type profilesServiceClient struct {
cc *grpc.ClientConn
}
func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
return &profilesServiceClient{cc}
}
func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) {
out := new(ExportProfilesServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ProfilesServiceServer is the server API for ProfilesService service.
type ProfilesServiceServer interface {
Export(context.Context, *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error)
}
// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
type UnimplementedProfilesServiceServer struct {
}
func (*UnimplementedProfilesServiceServer) Export(ctx context.Context, req *ExportProfilesServiceRequest) (*ExportProfilesServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
s.RegisterService(&_ProfilesService_serviceDesc, srv)
}
func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportProfilesServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfilesServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ProfilesService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
HandlerType: (*ProfilesServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: _ProfilesService_Export_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
}
func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportProfilesServiceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportProfilesServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfilesService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.ResourceProfiles) > 0 {
for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfilesService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
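// Note on the method above (illustrative, not generated): MarshalToSizedBuffer
// fills the buffer back to front, so Dictionary (field 2, tag 0x12) is written
// before the ResourceProfiles entries (field 1, tag 0xa), yet the fields end
// up in ascending order on the wire. Because Dictionary is non-nullable, its
// submessage is emitted even when empty.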
func (m *ExportProfilesServiceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportProfilesServiceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportProfilesServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfilesService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ExportProfilesPartialSuccess) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportProfilesPartialSuccess) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportProfilesPartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ErrorMessage) > 0 {
i -= len(m.ErrorMessage)
copy(dAtA[i:], m.ErrorMessage)
i = encodeVarintProfilesService(dAtA, i, uint64(len(m.ErrorMessage)))
i--
dAtA[i] = 0x12
}
if m.RejectedProfiles != 0 {
i = encodeVarintProfilesService(dAtA, i, uint64(m.RejectedProfiles))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintProfilesService(dAtA []byte, offset int, v uint64) int {
offset -= sovProfilesService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *ExportProfilesServiceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceProfiles) > 0 {
for _, e := range m.ResourceProfiles {
l = e.Size()
n += 1 + l + sovProfilesService(uint64(l))
}
}
l = m.Dictionary.Size()
n += 1 + l + sovProfilesService(uint64(l))
return n
}
func (m *ExportProfilesServiceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.PartialSuccess.Size()
n += 1 + l + sovProfilesService(uint64(l))
return n
}
func (m *ExportProfilesPartialSuccess) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.RejectedProfiles != 0 {
n += 1 + sovProfilesService(uint64(m.RejectedProfiles))
}
l = len(m.ErrorMessage)
if l > 0 {
n += 1 + l + sovProfilesService(uint64(l))
}
return n
}
func sovProfilesService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozProfilesService(x uint64) (n int) {
return sovProfilesService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportProfilesServiceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportProfilesServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfilesService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfilesService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{})
if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfilesService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfilesService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfilesService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfilesService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportProfilesServiceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportProfilesServiceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportProfilesServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfilesService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfilesService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfilesService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfilesService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportProfilesPartialSuccess) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportProfilesPartialSuccess: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportProfilesPartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
}
m.RejectedProfiles = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RejectedProfiles |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfilesService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProfilesService
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthProfilesService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ErrorMessage = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfilesService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfilesService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipProfilesService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfilesService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfilesService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfilesService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthProfilesService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupProfilesService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthProfilesService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthProfilesService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowProfilesService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupProfilesService = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
package v1
import (
context "context"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ExportTraceServiceRequest struct {
// An array of ResourceSpans.
// For data coming from a single resource this array will typically contain one
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
// data from multiple origins typically batch the data before forwarding further and
// in that case this array will contain multiple elements.
ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
}
func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
func (*ExportTraceServiceRequest) ProtoMessage() {}
func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_192a962890318cf4, []int{0}
}
func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
}
func (m *ExportTraceServiceRequest) XXX_Size() int {
return m.Size()
}
func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
func (m *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
if m != nil {
return m.ResourceSpans
}
return nil
}
type ExportTraceServiceResponse struct {
// The details of a partially successful export request.
//
// If the request is only partially accepted
// (i.e. when the server accepts only parts of the data and rejects the rest)
// the server MUST initialize the `partial_success` field and MUST
// set the `rejected_<signal>` with the number of items it rejected.
//
// Servers MAY also make use of the `partial_success` field to convey
// warnings/suggestions to senders even when the request was fully accepted.
// In such cases, the `rejected_<signal>` MUST have a value of `0` and
// the `error_message` MUST be non-empty.
//
// A `partial_success` message with an empty value (rejected_<signal> = 0 and
// `error_message` = "") is equivalent to it not being set/present. Senders
// SHOULD interpret it the same way as in the full success case.
PartialSuccess ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success"`
}
func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
func (*ExportTraceServiceResponse) ProtoMessage() {}
func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_192a962890318cf4, []int{1}
}
func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
}
func (m *ExportTraceServiceResponse) XXX_Size() int {
return m.Size()
}
func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
func (m *ExportTraceServiceResponse) GetPartialSuccess() ExportTracePartialSuccess {
if m != nil {
return m.PartialSuccess
}
return ExportTracePartialSuccess{}
}
type ExportTracePartialSuccess struct {
// The number of rejected spans.
//
// A `rejected_<signal>` field holding a `0` value indicates that the
// request was fully accepted.
RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
// A developer-facing human-readable message in English. It should be used
// either to explain why the server rejected parts of the data during a partial
// success or to convey warnings/suggestions during a full success. The message
// should offer guidance on how users can address such issues.
//
// error_message is an optional field. An error_message with an empty value
// is equivalent to it not being set.
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
}
func (m *ExportTracePartialSuccess) Reset() { *m = ExportTracePartialSuccess{} }
func (m *ExportTracePartialSuccess) String() string { return proto.CompactTextString(m) }
func (*ExportTracePartialSuccess) ProtoMessage() {}
func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
return fileDescriptor_192a962890318cf4, []int{2}
}
func (m *ExportTracePartialSuccess) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExportTracePartialSuccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExportTracePartialSuccess.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExportTracePartialSuccess) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExportTracePartialSuccess.Merge(m, src)
}
func (m *ExportTracePartialSuccess) XXX_Size() int {
return m.Size()
}
func (m *ExportTracePartialSuccess) XXX_DiscardUnknown() {
xxx_messageInfo_ExportTracePartialSuccess.DiscardUnknown(m)
}
var xxx_messageInfo_ExportTracePartialSuccess proto.InternalMessageInfo
func (m *ExportTracePartialSuccess) GetRejectedSpans() int64 {
if m != nil {
return m.RejectedSpans
}
return 0
}
func (m *ExportTracePartialSuccess) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
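// exportOutcome is an illustrative helper (an editorial sketch, not part of
// the generated code) that applies the partial_success rules documented on
// ExportTraceServiceResponse: rejected spans mean a partial failure, a bare
// error message is a warning, and an empty message means full success.
func exportOutcome(resp *ExportTraceServiceResponse) string {
	ps := resp.GetPartialSuccess()
	switch {
	case ps.GetRejectedSpans() > 0:
		return fmt.Sprintf("partial failure: %d spans rejected: %s", ps.GetRejectedSpans(), ps.GetErrorMessage())
	case ps.GetErrorMessage() != "":
		return "fully accepted with warning: " + ps.GetErrorMessage()
	default:
		return "fully accepted"
	}
}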
func init() {
proto.RegisterType((*ExportTraceServiceRequest)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest")
proto.RegisterType((*ExportTraceServiceResponse)(nil), "opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse")
proto.RegisterType((*ExportTracePartialSuccess)(nil), "opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess")
}
func init() {
proto.RegisterFile("opentelemetry/proto/collector/trace/v1/trace_service.proto", fileDescriptor_192a962890318cf4)
}
var fileDescriptor_192a962890318cf4 = []byte{
// 413 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4f, 0xeb, 0xd3, 0x30,
0x18, 0x6e, 0x36, 0x19, 0x98, 0xfd, 0x11, 0x8b, 0x87, 0xd9, 0x43, 0x1d, 0x15, 0x47, 0x45, 0x48,
0xd9, 0xbc, 0x79, 0xb3, 0xe2, 0x71, 0x38, 0xba, 0xe1, 0xc1, 0xcb, 0x88, 0xdd, 0x4b, 0xa9, 0x74,
0x4d, 0x4c, 0xb2, 0xa1, 0x5f, 0x42, 0xf4, 0x2b, 0x78, 0xf4, 0x93, 0xec, 0xb8, 0xa3, 0x27, 0x91,
0xed, 0x8b, 0x48, 0x12, 0x2d, 0xad, 0xf4, 0x30, 0x7e, 0xbf, 0x5b, 0xf2, 0xf0, 0x3e, 0x7f, 0xde,
0x27, 0x04, 0xbf, 0x60, 0x1c, 0x4a, 0x05, 0x05, 0xec, 0x40, 0x89, 0xcf, 0x11, 0x17, 0x4c, 0xb1,
0x28, 0x65, 0x45, 0x01, 0xa9, 0x62, 0x22, 0x52, 0x82, 0xa6, 0x10, 0x1d, 0x66, 0xf6, 0xb0, 0x91,
0x20, 0x0e, 0x79, 0x0a, 0xc4, 0x8c, 0xb9, 0xd3, 0x06, 0xd7, 0x82, 0xa4, 0xe2, 0x12, 0x43, 0x21,
0x87, 0x99, 0xf7, 0x20, 0x63, 0x19, 0xb3, 0xca, 0xfa, 0x64, 0x07, 0xbd, 0xb0, 0xcd, 0xb9, 0xe9,
0x67, 0x27, 0x03, 0x86, 0x1f, 0xbe, 0xfe, 0xc4, 0x99, 0x50, 0x6b, 0x0d, 0xae, 0x6c, 0x86, 0x04,
0x3e, 0xee, 0x41, 0x2a, 0x37, 0xc1, 0x23, 0x01, 0x92, 0xed, 0x85, 0x8e, 0xc7, 0x69, 0x29, 0xc7,
0x68, 0xd2, 0x0d, 0xfb, 0xf3, 0x67, 0xa4, 0x2d, 0xdd, 0xbf, 0x4c, 0x24, 0xf9, 0xcb, 0x59, 0x69,
0x4a, 0x32, 0x14, 0xf5, 0x6b, 0xf0, 0x05, 0x61, 0xaf, 0xcd, 0x51, 0x72, 0x56, 0x4a, 0x70, 0x39,
0xbe, 0xc7, 0xa9, 0x50, 0x39, 0x2d, 0x36, 0x72, 0x9f, 0xa6, 0x20, 0xb5, 0x27, 0x0a, 0xfb, 0xf3,
0x97, 0xe4, 0xba, 0x46, 0x48, 0x4d, 0x7c, 0x69, 0x95, 0x56, 0x56, 0x28, 0xbe, 0x73, 0xfc, 0xf5,
0xc8, 0x49, 0x46, 0xbc, 0x81, 0x06, 0x59, 0xa3, 0x81, 0x26, 0xc5, 0x7d, 0xa2, 0x1b, 0xf8, 0x00,
0xa9, 0x82, 0x6d, 0xd5, 0x00, 0x0a, 0xbb, 0x7a, 0x29, 0x8b, 0x9a, 0xa5, 0xdc, 0xc7, 0x78, 0x08,
0x42, 0x30, 0xb1, 0xd9, 0x81, 0x94, 0x34, 0x83, 0x71, 0x67, 0x82, 0xc2, 0xbb, 0xc9, 0xc0, 0x80,
0x0b, 0x8b, 0xcd, 0xbf, 0x23, 0x3c, 0xa8, 0xef, 0xec, 0x7e, 0x43, 0xb8, 0x67, 0xad, 0xdd, 0x9b,
0x6c, 0xd7, 0x7c, 0x2c, 0x2f, 0xbe, 0x8d, 0x84, 0x6d, 0x3f, 0x70, 0xe2, 0x13, 0x3a, 0x9e, 0x7d,
0x74, 0x3a, 0xfb, 0xe8, 0xf7, 0xd9, 0x47, 0x5f, 0x2f, 0xbe, 0x73, 0xba, 0xf8, 0xce, 0xcf, 0x8b,
0xef, 0xe0, 0xa7, 0x39, 0xbb, 0xd2, 0x22, 0xbe, 0x5f, 0x57, 0x5f, 0xea, 0xa9, 0x25, 0x7a, 0xb7,
0xc8, 0xfe, 0xe7, 0xe7, 0xf5, 0xef, 0xc0, 0xb7, 0x54, 0xd1, 0x28, 0x2f, 0x15, 0x88, 0x92, 0x16,
0x91, 0xb9, 0x19, 0x83, 0x0c, 0xca, 0x96, 0x5f, 0xf3, 0xa3, 0x33, 0x7d, 0xc3, 0xa1, 0x5c, 0x57,
0x62, 0xc6, 0x86, 0xbc, 0xaa, 0xc2, 0x98, 0x08, 0xe4, 0xed, 0xec, 0x7d, 0xcf, 0xa8, 0x3c, 0xff,
0x13, 0x00, 0x00, 0xff, 0xff, 0x82, 0xce, 0x78, 0xc7, 0x8f, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TraceServiceClient interface {
Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
}
type traceServiceClient struct {
cc *grpc.ClientConn
}
func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
return &traceServiceClient{cc}
}
func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
out := new(ExportTraceServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
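// exampleExport is an illustrative sketch (not part of the generated code)
// showing the intended call path: wrap an existing *grpc.ClientConn, send a
// single ExportTraceServiceRequest, and return the server's response. The
// conn and spans arguments stand in for caller-provided values.
func exampleExport(ctx context.Context, conn *grpc.ClientConn, spans []*v1.ResourceSpans) (*ExportTraceServiceResponse, error) {
	client := NewTraceServiceClient(conn)
	// Export issues a unary RPC; a non-nil error here is a transport or
	// whole-request failure, while per-item rejections arrive in the
	// response's PartialSuccess field.
	return client.Export(ctx, &ExportTraceServiceRequest{ResourceSpans: spans})
}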
// TraceServiceServer is the server API for TraceService service.
type TraceServiceServer interface {
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
}
// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
type UnimplementedTraceServiceServer struct {
}
func (*UnimplementedTraceServiceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
s.RegisterService(&_TraceService_serviceDesc, srv)
}
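// exampleTraceServer is an illustrative sketch (not part of the generated
// code): embedding UnimplementedTraceServiceServer keeps the implementation
// forward compatible as new RPCs are added, and this Export accepts every
// span, signalling full success with a zero-value response.
type exampleTraceServer struct {
	UnimplementedTraceServiceServer
}
func (s *exampleTraceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
	// A zero-value ExportTraceServiceResponse (empty PartialSuccess) means
	// every span in req was accepted.
	return &ExportTraceServiceResponse{}, nil
}
// Registration, assuming an existing *grpc.Server named grpcServer:
//
//	RegisterTraceServiceServer(grpcServer, &exampleTraceServer{})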
func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportTraceServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TraceServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var _TraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: _TraceService_Export_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
}
func (m *ExportTraceServiceRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportTraceServiceRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportTraceServiceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceSpans) > 0 {
for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTraceService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
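// Editorial note (not generated code): MarshalToSizedBuffer fills dAtA from
// the end toward the front (i starts at len(dAtA) and decreases), so repeated
// fields are emitted in reverse order and each submessage's varint length
// prefix can be written immediately in front of its body without a separate
// sizing pass. The returned len(dAtA) - i is the number of bytes occupying
// the tail of the buffer, which Marshal then slices off with dAtA[:n].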
func (m *ExportTraceServiceResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportTraceServiceResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportTraceServiceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.PartialSuccess.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTraceService(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ExportTracePartialSuccess) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExportTracePartialSuccess) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExportTracePartialSuccess) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ErrorMessage) > 0 {
i -= len(m.ErrorMessage)
copy(dAtA[i:], m.ErrorMessage)
i = encodeVarintTraceService(dAtA, i, uint64(len(m.ErrorMessage)))
i--
dAtA[i] = 0x12
}
if m.RejectedSpans != 0 {
i = encodeVarintTraceService(dAtA, i, uint64(m.RejectedSpans))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintTraceService(dAtA []byte, offset int, v uint64) int {
offset -= sovTraceService(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
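// Editorial worked example (not generated code): encodeVarintTraceService
// emits a base-128 varint, least-significant 7 bits first, with the high bit
// of each byte used as a continuation flag. For v = 300 it first reserves
// sovTraceService(300) = 2 bytes by stepping offset back, then writes:
//
//	300 & 0x7f | 0x80 -> 0xac  (low 7 bits, continuation bit set)
//	300 >> 7 = 2      -> 0x02  (final byte, high bit clear)
//
// so 300 occupies the two bytes 0xac 0x02 and the function returns the
// rewound starting offset.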
func (m *ExportTraceServiceRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceSpans) > 0 {
for _, e := range m.ResourceSpans {
l = e.Size()
n += 1 + l + sovTraceService(uint64(l))
}
}
return n
}
func (m *ExportTraceServiceResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.PartialSuccess.Size()
n += 1 + l + sovTraceService(uint64(l))
return n
}
func (m *ExportTracePartialSuccess) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.RejectedSpans != 0 {
n += 1 + sovTraceService(uint64(m.RejectedSpans))
}
l = len(m.ErrorMessage)
if l > 0 {
n += 1 + l + sovTraceService(uint64(l))
}
return n
}
func sovTraceService(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTraceService(x uint64) (n int) {
return sovTraceService(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
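// Editorial worked example (not generated code): sozTraceService sizes a
// zigzag-encoded signed varint. (x << 1) ^ (x >> 63) interleaves signed
// values into small unsigned ones so numbers near zero stay short:
//
//	0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
//
// so, for example, sozTraceService(-1) == sovTraceService(1) == 1 byte.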
func (m *ExportTraceServiceRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportTraceServiceRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportTraceServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTraceService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTraceService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceSpans = append(m.ResourceSpans, &v1.ResourceSpans{})
if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTraceService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTraceService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportTraceServiceResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportTraceServiceResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportTraceServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTraceService
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTraceService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PartialSuccess.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTraceService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTraceService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExportTracePartialSuccess) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExportTracePartialSuccess: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExportTracePartialSuccess: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
}
m.RejectedSpans = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.RejectedSpans |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTraceService
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTraceService
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTraceService
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ErrorMessage = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTraceService(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTraceService
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTraceService(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTraceService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTraceService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTraceService
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTraceService
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTraceService
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTraceService
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
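// Editorial note (not generated code): skipTraceService consumes exactly one
// top-level field, tracking `depth` so that legacy start-group (wire type 3)
// and end-group (wire type 4) markers stay balanced, and returns the number
// of bytes to hop over. A worked example for an unknown string field 15
// holding "hi":
//
//	0x7a 0x02 'h' 'i'   // tag (15<<3)|2 = 0x7a, length 2, payload
//
// skipTraceService returns (4, nil), letting Unmarshal advance past it.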
var (
ErrInvalidLengthTraceService = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTraceService = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTraceService = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/common/v1/common.proto
package v1
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// AnyValue is used to represent any type of attribute value. AnyValue may contain a
// primitive value such as a string or integer, or it may contain an arbitrary nested
// object containing arrays, key-value lists and primitives.
type AnyValue struct {
// The value is one of the listed fields. It is valid for all values to be unspecified,
// in which case this AnyValue is considered to be "empty".
//
// Types that are valid to be assigned to Value:
// *AnyValue_StringValue
// *AnyValue_BoolValue
// *AnyValue_IntValue
// *AnyValue_DoubleValue
// *AnyValue_ArrayValue
// *AnyValue_KvlistValue
// *AnyValue_BytesValue
Value isAnyValue_Value `protobuf_oneof:"value"`
}
func (m *AnyValue) Reset() { *m = AnyValue{} }
func (m *AnyValue) String() string { return proto.CompactTextString(m) }
func (*AnyValue) ProtoMessage() {}
func (*AnyValue) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{0}
}
func (m *AnyValue) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *AnyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_AnyValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *AnyValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_AnyValue.Merge(m, src)
}
func (m *AnyValue) XXX_Size() int {
return m.Size()
}
func (m *AnyValue) XXX_DiscardUnknown() {
xxx_messageInfo_AnyValue.DiscardUnknown(m)
}
var xxx_messageInfo_AnyValue proto.InternalMessageInfo
type isAnyValue_Value interface {
isAnyValue_Value()
MarshalTo([]byte) (int, error)
Size() int
}
type AnyValue_StringValue struct {
StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"`
}
type AnyValue_BoolValue struct {
BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"`
}
type AnyValue_IntValue struct {
IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"`
}
type AnyValue_DoubleValue struct {
DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"`
}
type AnyValue_ArrayValue struct {
ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof" json:"array_value,omitempty"`
}
type AnyValue_KvlistValue struct {
KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof" json:"kvlist_value,omitempty"`
}
type AnyValue_BytesValue struct {
BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof" json:"bytes_value,omitempty"`
}
func (*AnyValue_StringValue) isAnyValue_Value() {}
func (*AnyValue_BoolValue) isAnyValue_Value() {}
func (*AnyValue_IntValue) isAnyValue_Value() {}
func (*AnyValue_DoubleValue) isAnyValue_Value() {}
func (*AnyValue_ArrayValue) isAnyValue_Value() {}
func (*AnyValue_KvlistValue) isAnyValue_Value() {}
func (*AnyValue_BytesValue) isAnyValue_Value() {}
func (m *AnyValue) GetValue() isAnyValue_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *AnyValue) GetStringValue() string {
if x, ok := m.GetValue().(*AnyValue_StringValue); ok {
return x.StringValue
}
return ""
}
func (m *AnyValue) GetBoolValue() bool {
if x, ok := m.GetValue().(*AnyValue_BoolValue); ok {
return x.BoolValue
}
return false
}
func (m *AnyValue) GetIntValue() int64 {
if x, ok := m.GetValue().(*AnyValue_IntValue); ok {
return x.IntValue
}
return 0
}
func (m *AnyValue) GetDoubleValue() float64 {
if x, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
return x.DoubleValue
}
return 0
}
func (m *AnyValue) GetArrayValue() *ArrayValue {
if x, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
return x.ArrayValue
}
return nil
}
func (m *AnyValue) GetKvlistValue() *KeyValueList {
if x, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
return x.KvlistValue
}
return nil
}
func (m *AnyValue) GetBytesValue() []byte {
if x, ok := m.GetValue().(*AnyValue_BytesValue); ok {
return x.BytesValue
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*AnyValue) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*AnyValue_StringValue)(nil),
(*AnyValue_BoolValue)(nil),
(*AnyValue_IntValue)(nil),
(*AnyValue_DoubleValue)(nil),
(*AnyValue_ArrayValue)(nil),
(*AnyValue_KvlistValue)(nil),
(*AnyValue_BytesValue)(nil),
}
}
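// exampleAnyValues is an illustrative sketch (not part of the generated code)
// showing how the oneof wrapper types populate AnyValue: exactly one wrapper
// is assigned to Value, and the typed getters return zero values for every
// other variant.
func exampleAnyValues() []*AnyValue {
	return []*AnyValue{
		{Value: &AnyValue_StringValue{StringValue: "a string"}},
		{Value: &AnyValue_BoolValue{BoolValue: true}},
		{Value: &AnyValue_IntValue{IntValue: 42}},
		{Value: &AnyValue_DoubleValue{DoubleValue: 3.14}},
		{Value: &AnyValue_BytesValue{BytesValue: []byte{0x01, 0x02}}},
	}
}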
// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
// since oneof in AnyValue does not allow repeated fields.
type ArrayValue struct {
// Array of values. The array may be empty (contain 0 elements).
Values []AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
}
func (m *ArrayValue) Reset() { *m = ArrayValue{} }
func (m *ArrayValue) String() string { return proto.CompactTextString(m) }
func (*ArrayValue) ProtoMessage() {}
func (*ArrayValue) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{1}
}
func (m *ArrayValue) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ArrayValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ArrayValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ArrayValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArrayValue.Merge(m, src)
}
func (m *ArrayValue) XXX_Size() int {
return m.Size()
}
func (m *ArrayValue) XXX_DiscardUnknown() {
xxx_messageInfo_ArrayValue.DiscardUnknown(m)
}
var xxx_messageInfo_ArrayValue proto.InternalMessageInfo
func (m *ArrayValue) GetValues() []AnyValue {
if m != nil {
return m.Values
}
return nil
}
// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
// are semantically equivalent.
type KeyValueList struct {
// A collection of key/value pairs. The list may be empty (may
// contain 0 elements).
// The keys MUST be unique (it is not allowed to have more than one
// value with the same key).
Values []KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values"`
}
func (m *KeyValueList) Reset() { *m = KeyValueList{} }
func (m *KeyValueList) String() string { return proto.CompactTextString(m) }
func (*KeyValueList) ProtoMessage() {}
func (*KeyValueList) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{2}
}
func (m *KeyValueList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *KeyValueList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_KeyValueList.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *KeyValueList) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyValueList.Merge(m, src)
}
func (m *KeyValueList) XXX_Size() int {
return m.Size()
}
func (m *KeyValueList) XXX_DiscardUnknown() {
xxx_messageInfo_KeyValueList.DiscardUnknown(m)
}
var xxx_messageInfo_KeyValueList proto.InternalMessageInfo
func (m *KeyValueList) GetValues() []KeyValue {
if m != nil {
return m.Values
}
return nil
}
// KeyValue is a key-value pair that is used to store Span attributes, Link
// attributes, etc.
type KeyValue struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
}
func (m *KeyValue) Reset() { *m = KeyValue{} }
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
func (*KeyValue) ProtoMessage() {}
func (*KeyValue) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{3}
}
func (m *KeyValue) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *KeyValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyValue.Merge(m, src)
}
func (m *KeyValue) XXX_Size() int {
return m.Size()
}
func (m *KeyValue) XXX_DiscardUnknown() {
xxx_messageInfo_KeyValue.DiscardUnknown(m)
}
var xxx_messageInfo_KeyValue proto.InternalMessageInfo
func (m *KeyValue) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *KeyValue) GetValue() AnyValue {
if m != nil {
return m.Value
}
return AnyValue{}
}
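// exampleNestedKeyValue is an illustrative sketch (not part of the generated
// code) of the wrapping described above: a KeyValue whose value is itself a
// KeyValueList, which is only needed because oneof fields cannot be repeated.
func exampleNestedKeyValue() KeyValue {
	return KeyValue{
		Key: "nested",
		Value: AnyValue{Value: &AnyValue_KvlistValue{KvlistValue: &KeyValueList{
			Values: []KeyValue{{Key: "inner", Value: AnyValue{Value: &AnyValue_IntValue{IntValue: 1}}}},
		}}},
	}
}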
// InstrumentationScope is a message representing the instrumentation scope information
// such as the fully qualified name and version.
type InstrumentationScope struct {
// An empty instrumentation scope name means the name is unknown.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// Additional attributes that describe the scope. [Optional].
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Attributes []KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}
func (m *InstrumentationScope) Reset() { *m = InstrumentationScope{} }
func (m *InstrumentationScope) String() string { return proto.CompactTextString(m) }
func (*InstrumentationScope) ProtoMessage() {}
func (*InstrumentationScope) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{4}
}
func (m *InstrumentationScope) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *InstrumentationScope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_InstrumentationScope.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *InstrumentationScope) XXX_Merge(src proto.Message) {
xxx_messageInfo_InstrumentationScope.Merge(m, src)
}
func (m *InstrumentationScope) XXX_Size() int {
return m.Size()
}
func (m *InstrumentationScope) XXX_DiscardUnknown() {
xxx_messageInfo_InstrumentationScope.DiscardUnknown(m)
}
var xxx_messageInfo_InstrumentationScope proto.InternalMessageInfo
func (m *InstrumentationScope) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *InstrumentationScope) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *InstrumentationScope) GetAttributes() []KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *InstrumentationScope) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
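// exampleScope is an illustrative sketch (not part of the generated code) of
// a populated InstrumentationScope; the name, version, and attribute shown
// are hypothetical values.
func exampleScope() InstrumentationScope {
	return InstrumentationScope{
		Name:    "example.com/mylib", // hypothetical scope name
		Version: "1.2.3",             // hypothetical scope version
		Attributes: []KeyValue{
			{Key: "feature", Value: AnyValue{Value: &AnyValue_BoolValue{BoolValue: true}}},
		},
	}
}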
// A reference to an Entity.
// Entity represents an object of interest associated with produced telemetry: e.g. spans, metrics, profiles, or logs.
//
// Status: [Development]
type EntityRef struct {
// The Schema URL, if known. This is the identifier of the Schema that the entity data
// is recorded in. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
//
// This schema_url applies to the data in this message and to the Resource attributes
// referenced by id_keys and description_keys.
// TODO: discuss if we are happy with this somewhat complicated definition of what
// the schema_url applies to.
//
// This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs.
SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
// Defines the type of the entity. MUST NOT change during the lifetime of the entity.
// For example: "service" or "host". This field is required and MUST NOT be empty
// for valid entities.
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
// Attribute keys that identify the entity.
// MUST NOT change during the lifetime of the entity. The identity must contain at least one attribute key.
// These keys MUST exist in the containing {message}.attributes.
IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"`
// Descriptive (non-identifying) attribute keys of the entity.
// MAY change over the lifetime of the entity. MAY be empty.
// These attribute keys are not part of the entity's identity.
// These keys MUST exist in the containing {message}.attributes.
DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"`
}
func (m *EntityRef) Reset() { *m = EntityRef{} }
func (m *EntityRef) String() string { return proto.CompactTextString(m) }
func (*EntityRef) ProtoMessage() {}
func (*EntityRef) Descriptor() ([]byte, []int) {
return fileDescriptor_62ba46dcb97aa817, []int{5}
}
func (m *EntityRef) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *EntityRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_EntityRef.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *EntityRef) XXX_Merge(src proto.Message) {
xxx_messageInfo_EntityRef.Merge(m, src)
}
func (m *EntityRef) XXX_Size() int {
return m.Size()
}
func (m *EntityRef) XXX_DiscardUnknown() {
xxx_messageInfo_EntityRef.DiscardUnknown(m)
}
var xxx_messageInfo_EntityRef proto.InternalMessageInfo
func (m *EntityRef) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
func (m *EntityRef) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *EntityRef) GetIdKeys() []string {
if m != nil {
return m.IdKeys
}
return nil
}
func (m *EntityRef) GetDescriptionKeys() []string {
if m != nil {
return m.DescriptionKeys
}
return nil
}
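// exampleEntityRef is an illustrative sketch (not part of the generated
// code); the schema URL and keys are hypothetical and, per the field comments
// above, the keys must also exist in the containing message's attributes.
func exampleEntityRef() EntityRef {
	return EntityRef{
		SchemaUrl:       "https://opentelemetry.io/schemas/1.0.0", // hypothetical schema URL
		Type:            "service",
		IdKeys:          []string{"service.name"},
		DescriptionKeys: []string{"service.version"},
	}
}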
func init() {
proto.RegisterType((*AnyValue)(nil), "opentelemetry.proto.common.v1.AnyValue")
proto.RegisterType((*ArrayValue)(nil), "opentelemetry.proto.common.v1.ArrayValue")
proto.RegisterType((*KeyValueList)(nil), "opentelemetry.proto.common.v1.KeyValueList")
proto.RegisterType((*KeyValue)(nil), "opentelemetry.proto.common.v1.KeyValue")
proto.RegisterType((*InstrumentationScope)(nil), "opentelemetry.proto.common.v1.InstrumentationScope")
proto.RegisterType((*EntityRef)(nil), "opentelemetry.proto.common.v1.EntityRef")
}
func init() {
proto.RegisterFile("opentelemetry/proto/common/v1/common.proto", fileDescriptor_62ba46dcb97aa817)
}
var fileDescriptor_62ba46dcb97aa817 = []byte{
// 608 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41,
0x14, 0xdf, 0xa1, 0xa5, 0xed, 0xbe, 0xd6, 0x48, 0x26, 0x44, 0x1b, 0x93, 0x96, 0xb5, 0x1e, 0x5c,
0x34, 0x69, 0x03, 0x5e, 0xbc, 0x52, 0x24, 0xa9, 0x01, 0x23, 0x59, 0x84, 0x83, 0x97, 0x66, 0xdb,
0x7d, 0xd6, 0x09, 0xdb, 0x99, 0xcd, 0xec, 0xb4, 0xc9, 0x5e, 0xfd, 0x04, 0x7e, 0x0e, 0x2f, 0x7e,
0x0d, 0x2e, 0x26, 0x1c, 0x3d, 0x19, 0x02, 0x5f, 0xc4, 0xcc, 0x9f, 0x16, 0xe4, 0x00, 0xc1, 0xdb,
0x7b, 0xbf, 0xf7, 0x7b, 0xbf, 0xf7, 0x7e, 0x33, 0x93, 0x81, 0x57, 0x22, 0x43, 0xae, 0x30, 0xc5,
0x29, 0x2a, 0x59, 0xf4, 0x32, 0x29, 0x94, 0xe8, 0x8d, 0xc5, 0x74, 0x2a, 0x78, 0x6f, 0xbe, 0xe5,
0xa2, 0xae, 0x81, 0x69, 0xeb, 0x1f, 0xae, 0x05, 0xbb, 0x8e, 0x31, 0xdf, 0x7a, 0xb6, 0x3e, 0x11,
0x13, 0x61, 0x05, 0x74, 0x64, 0xeb, 0x9d, 0x8b, 0x15, 0xa8, 0xed, 0xf0, 0xe2, 0x24, 0x4e, 0x67,
0x48, 0x5f, 0x40, 0x23, 0x57, 0x92, 0xf1, 0xc9, 0x70, 0xae, 0xf3, 0x26, 0x09, 0x48, 0xe8, 0x0f,
0xbc, 0xa8, 0x6e, 0x51, 0x4b, 0xda, 0x00, 0x18, 0x09, 0x91, 0x3a, 0xca, 0x4a, 0x40, 0xc2, 0xda,
0xc0, 0x8b, 0x7c, 0x8d, 0x59, 0x42, 0x0b, 0x7c, 0xc6, 0x95, 0xab, 0x97, 0x02, 0x12, 0x96, 0x06,
0x5e, 0x54, 0x63, 0x5c, 0x2d, 0x87, 0x24, 0x62, 0x36, 0x4a, 0xd1, 0x31, 0xca, 0x01, 0x09, 0x89,
0x1e, 0x62, 0x51, 0x4b, 0x3a, 0x80, 0x7a, 0x2c, 0x65, 0x5c, 0x38, 0xce, 0x6a, 0x40, 0xc2, 0xfa,
0xf6, 0x66, 0xf7, 0x4e, 0x87, 0xdd, 0x1d, 0xdd, 0x61, 0xfa, 0x07, 0x5e, 0x04, 0xf1, 0x32, 0xa3,
0x87, 0xd0, 0x38, 0x9d, 0xa7, 0x2c, 0x5f, 0x2c, 0x55, 0x31, 0x72, 0xaf, 0xef, 0x91, 0xdb, 0x47,
0xdb, 0x7e, 0xc0, 0x72, 0xa5, 0xf7, 0xb3, 0x12, 0x56, 0xf1, 0x39, 0xd4, 0x47, 0x85, 0xc2, 0xdc,
0x09, 0x56, 0x03, 0x12, 0x36, 0xf4, 0x50, 0x03, 0x1a, 0x4a, 0xbf, 0x0a, 0xab, 0xa6, 0xd8, 0x39,
0x02, 0xb8, 0xde, 0x8c, 0xee, 0x41, 0xc5, 0xc0, 0x79, 0x93, 0x04, 0xa5, 0xb0, 0xbe, 0xfd, 0xf2,
0x3e, 0x53, 0xee, 0x72, 0xfa, 0xe5, 0xb3, 0x3f, 0x1b, 0x5e, 0xe4, 0x9a, 0x3b, 0xc7, 0xd0, 0xb8,
0xb9, 0xdf, 0x83, 0x65, 0x17, 0xcd, 0xb7, 0x64, 0x63, 0xa8, 0x2d, 0x2a, 0x74, 0x0d, 0x4a, 0xa7,
0x58, 0xd8, 0x47, 0x10, 0xe9, 0x90, 0xee, 0x3a, 0x4b, 0xe6, 0xd6, 0x1f, 0xbc, 0xba, 0x3b, 0x8e,
0x5f, 0x04, 0xd6, 0xdf, 0xf3, 0x5c, 0xc9, 0xd9, 0x14, 0xb9, 0x8a, 0x15, 0x13, 0xfc, 0x68, 0x2c,
0x32, 0xa4, 0x14, 0xca, 0x3c, 0x9e, 0xba, 0x57, 0x17, 0x99, 0x98, 0x36, 0xa1, 0x3a, 0x47, 0x99,
0x33, 0xc1, 0xcd, 0x4c, 0x3f, 0x5a, 0xa4, 0xf4, 0x03, 0x40, 0xac, 0x94, 0x64, 0xa3, 0x99, 0xc2,
0xbc, 0x59, 0xfa, 0x1f, 0xd3, 0x37, 0x04, 0xe8, 0x5b, 0x68, 0x26, 0x52, 0x64, 0x19, 0x26, 0xc3,
0x6b, 0x74, 0x38, 0x16, 0x33, 0xae, 0xcc, 0x0b, 0x7d, 0x14, 0x3d, 0x71, 0xf5, 0x9d, 0x65, 0x79,
0x57, 0x57, 0x3b, 0xdf, 0x08, 0xf8, 0x7b, 0x5c, 0x31, 0x55, 0x44, 0xf8, 0x85, 0xb6, 0x00, 0xf2,
0xf1, 0x57, 0x9c, 0xc6, 0xc3, 0x99, 0x4c, 0x9d, 0x15, 0xdf, 0x22, 0xc7, 0x32, 0xd5, 0x1e, 0x55,
0x91, 0xa1, 0x33, 0x63, 0x62, 0xfa, 0x14, 0xaa, 0x2c, 0x19, 0x9e, 0x62, 0x61, 0x6d, 0xf8, 0x51,
0x85, 0x25, 0xfb, 0x58, 0xe4, 0x74, 0x13, 0xd6, 0x12, 0xcc, 0xc7, 0x92, 0x65, 0xfa, 0x90, 0x2c,
0xa3, 0x6c, 0x18, 0x8f, 0x6f, 0xe0, 0x9a, 0xda, 0xff, 0x49, 0xce, 0x2e, 0xdb, 0xe4, 0xfc, 0xb2,
0x4d, 0x2e, 0x2e, 0xdb, 0xe4, 0xfb, 0x55, 0xdb, 0x3b, 0xbf, 0x6a, 0x7b, 0xbf, 0xaf, 0xda, 0x1e,
0x04, 0x4c, 0xdc, 0x7d, 0x2c, 0xfd, 0xfa, 0xae, 0x09, 0x0f, 0x35, 0x7c, 0x48, 0x3e, 0xbf, 0x9b,
0xdc, 0x6e, 0x60, 0xfa, 0xcf, 0x49, 0x53, 0x1c, 0x2b, 0x21, 0x7b, 0x59, 0x12, 0xab, 0xb8, 0xc7,
0xb8, 0x42, 0xc9, 0xe3, 0xb4, 0x67, 0x32, 0xa3, 0x38, 0x41, 0x7e, 0xfd, 0x35, 0xfd, 0x58, 0x69,
0x7d, 0xcc, 0x90, 0x7f, 0x5a, 0x6a, 0x18, 0xf5, 0xae, 0x9d, 0xd4, 0x3d, 0xd9, 0x1a, 0x55, 0x4c,
0xcf, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x8b, 0xd4, 0x3b, 0xe2, 0x04, 0x00, 0x00,
}
func (m *AnyValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AnyValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Value != nil {
{
size := m.Value.Size()
i -= size
if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
return len(dAtA) - i, nil
}
func (m *AnyValue_StringValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= len(m.StringValue)
copy(dAtA[i:], m.StringValue)
i = encodeVarintCommon(dAtA, i, uint64(len(m.StringValue)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *AnyValue_BoolValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i--
if m.BoolValue {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x10
return len(dAtA) - i, nil
}
func (m *AnyValue_IntValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i = encodeVarintCommon(dAtA, i, uint64(m.IntValue))
i--
dAtA[i] = 0x18
return len(dAtA) - i, nil
}
func (m *AnyValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue))))
i--
dAtA[i] = 0x21
return len(dAtA) - i, nil
}
func (m *AnyValue_ArrayValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.ArrayValue != nil {
{
size, err := m.ArrayValue.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
return len(dAtA) - i, nil
}
func (m *AnyValue_KvlistValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_KvlistValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.KvlistValue != nil {
{
size, err := m.KvlistValue.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
return len(dAtA) - i, nil
}
func (m *AnyValue_BytesValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *AnyValue_BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.BytesValue != nil {
i -= len(m.BytesValue)
copy(dAtA[i:], m.BytesValue)
i = encodeVarintCommon(dAtA, i, uint64(len(m.BytesValue)))
i--
dAtA[i] = 0x3a
}
return len(dAtA) - i, nil
}
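// Editorial note (not generated code): the literal tag bytes written above
// are each (field_number << 3) | wire_type, matching the AnyValue oneof:
//
//	0x0a = (1<<3)|2 string_value   0x10 = (2<<3)|0 bool_value
//	0x18 = (3<<3)|0 int_value      0x21 = (4<<3)|1 double_value (fixed64)
//	0x2a = (5<<3)|2 array_value    0x32 = (6<<3)|2 kvlist_value
//	0x3a = (7<<3)|2 bytes_value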
func (m *ArrayValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ArrayValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ArrayValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Values) > 0 {
for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *KeyValueList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *KeyValueList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *KeyValueList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Values) > 0 {
for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *KeyValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Key) > 0 {
i -= len(m.Key)
copy(dAtA[i:], m.Key)
i = encodeVarintCommon(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *InstrumentationScope) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *InstrumentationScope) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *InstrumentationScope) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.DroppedAttributesCount != 0 {
i = encodeVarintCommon(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x20
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommon(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Version) > 0 {
i -= len(m.Version)
copy(dAtA[i:], m.Version)
i = encodeVarintCommon(dAtA, i, uint64(len(m.Version)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintCommon(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *EntityRef) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *EntityRef) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *EntityRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.DescriptionKeys) > 0 {
for iNdEx := len(m.DescriptionKeys) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DescriptionKeys[iNdEx])
copy(dAtA[i:], m.DescriptionKeys[iNdEx])
i = encodeVarintCommon(dAtA, i, uint64(len(m.DescriptionKeys[iNdEx])))
i--
dAtA[i] = 0x22
}
}
if len(m.IdKeys) > 0 {
for iNdEx := len(m.IdKeys) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.IdKeys[iNdEx])
copy(dAtA[i:], m.IdKeys[iNdEx])
i = encodeVarintCommon(dAtA, i, uint64(len(m.IdKeys[iNdEx])))
i--
dAtA[i] = 0x1a
}
}
if len(m.Type) > 0 {
i -= len(m.Type)
copy(dAtA[i:], m.Type)
i = encodeVarintCommon(dAtA, i, uint64(len(m.Type)))
i--
dAtA[i] = 0x12
}
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintCommon(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintCommon(dAtA []byte, offset int, v uint64) int {
offset -= sovCommon(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *AnyValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Value != nil {
n += m.Value.Size()
}
return n
}
func (m *AnyValue_StringValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.StringValue)
n += 1 + l + sovCommon(uint64(l))
return n
}
func (m *AnyValue_BoolValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 2
return n
}
func (m *AnyValue_IntValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 1 + sovCommon(uint64(m.IntValue))
return n
}
func (m *AnyValue_DoubleValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *AnyValue_ArrayValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.ArrayValue != nil {
l = m.ArrayValue.Size()
n += 1 + l + sovCommon(uint64(l))
}
return n
}
func (m *AnyValue_KvlistValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.KvlistValue != nil {
l = m.KvlistValue.Size()
n += 1 + l + sovCommon(uint64(l))
}
return n
}
func (m *AnyValue_BytesValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.BytesValue != nil {
l = len(m.BytesValue)
n += 1 + l + sovCommon(uint64(l))
}
return n
}
func (m *ArrayValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Values) > 0 {
for _, e := range m.Values {
l = e.Size()
n += 1 + l + sovCommon(uint64(l))
}
}
return n
}
func (m *KeyValueList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Values) > 0 {
for _, e := range m.Values {
l = e.Size()
n += 1 + l + sovCommon(uint64(l))
}
}
return n
}
func (m *KeyValue) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovCommon(uint64(l))
}
l = m.Value.Size()
n += 1 + l + sovCommon(uint64(l))
return n
}
func (m *InstrumentationScope) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovCommon(uint64(l))
}
l = len(m.Version)
if l > 0 {
n += 1 + l + sovCommon(uint64(l))
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovCommon(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovCommon(uint64(m.DroppedAttributesCount))
}
return n
}
func (m *EntityRef) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovCommon(uint64(l))
}
l = len(m.Type)
if l > 0 {
n += 1 + l + sovCommon(uint64(l))
}
if len(m.IdKeys) > 0 {
for _, s := range m.IdKeys {
l = len(s)
n += 1 + l + sovCommon(uint64(l))
}
}
if len(m.DescriptionKeys) > 0 {
for _, s := range m.DescriptionKeys {
l = len(s)
n += 1 + l + sovCommon(uint64(l))
}
}
return n
}
func sovCommon(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozCommon(x uint64) (n int) {
return sovCommon(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *AnyValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AnyValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = &AnyValue_StringValue{string(dAtA[iNdEx:postIndex])}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.Value = &AnyValue_BoolValue{b}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Value = &AnyValue_IntValue{v}
case 4:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = &AnyValue_DoubleValue{float64(math.Float64frombits(v))}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &ArrayValue{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Value = &AnyValue_ArrayValue{v}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &KeyValueList{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Value = &AnyValue_KvlistValue{v}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := make([]byte, postIndex-iNdEx)
copy(v, dAtA[iNdEx:postIndex])
m.Value = &AnyValue_BytesValue{v}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
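// exampleAnyValueRoundTrip is an illustrative sketch (not part of the
// generated code) exercising Marshal and Unmarshal together: a string
// AnyValue survives a round trip through its wire encoding.
func exampleAnyValueRoundTrip() error {
	in := &AnyValue{Value: &AnyValue_StringValue{StringValue: "hello"}}
	raw, err := in.Marshal()
	if err != nil {
		return err
	}
	out := &AnyValue{}
	if err := out.Unmarshal(raw); err != nil {
		return err
	}
	if out.GetStringValue() != in.GetStringValue() {
		return fmt.Errorf("round trip mismatch: got %q", out.GetStringValue())
	}
	return nil
}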
func (m *ArrayValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Values = append(m.Values, AnyValue{})
if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *KeyValueList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Values = append(m.Values, KeyValue{})
if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *KeyValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *InstrumentationScope) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: InstrumentationScope: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: InstrumentationScope: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Version = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *EntityRef) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: EntityRef: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EntityRef: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IdKeys = append(m.IdKeys, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommon
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommon
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommon
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DescriptionKeys = append(m.DescriptionKeys, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommon(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommon
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
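// skipCommon advances past one complete encoded field, following nested
// groups via the depth counter, and returns the number of bytes skipped.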
func skipCommon(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommon
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommon
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommon
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthCommon
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupCommon
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthCommon
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthCommon = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowCommon = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupCommon = fmt.Errorf("proto: unexpected end of group")
)
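// Every Unmarshal method in this file open-codes the same base-128 varint
// decode loop: each byte contributes its low 7 bits, and a byte below 0x80
// terminates the value. The helper below is an illustrative sketch of that
// pattern only; it is not part of the generated code and nothing calls it.
func decodeVarintExample(buf []byte) (v uint64, n int, err error) {
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
// More than ten bytes cannot encode a 64-bit value.
return 0, 0, ErrIntOverflowCommon
}
if n >= len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
b := buf[n]
n++
v |= uint64(b&0x7F) << shift
if b < 0x80 { // high bit clear: final byte of the varint
return v, n, nil
}
}
}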
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/logs/v1/logs.proto
package v1
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Possible values for LogRecord.SeverityNumber.
type SeverityNumber int32
const (
// UNSPECIFIED is the default SeverityNumber; it MUST NOT be used.
SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0
SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1
SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2
SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3
SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4
SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5
SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6
SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7
SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8
SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9
SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10
SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11
SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12
SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13
SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14
SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15
SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16
SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17
SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18
SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19
SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20
SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21
SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22
SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23
SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24
)
var SeverityNumber_name = map[int32]string{
0: "SEVERITY_NUMBER_UNSPECIFIED",
1: "SEVERITY_NUMBER_TRACE",
2: "SEVERITY_NUMBER_TRACE2",
3: "SEVERITY_NUMBER_TRACE3",
4: "SEVERITY_NUMBER_TRACE4",
5: "SEVERITY_NUMBER_DEBUG",
6: "SEVERITY_NUMBER_DEBUG2",
7: "SEVERITY_NUMBER_DEBUG3",
8: "SEVERITY_NUMBER_DEBUG4",
9: "SEVERITY_NUMBER_INFO",
10: "SEVERITY_NUMBER_INFO2",
11: "SEVERITY_NUMBER_INFO3",
12: "SEVERITY_NUMBER_INFO4",
13: "SEVERITY_NUMBER_WARN",
14: "SEVERITY_NUMBER_WARN2",
15: "SEVERITY_NUMBER_WARN3",
16: "SEVERITY_NUMBER_WARN4",
17: "SEVERITY_NUMBER_ERROR",
18: "SEVERITY_NUMBER_ERROR2",
19: "SEVERITY_NUMBER_ERROR3",
20: "SEVERITY_NUMBER_ERROR4",
21: "SEVERITY_NUMBER_FATAL",
22: "SEVERITY_NUMBER_FATAL2",
23: "SEVERITY_NUMBER_FATAL3",
24: "SEVERITY_NUMBER_FATAL4",
}
var SeverityNumber_value = map[string]int32{
"SEVERITY_NUMBER_UNSPECIFIED": 0,
"SEVERITY_NUMBER_TRACE": 1,
"SEVERITY_NUMBER_TRACE2": 2,
"SEVERITY_NUMBER_TRACE3": 3,
"SEVERITY_NUMBER_TRACE4": 4,
"SEVERITY_NUMBER_DEBUG": 5,
"SEVERITY_NUMBER_DEBUG2": 6,
"SEVERITY_NUMBER_DEBUG3": 7,
"SEVERITY_NUMBER_DEBUG4": 8,
"SEVERITY_NUMBER_INFO": 9,
"SEVERITY_NUMBER_INFO2": 10,
"SEVERITY_NUMBER_INFO3": 11,
"SEVERITY_NUMBER_INFO4": 12,
"SEVERITY_NUMBER_WARN": 13,
"SEVERITY_NUMBER_WARN2": 14,
"SEVERITY_NUMBER_WARN3": 15,
"SEVERITY_NUMBER_WARN4": 16,
"SEVERITY_NUMBER_ERROR": 17,
"SEVERITY_NUMBER_ERROR2": 18,
"SEVERITY_NUMBER_ERROR3": 19,
"SEVERITY_NUMBER_ERROR4": 20,
"SEVERITY_NUMBER_FATAL": 21,
"SEVERITY_NUMBER_FATAL2": 22,
"SEVERITY_NUMBER_FATAL3": 23,
"SEVERITY_NUMBER_FATAL4": 24,
}
func (x SeverityNumber) String() string {
return proto.EnumName(SeverityNumber_name, int32(x))
}
func (SeverityNumber) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{0}
}
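// SeverityNumber values are ordered from least to most severe, so simple
// numeric range checks can be used for filtering. An illustrative helper
// (not part of the generated API):
func severityAtLeastWarn(n SeverityNumber) bool {
// WARN is 13; all ERROR and FATAL variants are numerically greater.
return n >= SeverityNumber_SEVERITY_NUMBER_WARN
}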
// LogRecordFlags represents constants used to interpret the
// LogRecord.flags field, which is of protobuf 'fixed32' type and is to
// be used as a bit field. Each non-zero value defined in this enum is
// a bit mask. To extract a bit field, for example, use an
// expression like:
//
// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
type LogRecordFlags int32
const (
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0
// Bits 0-7 are used for trace flags.
LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255
)
var LogRecordFlags_name = map[int32]string{
0: "LOG_RECORD_FLAGS_DO_NOT_USE",
255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK",
}
var LogRecordFlags_value = map[string]int32{
"LOG_RECORD_FLAGS_DO_NOT_USE": 0,
"LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255,
}
func (x LogRecordFlags) String() string {
return proto.EnumName(LogRecordFlags_name, int32(x))
}
func (LogRecordFlags) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{1}
}
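// An illustrative sketch (not part of the generated API) of the masking
// pattern described in the LogRecordFlags comment above: readers extract the
// 8-bit W3C trace flags by masking, never by assuming the upper bits are zero.
func traceFlagsExample(flags uint32) uint32 {
return flags & uint32(LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
}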
// LogsData represents logs data that can be stored in persistent storage,
// or can be embedded by other protocols that transfer OTLP logs data but do not
// implement the OTLP protocol.
//
// The main difference between this message and the collector protocol is that
// this message carries no "control" or "metadata" fields specific to the
// OTLP protocol.
//
// When new fields are added to this message, the OTLP request MUST be updated
// as well.
type LogsData struct {
// An array of ResourceLogs.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding it further, in which case this
// array will contain multiple elements.
ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"`
}
func (m *LogsData) Reset() { *m = LogsData{} }
func (m *LogsData) String() string { return proto.CompactTextString(m) }
func (*LogsData) ProtoMessage() {}
func (*LogsData) Descriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{0}
}
func (m *LogsData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LogsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_LogsData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *LogsData) XXX_Merge(src proto.Message) {
xxx_messageInfo_LogsData.Merge(m, src)
}
func (m *LogsData) XXX_Size() int {
return m.Size()
}
func (m *LogsData) XXX_DiscardUnknown() {
xxx_messageInfo_LogsData.DiscardUnknown(m)
}
var xxx_messageInfo_LogsData proto.InternalMessageInfo
func (m *LogsData) GetResourceLogs() []*ResourceLogs {
if m != nil {
return m.ResourceLogs
}
return nil
}
// A collection of ScopeLogs from a Resource.
type ResourceLogs struct {
DeprecatedScopeLogs []*ScopeLogs `protobuf:"bytes,1000,rep,name=deprecated_scope_logs,json=deprecatedScopeLogs,proto3" json:"deprecated_scope_logs,omitempty"`
// The resource for the logs in this message.
// If this field is not set then resource info is unknown.
Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
// A list of ScopeLogs that originate from a resource.
ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_logs" field, which has its own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ResourceLogs) Reset() { *m = ResourceLogs{} }
func (m *ResourceLogs) String() string { return proto.CompactTextString(m) }
func (*ResourceLogs) ProtoMessage() {}
func (*ResourceLogs) Descriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{1}
}
func (m *ResourceLogs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResourceLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ResourceLogs.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ResourceLogs) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceLogs.Merge(m, src)
}
func (m *ResourceLogs) XXX_Size() int {
return m.Size()
}
func (m *ResourceLogs) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceLogs.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceLogs proto.InternalMessageInfo
func (m *ResourceLogs) GetDeprecatedScopeLogs() []*ScopeLogs {
if m != nil {
return m.DeprecatedScopeLogs
}
return nil
}
func (m *ResourceLogs) GetResource() v1.Resource {
if m != nil {
return m.Resource
}
return v1.Resource{}
}
func (m *ResourceLogs) GetScopeLogs() []*ScopeLogs {
if m != nil {
return m.ScopeLogs
}
return nil
}
func (m *ResourceLogs) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A collection of Logs produced by a Scope.
type ScopeLogs struct {
// The instrumentation scope information for the logs in this message.
// Semantically, when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
// A list of log records.
LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the log data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all logs in the "log_records" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ScopeLogs) Reset() { *m = ScopeLogs{} }
func (m *ScopeLogs) String() string { return proto.CompactTextString(m) }
func (*ScopeLogs) ProtoMessage() {}
func (*ScopeLogs) Descriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{2}
}
func (m *ScopeLogs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScopeLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScopeLogs.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ScopeLogs) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScopeLogs.Merge(m, src)
}
func (m *ScopeLogs) XXX_Size() int {
return m.Size()
}
func (m *ScopeLogs) XXX_DiscardUnknown() {
xxx_messageInfo_ScopeLogs.DiscardUnknown(m)
}
var xxx_messageInfo_ScopeLogs proto.InternalMessageInfo
func (m *ScopeLogs) GetScope() v11.InstrumentationScope {
if m != nil {
return m.Scope
}
return v11.InstrumentationScope{}
}
func (m *ScopeLogs) GetLogRecords() []*LogRecord {
if m != nil {
return m.LogRecords
}
return nil
}
func (m *ScopeLogs) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A log record according to OpenTelemetry Log Data Model:
// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md
type LogRecord struct {
// time_unix_nano is the time when the event occurred.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
// Value of 0 indicates unknown or missing timestamp.
TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// Time when the event was observed by the collection system.
// For events that originate in OpenTelemetry (e.g. using the OpenTelemetry Logging SDK)
// this timestamp is typically set at generation time and is equal to Timestamp.
// For events originating externally and collected by OpenTelemetry (e.g. using the
// Collector), this is the time when OpenTelemetry's code observed the event, as
// measured by the clock of the OpenTelemetry code. This field MUST be set once the
// event is observed by OpenTelemetry.
//
// When converting OpenTelemetry log data to formats that support only one timestamp,
// or when a recipient that supports only one timestamp internally receives
// OpenTelemetry log data, the following logic is recommended (see the illustrative
// helper after the getters below):
//   - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
// Value of 0 indicates unknown or missing timestamp.
ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"`
// Numerical value of the severity, normalized to values described in Log Data Model.
// [Optional].
SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"`
// The severity text (also known as log level). The original string representation as
// it is known at the source. [Optional].
SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"`
// A value containing the body of the log record. This can be, for example, a
// human-readable string message (including multi-line) describing the event in
// free form, or structured data composed of arrays and maps of other values. [Optional].
Body v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body"`
// Additional attributes that describe the specific event occurrence. [Optional].
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Attributes []v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes"`
DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Flags, a bit field. The 8 least significant bits are the trace flags as
// defined in the W3C Trace Context specification. The 24 most significant bits
// are reserved and must be set to 0. Readers must not assume that the 24 most
// significant bits will be zero, and must correctly mask the bits when reading
// the 8-bit trace flags (use flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional].
Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"`
// A unique identifier for a trace. All logs from the same trace share
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
// of length other than 16 bytes is considered invalid (an empty string in
// OTLP/JSON is zero-length and thus also invalid).
//
// This field is optional.
//
// The receivers SHOULD assume that the log record is not associated with a
// trace if any of the following is true:
// - the field is not present,
// - the field contains an invalid value.
TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
// A unique identifier for a span within a trace, assigned when the span
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
// other than 8 bytes is considered invalid (an empty string in OTLP/JSON is
// zero-length and thus also invalid).
//
// This field is optional. If the sender specifies a valid span_id then it SHOULD also
// specify a valid trace_id.
//
// The receivers SHOULD assume that the log record is not associated with a
// span if any of the following is true:
// - the field is not present,
// - the field contains an invalid value.
SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
// A unique identifier of event category/type.
// All events with the same event_name are expected to conform to the same
// schema for both their attributes and their body.
//
// Recommended to be fully qualified and short (no longer than 256 characters).
//
// Presence of event_name on the log record identifies this record
// as an event.
//
// [Optional].
EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"`
}
func (m *LogRecord) Reset() { *m = LogRecord{} }
func (m *LogRecord) String() string { return proto.CompactTextString(m) }
func (*LogRecord) ProtoMessage() {}
func (*LogRecord) Descriptor() ([]byte, []int) {
return fileDescriptor_d1c030a3ec7e961e, []int{3}
}
func (m *LogRecord) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *LogRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_LogRecord.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *LogRecord) XXX_Merge(src proto.Message) {
xxx_messageInfo_LogRecord.Merge(m, src)
}
func (m *LogRecord) XXX_Size() int {
return m.Size()
}
func (m *LogRecord) XXX_DiscardUnknown() {
xxx_messageInfo_LogRecord.DiscardUnknown(m)
}
var xxx_messageInfo_LogRecord proto.InternalMessageInfo
func (m *LogRecord) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *LogRecord) GetObservedTimeUnixNano() uint64 {
if m != nil {
return m.ObservedTimeUnixNano
}
return 0
}
func (m *LogRecord) GetSeverityNumber() SeverityNumber {
if m != nil {
return m.SeverityNumber
}
return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED
}
func (m *LogRecord) GetSeverityText() string {
if m != nil {
return m.SeverityText
}
return ""
}
func (m *LogRecord) GetBody() v11.AnyValue {
if m != nil {
return m.Body
}
return v11.AnyValue{}
}
func (m *LogRecord) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *LogRecord) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
func (m *LogRecord) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
func (m *LogRecord) GetEventName() string {
if m != nil {
return m.EventName
}
return ""
}
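// effectiveTimestampExample is an illustrative helper (not part of the
// generated API) applying the single-timestamp recommendation from the field
// comments above: prefer time_unix_nano and fall back to
// observed_time_unix_nano when it is zero (unknown or missing).
func effectiveTimestampExample(r *LogRecord) uint64 {
if t := r.GetTimeUnixNano(); t != 0 {
return t
}
return r.GetObservedTimeUnixNano()
}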
func init() {
proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value)
proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value)
proto.RegisterType((*LogsData)(nil), "opentelemetry.proto.logs.v1.LogsData")
proto.RegisterType((*ResourceLogs)(nil), "opentelemetry.proto.logs.v1.ResourceLogs")
proto.RegisterType((*ScopeLogs)(nil), "opentelemetry.proto.logs.v1.ScopeLogs")
proto.RegisterType((*LogRecord)(nil), "opentelemetry.proto.logs.v1.LogRecord")
}
func init() {
proto.RegisterFile("opentelemetry/proto/logs/v1/logs.proto", fileDescriptor_d1c030a3ec7e961e)
}
var fileDescriptor_d1c030a3ec7e961e = []byte{
// 971 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46,
0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3,
0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90,
0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f,
0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b,
0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f,
0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92,
0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd,
0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a,
0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86,
0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2,
0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f,
0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5,
0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26,
0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b,
0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95,
0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40,
0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d,
0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9,
0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05,
0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9,
0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe,
0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41,
0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca,
0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b,
0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7,
0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b,
0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd,
0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c,
0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d,
0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83,
0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd,
0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8,
0x2b, 0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31,
0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48,
0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18,
0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd,
0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48,
0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f,
0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99,
0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5,
0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57,
0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 0xf7, 0x5c,
0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7,
0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55,
0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e,
0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07,
0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0,
0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17,
0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19,
0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82,
0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3,
0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa,
0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e,
0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f,
0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9,
0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9,
0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f,
0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b,
0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00,
0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00,
}
func (m *LogsData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *LogsData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *LogsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceLogs) > 0 {
for iNdEx := len(m.ResourceLogs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ResourceLogs) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResourceLogs) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ResourceLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.DeprecatedScopeLogs) > 0 {
for iNdEx := len(m.DeprecatedScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DeprecatedScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xc2
}
}
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.ScopeLogs) > 0 {
for iNdEx := len(m.ScopeLogs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ScopeLogs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ScopeLogs) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScopeLogs) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ScopeLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintLogs(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.LogRecords) > 0 {
for iNdEx := len(m.LogRecords) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.LogRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *LogRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *LogRecord) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.EventName) > 0 {
i -= len(m.EventName)
copy(dAtA[i:], m.EventName)
i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName)))
i--
dAtA[i] = 0x62
}
if m.ObservedTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano))
i--
dAtA[i] = 0x59
}
{
size := m.SpanId.Size()
i -= size
if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x52
{
size := m.TraceId.Size()
i -= size
if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
if m.Flags != 0 {
i -= 4
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
i--
dAtA[i] = 0x45
}
if m.DroppedAttributesCount != 0 {
i = encodeVarintLogs(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x38
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
}
{
size, err := m.Body.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintLogs(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
if len(m.SeverityText) > 0 {
i -= len(m.SeverityText)
copy(dAtA[i:], m.SeverityText)
i = encodeVarintLogs(dAtA, i, uint64(len(m.SeverityText)))
i--
dAtA[i] = 0x1a
}
if m.SeverityNumber != 0 {
i = encodeVarintLogs(dAtA, i, uint64(m.SeverityNumber))
i--
dAtA[i] = 0x10
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x9
}
return len(dAtA) - i, nil
}
func encodeVarintLogs(dAtA []byte, offset int, v uint64) int {
offset -= sovLogs(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
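// encodeVarintLogs writes v as a base-128 varint ending just before offset
// and returns the index of the varint's first byte; the MarshalToSizedBuffer
// methods above rely on this to fill a pre-sized buffer from back to front.
// A minimal, illustrative round trip (not part of the generated code):
func encodeVarintExample(v uint64) []byte {
buf := make([]byte, sovLogs(v)) // sovLogs reports the encoded width in bytes
encodeVarintLogs(buf, len(buf), v)
return buf
}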
func (m *LogsData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceLogs) > 0 {
for _, e := range m.ResourceLogs {
l = e.Size()
n += 1 + l + sovLogs(uint64(l))
}
}
return n
}
func (m *ResourceLogs) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Resource.Size()
n += 1 + l + sovLogs(uint64(l))
if len(m.ScopeLogs) > 0 {
for _, e := range m.ScopeLogs {
l = e.Size()
n += 1 + l + sovLogs(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovLogs(uint64(l))
}
if len(m.DeprecatedScopeLogs) > 0 {
for _, e := range m.DeprecatedScopeLogs {
l = e.Size()
n += 2 + l + sovLogs(uint64(l))
}
}
return n
}
func (m *ScopeLogs) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Scope.Size()
n += 1 + l + sovLogs(uint64(l))
if len(m.LogRecords) > 0 {
for _, e := range m.LogRecords {
l = e.Size()
n += 1 + l + sovLogs(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovLogs(uint64(l))
}
return n
}
func (m *LogRecord) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.TimeUnixNano != 0 {
n += 9
}
if m.SeverityNumber != 0 {
n += 1 + sovLogs(uint64(m.SeverityNumber))
}
l = len(m.SeverityText)
if l > 0 {
n += 1 + l + sovLogs(uint64(l))
}
l = m.Body.Size()
n += 1 + l + sovLogs(uint64(l))
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovLogs(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovLogs(uint64(m.DroppedAttributesCount))
}
if m.Flags != 0 {
n += 5
}
l = m.TraceId.Size()
n += 1 + l + sovLogs(uint64(l))
l = m.SpanId.Size()
n += 1 + l + sovLogs(uint64(l))
if m.ObservedTimeUnixNano != 0 {
n += 9
}
l = len(m.EventName)
if l > 0 {
n += 1 + l + sovLogs(uint64(l))
}
return n
}
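// sovLogs reports the number of bytes needed to encode x as an unsigned
// varint; sozLogs does the same for the zigzag-transformed (signed) encoding.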
func sovLogs(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozLogs(x uint64) (n int) {
return sovLogs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *LogsData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LogsData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LogsData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceLogs = append(m.ResourceLogs, &ResourceLogs{})
if err := m.ResourceLogs[len(m.ResourceLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ResourceLogs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ResourceLogs: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ResourceLogs: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ScopeLogs = append(m.ScopeLogs, &ScopeLogs{})
if err := m.ScopeLogs[len(m.ScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 1000:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeprecatedScopeLogs = append(m.DeprecatedScopeLogs, &ScopeLogs{})
if err := m.DeprecatedScopeLogs[len(m.DeprecatedScopeLogs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ScopeLogs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ScopeLogs: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ScopeLogs: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LogRecords = append(m.LogRecords, &LogRecord{})
if err := m.LogRecords[len(m.LogRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *LogRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LogRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LogRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
}
m.SeverityNumber = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.SeverityNumber |= SeverityNumber(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SeverityText = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 8:
if wireType != 5 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
if (iNdEx + 4) > l {
return io.ErrUnexpectedEOF
}
m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
iNdEx += 4
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 11:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
}
m.ObservedTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 12:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogs
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthLogs
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthLogs
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EventName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipLogs(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthLogs
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipLogs(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowLogs
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthLogs
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupLogs
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthLogs
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
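// The skip and unmarshal loops above all decode protobuf base-128 varints by
// hand. The following standalone sketch (illustrative only, not part of the
// generated code; the function name is hypothetical) shows the same decoding
// rule in isolation: each byte contributes its low 7 bits, and a cleared high
// bit terminates the value.
func exampleDecodeVarint(dAtA []byte) (v uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(dAtA) {
			return 0, 0 // truncated input
		}
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, n
		}
	}
	return 0, 0 // overflow: more than 10 bytes of continuation
}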
var (
ErrInvalidLengthLogs = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowLogs = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupLogs = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/metrics/v1/metrics.proto
package v1
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// AggregationTemporality defines how a metric aggregator reports aggregated
// values. It describes how those values relate to the time interval over
// which they are aggregated.
type AggregationTemporality int32
const (
// UNSPECIFIED is the default AggregationTemporality; it MUST NOT be used.
AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
// DELTA is an AggregationTemporality for a metric aggregator which reports
// changes since last report time. Successive metrics contain aggregation of
// values from continuous and non-overlapping intervals.
//
// The values for a DELTA metric are based only on the time interval
// associated with one measurement cycle. There is no dependency on
// previous measurements, as there is for CUMULATIVE metrics.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// DELTA metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0+1 to
// t_0+2 with a value of 2.
AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
// CUMULATIVE is an AggregationTemporality for a metric aggregator which
// reports changes since a fixed start time. This means that current values
// of a CUMULATIVE metric depend on all previous measurements since the
// start time. Because of this, the sender is required to retain this state
// in some form. If this state is lost or invalidated, the CUMULATIVE metric
// values MUST be reset and a new fixed start time following the last
// reported measurement time sent MUST be used.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// CUMULATIVE metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+2 with a value of 5.
// 9. The system experiences a fault and loses state.
// 10. The system recovers and resumes receiving at time=t_1.
// 11. A request is received, the system measures 1 request.
// 12. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_1 to
// t_1+1 with a value of 1.
//
// Note: Even though reporting changes since the last report time using
// CUMULATIVE is valid, it is not recommended. This may cause problems for
// systems that do not use start_time to determine when the aggregation
// value was reset (e.g. Prometheus).
AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
)
var AggregationTemporality_name = map[int32]string{
0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
1: "AGGREGATION_TEMPORALITY_DELTA",
2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
}
var AggregationTemporality_value = map[string]int32{
"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
"AGGREGATION_TEMPORALITY_DELTA": 1,
"AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
}
func (x AggregationTemporality) String() string {
return proto.EnumName(AggregationTemporality_name, int32(x))
}
func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{0}
}
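// As an illustration of the two temporalities described above (a minimal
// sketch, not part of the generated code; the function name is hypothetical):
// a DELTA stream reports per-interval values, while the equivalent CUMULATIVE
// stream reports running totals since a fixed start time.
func exampleDeltaToCumulative(deltas []int64) []int64 {
	cumulative := make([]int64, len(deltas))
	var total int64
	for i, d := range deltas {
		total += d // accumulate since the fixed start time
		cumulative[i] = total
	}
	return cumulative
}

// For the request example above: DELTA values [3, 2] correspond to
// CUMULATIVE values [3, 5].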
// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
// bit-field representing 32 distinct boolean flags. Each flag defined in this
// enum is a bit-mask. To test the presence of a single flag in the flags of
// a data point, for example, use an expression like:
//
// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
type DataPointFlags int32
const (
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0
// This DataPoint is valid but has no recorded value. This value
// SHOULD be used to reflect explicitly missing data in a series, as
// for an equivalent to the Prometheus "staleness marker".
DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1
)
var DataPointFlags_name = map[int32]string{
0: "DATA_POINT_FLAGS_DO_NOT_USE",
1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
}
var DataPointFlags_value = map[string]int32{
"DATA_POINT_FLAGS_DO_NOT_USE": 0,
"DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1,
}
func (x DataPointFlags) String() string {
return proto.EnumName(DataPointFlags_name, int32(x))
}
func (DataPointFlags) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{1}
}
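// A minimal sketch (not part of the generated code; the function name is
// hypothetical) of the bit-mask test described above: a flag is present when
// ANDing the point's flags with the flag's mask yields the mask itself.
func exampleHasNoRecordedValue(pointFlags uint32) bool {
	mask := uint32(DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
	return pointFlags&mask == mask
}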
// MetricsData represents the metrics data that can be stored in a persistent
// storage, OR can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
//
// MetricsData
// └── ResourceMetrics
//     ├── Resource
//     ├── SchemaURL
//     └── ScopeMetrics
//         ├── Scope
//         ├── SchemaURL
//         └── Metric
//             ├── Name
//             ├── Description
//             ├── Unit
//             └── data
//                 ├── Gauge
//                 ├── Sum
//                 ├── Histogram
//                 ├── ExponentialHistogram
//                 └── Summary
//
// The main difference between this message and the collector protocol is that
// this message does not contain any "control" or "metadata" fields specific
// to the OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type MetricsData struct {
// An array of ResourceMetrics.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding further and in that case this
// array will contain multiple elements.
ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
}
func (m *MetricsData) Reset() { *m = MetricsData{} }
func (m *MetricsData) String() string { return proto.CompactTextString(m) }
func (*MetricsData) ProtoMessage() {}
func (*MetricsData) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{0}
}
func (m *MetricsData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MetricsData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MetricsData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *MetricsData) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricsData.Merge(m, src)
}
func (m *MetricsData) XXX_Size() int {
return m.Size()
}
func (m *MetricsData) XXX_DiscardUnknown() {
xxx_messageInfo_MetricsData.DiscardUnknown(m)
}
var xxx_messageInfo_MetricsData proto.InternalMessageInfo
func (m *MetricsData) GetResourceMetrics() []*ResourceMetrics {
if m != nil {
return m.ResourceMetrics
}
return nil
}
// A collection of ScopeMetrics from a Resource.
type ResourceMetrics struct {
DeprecatedScopeMetrics []*ScopeMetrics `protobuf:"bytes,1000,rep,name=deprecated_scope_metrics,json=deprecatedScopeMetrics,proto3" json:"deprecated_scope_metrics,omitempty"`
// The resource for the metrics in this message.
// If this field is not set then no resource info is known.
Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
// A list of metrics that originate from a resource.
ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_metrics" field which have their own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ResourceMetrics) Reset() { *m = ResourceMetrics{} }
func (m *ResourceMetrics) String() string { return proto.CompactTextString(m) }
func (*ResourceMetrics) ProtoMessage() {}
func (*ResourceMetrics) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{1}
}
func (m *ResourceMetrics) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResourceMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ResourceMetrics.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ResourceMetrics) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceMetrics.Merge(m, src)
}
func (m *ResourceMetrics) XXX_Size() int {
return m.Size()
}
func (m *ResourceMetrics) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceMetrics.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceMetrics proto.InternalMessageInfo
func (m *ResourceMetrics) GetDeprecatedScopeMetrics() []*ScopeMetrics {
if m != nil {
return m.DeprecatedScopeMetrics
}
return nil
}
func (m *ResourceMetrics) GetResource() v1.Resource {
if m != nil {
return m.Resource
}
return v1.Resource{}
}
func (m *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics {
if m != nil {
return m.ScopeMetrics
}
return nil
}
func (m *ResourceMetrics) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A collection of Metrics produced by a Scope.
type ScopeMetrics struct {
// The instrumentation scope information for the metrics in this message.
// Semantically, when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
// A list of metrics that originate from an instrumentation library.
Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the metric data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all metrics in the "metrics" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ScopeMetrics) Reset() { *m = ScopeMetrics{} }
func (m *ScopeMetrics) String() string { return proto.CompactTextString(m) }
func (*ScopeMetrics) ProtoMessage() {}
func (*ScopeMetrics) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{2}
}
func (m *ScopeMetrics) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScopeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScopeMetrics.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ScopeMetrics) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScopeMetrics.Merge(m, src)
}
func (m *ScopeMetrics) XXX_Size() int {
return m.Size()
}
func (m *ScopeMetrics) XXX_DiscardUnknown() {
xxx_messageInfo_ScopeMetrics.DiscardUnknown(m)
}
var xxx_messageInfo_ScopeMetrics proto.InternalMessageInfo
func (m *ScopeMetrics) GetScope() v11.InstrumentationScope {
if m != nil {
return m.Scope
}
return v11.InstrumentationScope{}
}
func (m *ScopeMetrics) GetMetrics() []*Metric {
if m != nil {
return m.Metrics
}
return nil
}
func (m *ScopeMetrics) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// Defines a Metric which has one or more timeseries. The following is a
// brief summary of the Metric data model. For more details, see:
//
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
//
// The data model and relation between entities is shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
// one of the specific data point value types, and "points" is the term used
// to refer to any one of the lists of points contained in the Metric.
//
// - Metric is composed of a metadata and data.
//
// - Metadata part contains a name, description, unit.
//
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
//
// - DataPoint contains timestamps, attributes, and one of the possible value type
// fields.
//
// Metric
// +------------+
// |name |
// |description |
// |unit | +------------------------------------+
// |data |---> |Gauge, Sum, Histogram, Summary, ... |
// +------------+ +------------------------------------+
//
// Data [One of Gauge, Sum, Histogram, Summary, ...]
// +-----------+
// |... | // Metadata about the Data.
// |points |--+
// +-----------+ |
// | +---------------------------+
// | |DataPoint 1 |
// v |+------+------+ +------+ |
// +-----+ ||label |label |...|label | |
// | 1 |-->||value1|value2|...|valueN| |
// +-----+ |+------+------+ +------+ |
// | . | |+-----+ |
// | . | ||value| |
// | . | |+-----+ |
// | . | +---------------------------+
// | . | .
// | . | .
// | . | .
// | . | +---------------------------+
// | . | |DataPoint M |
// +-----+ |+------+------+ +------+ |
// | M |-->||label |label |...|label | |
// +-----+ ||value1|value2|...|valueN| |
// |+------+------+ +------+ |
// |+-----+ |
// ||value| |
// |+-----+ |
// +---------------------------+
//
// Each distinct type of DataPoint represents the output of a specific
// aggregation function, the result of applying the DataPoint's
// associated function to one or more measurements.
//
// All DataPoint types have three common fields:
// - Attributes includes key-value pairs associated with the data point
// - TimeUnixNano is required, set to the end time of the aggregation
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
// having an AggregationTemporality field, as discussed below.
//
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// # TimeUnixNano
//
// This field is required, having consistent interpretation across
// DataPoint types. TimeUnixNano is the moment corresponding to when
// the data point's aggregate value was captured.
//
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
// by consumers.
//
// # StartTimeUnixNano
//
// StartTimeUnixNano in general allows detecting when a sequence of
// observations is unbroken. This field indicates to consumers the
// start time for points with cumulative and delta
// AggregationTemporality, and it should be included whenever possible
// to support correct rate calculation. Although it may be omitted
// when the start time is truly unknown, setting StartTimeUnixNano is
// strongly encouraged.
type Metric struct {
// name of the metric.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// description of the metric, which can be used in documentation.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// unit in which the metric value is reported. Follows the format
// described by https://unitsofmeasure.org/ucum.html.
Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
// Data determines the aggregation type (if any) of the metric, the reported
// value type for the data points, and the relationship to the time interval
// over which they are reported.
//
// Types that are valid to be assigned to Data:
// *Metric_Gauge
// *Metric_Sum
// *Metric_Histogram
// *Metric_ExponentialHistogram
// *Metric_Summary
Data isMetric_Data `protobuf_oneof:"data"`
// Additional metadata attributes that describe the metric. [Optional].
// Attributes are non-identifying.
// Consumers SHOULD NOT need to be aware of these attributes.
// These attributes MAY be used to encode information allowing
// for lossless roundtrip translation to / from another data model.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
Metadata []v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{3}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return m.Size()
}
func (m *Metric) XXX_DiscardUnknown() {
xxx_messageInfo_Metric.DiscardUnknown(m)
}
var xxx_messageInfo_Metric proto.InternalMessageInfo
type isMetric_Data interface {
isMetric_Data()
MarshalTo([]byte) (int, error)
Size() int
}
type Metric_Gauge struct {
Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof" json:"gauge,omitempty"`
}
type Metric_Sum struct {
Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
}
type Metric_Histogram struct {
Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof" json:"histogram,omitempty"`
}
type Metric_ExponentialHistogram struct {
ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof" json:"exponential_histogram,omitempty"`
}
type Metric_Summary struct {
Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof" json:"summary,omitempty"`
}
func (*Metric_Gauge) isMetric_Data() {}
func (*Metric_Sum) isMetric_Data() {}
func (*Metric_Histogram) isMetric_Data() {}
func (*Metric_ExponentialHistogram) isMetric_Data() {}
func (*Metric_Summary) isMetric_Data() {}
func (m *Metric) GetData() isMetric_Data {
if m != nil {
return m.Data
}
return nil
}
func (m *Metric) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Metric) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Metric) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
func (m *Metric) GetGauge() *Gauge {
if x, ok := m.GetData().(*Metric_Gauge); ok {
return x.Gauge
}
return nil
}
func (m *Metric) GetSum() *Sum {
if x, ok := m.GetData().(*Metric_Sum); ok {
return x.Sum
}
return nil
}
func (m *Metric) GetHistogram() *Histogram {
if x, ok := m.GetData().(*Metric_Histogram); ok {
return x.Histogram
}
return nil
}
func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
if x, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
return x.ExponentialHistogram
}
return nil
}
func (m *Metric) GetSummary() *Summary {
if x, ok := m.GetData().(*Metric_Summary); ok {
return x.Summary
}
return nil
}
func (m *Metric) GetMetadata() []v11.KeyValue {
if m != nil {
return m.Metadata
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Metric) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Metric_Gauge)(nil),
(*Metric_Sum)(nil),
(*Metric_Histogram)(nil),
(*Metric_ExponentialHistogram)(nil),
(*Metric_Summary)(nil),
}
}
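// A brief usage sketch for the Data oneof wrappers above (illustrative only,
// not part of the generated code; the function name is hypothetical): exactly
// one wrapper type is assigned to Data, and consumers dispatch on the
// concrete wrapper type.
func exampleMetricKind(m *Metric) string {
	switch m.GetData().(type) {
	case *Metric_Gauge:
		return "gauge"
	case *Metric_Sum:
		return "sum"
	case *Metric_Histogram:
		return "histogram"
	case *Metric_ExponentialHistogram:
		return "exponential_histogram"
	case *Metric_Summary:
		return "summary"
	default:
		return "empty" // no data assigned
	}
}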
// Gauge represents the type of a scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
type Gauge struct {
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{4}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return m.Size()
}
func (m *Gauge) XXX_DiscardUnknown() {
xxx_messageInfo_Gauge.DiscardUnknown(m)
}
var xxx_messageInfo_Gauge proto.InternalMessageInfo
func (m *Gauge) GetDataPoints() []*NumberDataPoint {
if m != nil {
return m.DataPoints
}
return nil
}
// Sum represents the type of a scalar metric that is calculated as a sum of all
// reported measurements over a time interval.
type Sum struct {
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
// If "true" means that the sum is monotonic.
IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
}
func (m *Sum) Reset() { *m = Sum{} }
func (m *Sum) String() string { return proto.CompactTextString(m) }
func (*Sum) ProtoMessage() {}
func (*Sum) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{5}
}
func (m *Sum) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Sum.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Sum) XXX_Merge(src proto.Message) {
xxx_messageInfo_Sum.Merge(m, src)
}
func (m *Sum) XXX_Size() int {
return m.Size()
}
func (m *Sum) XXX_DiscardUnknown() {
xxx_messageInfo_Sum.DiscardUnknown(m)
}
var xxx_messageInfo_Sum proto.InternalMessageInfo
func (m *Sum) GetDataPoints() []*NumberDataPoint {
if m != nil {
return m.DataPoints
}
return nil
}
func (m *Sum) GetAggregationTemporality() AggregationTemporality {
if m != nil {
return m.AggregationTemporality
}
return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
func (m *Sum) GetIsMonotonic() bool {
if m != nil {
return m.IsMonotonic
}
return false
}
// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported measurements over a time interval.
type Histogram struct {
DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return m.Size()
}
func (m *Histogram) XXX_DiscardUnknown() {
xxx_messageInfo_Histogram.DiscardUnknown(m)
}
var xxx_messageInfo_Histogram proto.InternalMessageInfo
func (m *Histogram) GetDataPoints() []*HistogramDataPoint {
if m != nil {
return m.DataPoints
}
return nil
}
func (m *Histogram) GetAggregationTemporality() AggregationTemporality {
if m != nil {
return m.AggregationTemporality
}
return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
type ExponentialHistogram struct {
DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}
func (m *ExponentialHistogram) Reset() { *m = ExponentialHistogram{} }
func (m *ExponentialHistogram) String() string { return proto.CompactTextString(m) }
func (*ExponentialHistogram) ProtoMessage() {}
func (*ExponentialHistogram) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{7}
}
func (m *ExponentialHistogram) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExponentialHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExponentialHistogram.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExponentialHistogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExponentialHistogram.Merge(m, src)
}
func (m *ExponentialHistogram) XXX_Size() int {
return m.Size()
}
func (m *ExponentialHistogram) XXX_DiscardUnknown() {
xxx_messageInfo_ExponentialHistogram.DiscardUnknown(m)
}
var xxx_messageInfo_ExponentialHistogram proto.InternalMessageInfo
func (m *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint {
if m != nil {
return m.DataPoints
}
return nil
}
func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality {
if m != nil {
return m.AggregationTemporality
}
return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
// Summary metrics do not have an aggregation temporality field. This is
// because the count and sum fields of a SummaryDataPoint are assumed to be
// cumulative values.
type Summary struct {
DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{8}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return m.Size()
}
func (m *Summary) XXX_DiscardUnknown() {
xxx_messageInfo_Summary.DiscardUnknown(m)
}
var xxx_messageInfo_Summary proto.InternalMessageInfo
func (m *Summary) GetDataPoints() []*SummaryDataPoint {
if m != nil {
return m.DataPoints
}
return nil
}
// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
type NumberDataPoint struct {
// The set of key/value pairs that uniquely identify the timeseries to
// which this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string, bool, int, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values
// can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions originate from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// The value itself. A point is considered invalid when one of the recognized
// value fields is not present inside this oneof.
//
// Types that are valid to be assigned to Value:
// *NumberDataPoint_AsDouble
// *NumberDataPoint_AsInt
Value isNumberDataPoint_Value `protobuf_oneof:"value"`
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
Exemplars []Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"`
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (m *NumberDataPoint) Reset() { *m = NumberDataPoint{} }
func (m *NumberDataPoint) String() string { return proto.CompactTextString(m) }
func (*NumberDataPoint) ProtoMessage() {}
func (*NumberDataPoint) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{9}
}
func (m *NumberDataPoint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *NumberDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_NumberDataPoint.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *NumberDataPoint) XXX_Merge(src proto.Message) {
xxx_messageInfo_NumberDataPoint.Merge(m, src)
}
func (m *NumberDataPoint) XXX_Size() int {
return m.Size()
}
func (m *NumberDataPoint) XXX_DiscardUnknown() {
xxx_messageInfo_NumberDataPoint.DiscardUnknown(m)
}
var xxx_messageInfo_NumberDataPoint proto.InternalMessageInfo
type isNumberDataPoint_Value interface {
isNumberDataPoint_Value()
MarshalTo([]byte) (int, error)
Size() int
}
type NumberDataPoint_AsDouble struct {
AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
}
type NumberDataPoint_AsInt struct {
AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
}
func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *NumberDataPoint) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *NumberDataPoint) GetStartTimeUnixNano() uint64 {
if m != nil {
return m.StartTimeUnixNano
}
return 0
}
func (m *NumberDataPoint) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *NumberDataPoint) GetAsDouble() float64 {
if x, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
return x.AsDouble
}
return 0
}
func (m *NumberDataPoint) GetAsInt() int64 {
if x, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
return x.AsInt
}
return 0
}
func (m *NumberDataPoint) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
func (m *NumberDataPoint) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*NumberDataPoint) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*NumberDataPoint_AsDouble)(nil),
(*NumberDataPoint_AsInt)(nil),
}
}
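// A short sketch of the Value oneof above (illustrative only, not part of
// the generated code; the function name is hypothetical): a point carries
// either a double or an int value, and the generated getters return the zero
// value when the other variant is set.
func exampleNumberValue(p *NumberDataPoint) float64 {
	switch v := p.GetValue().(type) {
	case *NumberDataPoint_AsDouble:
		return v.AsDouble
	case *NumberDataPoint_AsInt:
		return float64(v.AsInt) // lossy above 2^53; acceptable for a sketch
	default:
		return 0 // invalid point: neither value field is present
	}
}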
// HistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Histogram. A Histogram contains summary statistics
// for a population of values; it may optionally contain the distribution of
// those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket_counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
type HistogramDataPoint struct {
// The set of key/value pairs that uniquely identify the timeseries to
// which this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string, bool, int, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values
// can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions originate from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// count is the number of values in the population. Must be non-negative. This
// value must be equal to the sum of the "count" fields in buckets if a
// histogram is provided.
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
//
// Types that are valid to be assigned to Sum_:
// *HistogramDataPoint_Sum
Sum_ isHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
// bucket_counts is an optional field that contains the count values of the
// histogram for each bucket.
//
// The sum of the bucket_counts must equal the value in the count field.
//
// The number of elements in the bucket_counts array must be one greater than
// the number of elements in the explicit_bounds array. The exception to this
// rule is when the length of bucket_counts is 0, then the length of
// explicit_bounds must also be 0.
BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
// explicit_bounds specifies buckets with explicitly defined bounds for values.
//
// The boundaries for bucket at index i are:
//
// (-infinity, explicit_bounds[i]] for i == 0
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
//
// The values in the explicit_bounds array must be strictly increasing.
//
// Histogram buckets are inclusive of their upper boundary, except the last
// bucket where the boundary is at infinity. This format is intentionally
// compatible with the OpenMetrics histogram definition.
//
// If bucket_counts length is 0 then explicit_bounds length must also be 0,
// otherwise the data point is invalid.
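// (A bucket-lookup sketch follows this struct definition.)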
ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
Exemplars []Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"`
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
// min is the minimum value over (start_time, end_time].
//
// Types that are valid to be assigned to Min_:
// *HistogramDataPoint_Min
Min_ isHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
// max is the maximum value over (start_time, end_time].
//
// Types that are valid to be assigned to Max_:
// *HistogramDataPoint_Max
Max_ isHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
}
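// A bucket-lookup sketch for the explicit_bounds scheme documented in the
// struct above (illustrative only, not part of the generated code; the
// function name is hypothetical): bucket i covers
// (explicit_bounds[i-1], explicit_bounds[i]], and the final bucket
// i == len(bounds) covers (explicit_bounds[len-1], +infinity).
func exampleBucketIndex(bounds []float64, value float64) int {
	// Buckets are inclusive of their upper boundary, so advance while the
	// value is strictly greater than the current bound.
	i := 0
	for i < len(bounds) && value > bounds[i] {
		i++
	}
	return i // a valid index into bucket_counts (len(bounds)+1 buckets)
}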
func (m *HistogramDataPoint) Reset() { *m = HistogramDataPoint{} }
func (m *HistogramDataPoint) String() string { return proto.CompactTextString(m) }
func (*HistogramDataPoint) ProtoMessage() {}
func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{10}
}
func (m *HistogramDataPoint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *HistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_HistogramDataPoint.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *HistogramDataPoint) XXX_Merge(src proto.Message) {
xxx_messageInfo_HistogramDataPoint.Merge(m, src)
}
func (m *HistogramDataPoint) XXX_Size() int {
return m.Size()
}
func (m *HistogramDataPoint) XXX_DiscardUnknown() {
xxx_messageInfo_HistogramDataPoint.DiscardUnknown(m)
}
var xxx_messageInfo_HistogramDataPoint proto.InternalMessageInfo
type isHistogramDataPoint_Sum_ interface {
isHistogramDataPoint_Sum_()
MarshalTo([]byte) (int, error)
Size() int
}
type isHistogramDataPoint_Min_ interface {
isHistogramDataPoint_Min_()
MarshalTo([]byte) (int, error)
Size() int
}
type isHistogramDataPoint_Max_ interface {
isHistogramDataPoint_Max_()
MarshalTo([]byte) (int, error)
Size() int
}
type HistogramDataPoint_Sum struct {
Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
}
type HistogramDataPoint_Min struct {
Min float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
}
type HistogramDataPoint_Max struct {
Max float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
}
func (*HistogramDataPoint_Sum) isHistogramDataPoint_Sum_() {}
func (*HistogramDataPoint_Min) isHistogramDataPoint_Min_() {}
func (*HistogramDataPoint_Max) isHistogramDataPoint_Max_() {}
func (m *HistogramDataPoint) GetSum_() isHistogramDataPoint_Sum_ {
if m != nil {
return m.Sum_
}
return nil
}
func (m *HistogramDataPoint) GetMin_() isHistogramDataPoint_Min_ {
if m != nil {
return m.Min_
}
return nil
}
func (m *HistogramDataPoint) GetMax_() isHistogramDataPoint_Max_ {
if m != nil {
return m.Max_
}
return nil
}
func (m *HistogramDataPoint) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
if m != nil {
return m.StartTimeUnixNano
}
return 0
}
func (m *HistogramDataPoint) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *HistogramDataPoint) GetCount() uint64 {
if m != nil {
return m.Count
}
return 0
}
func (m *HistogramDataPoint) GetSum() float64 {
if x, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
return x.Sum
}
return 0
}
func (m *HistogramDataPoint) GetBucketCounts() []uint64 {
if m != nil {
return m.BucketCounts
}
return nil
}
func (m *HistogramDataPoint) GetExplicitBounds() []float64 {
if m != nil {
return m.ExplicitBounds
}
return nil
}
func (m *HistogramDataPoint) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
func (m *HistogramDataPoint) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
func (m *HistogramDataPoint) GetMin() float64 {
if x, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
return x.Min
}
return 0
}
func (m *HistogramDataPoint) GetMax() float64 {
if x, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
return x.Max
}
return 0
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*HistogramDataPoint) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*HistogramDataPoint_Sum)(nil),
(*HistogramDataPoint_Min)(nil),
(*HistogramDataPoint_Max)(nil),
}
}
// ExponentialHistogramDataPoint is a single data point in a timeseries that
// describes the time-varying values of an ExponentialHistogram of double
// values. An ExponentialHistogram contains summary statistics for a
// population of values; it may optionally contain the distribution of those
// values across a set of buckets.
type ExponentialHistogramDataPoint struct {
// The set of key/value pairs that uniquely identify the timeseries to
// which this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string, bool, int, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values
// can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions originate from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// count is the number of values in the population. Must be
// non-negative. This value must be equal to the sum of the "bucket_counts"
// values in the positive and negative Buckets plus the "zero_count" field.
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
//
// Types that are valid to be assigned to Sum_:
// *ExponentialHistogramDataPoint_Sum
Sum_ isExponentialHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"`
// scale describes the resolution of the histogram. Boundaries are
// located at powers of the base, where:
//
// base = (2^(2^-scale))
//
// The histogram bucket identified by `index`, a signed integer,
// contains values that are greater than (base^index) and
// less than or equal to (base^(index+1)).
//
// The positive and negative ranges of the histogram are expressed
// separately. Negative values are mapped by their absolute value
// into the negative range using the same scale as the positive range.
//
// scale is not restricted by the protocol, as the permissible
// values depend on the range of the data.
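// (An index-mapping sketch follows this struct definition.)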
Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
// zero_count is the count of values that are either exactly zero or
// within the region considered zero by the instrumentation at the
// tolerated degree of precision. This bucket stores values that
// cannot be expressed using the standard exponential formula as
// well as values that have been rounded to zero.
//
// Implementations MAY consider the zero bucket to have probability
// mass equal to (zero_count / count).
ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
// positive carries the positive range of exponential bucket counts.
Positive ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive"`
// negative carries the negative range of exponential bucket counts.
Negative ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative"`
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
Exemplars []Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars"`
// min is the minimum value over (start_time, end_time].
//
// Types that are valid to be assigned to Min_:
// *ExponentialHistogramDataPoint_Min
Min_ isExponentialHistogramDataPoint_Min_ `protobuf_oneof:"min_"`
// max is the maximum value over (start_time, end_time].
//
// Types that are valid to be assigned to Max_:
// *ExponentialHistogramDataPoint_Max
Max_ isExponentialHistogramDataPoint_Max_ `protobuf_oneof:"max_"`
// ZeroThreshold may be optionally set to convey the width of the zero
// region, where the zero region is defined as the closed interval
// [-ZeroThreshold, ZeroThreshold].
// When ZeroThreshold is 0, the zero count bucket stores values that cannot be
// expressed using the standard exponential formula as well as values that
// have been rounded to zero.
ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
}
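// An index-mapping sketch for the exponential scale described in the struct
// above (illustrative only, not part of the generated code; the function name
// is hypothetical). With base = 2^(2^-scale), bucket index i contains values
// v where base^i < v <= base^(i+1), i.e. i = ceil(log2(v) * 2^scale) - 1.
// This naive floating-point formulation can be off by one for values at or
// near exact bucket boundaries; production mappings handle those cases
// exactly.
func exampleExpBucketIndex(scale int32, v float64) int32 {
	scaleFactor := math.Ldexp(1, int(scale)) // 2^scale
	return int32(math.Ceil(math.Log2(v)*scaleFactor)) - 1
}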
func (m *ExponentialHistogramDataPoint) Reset() { *m = ExponentialHistogramDataPoint{} }
func (m *ExponentialHistogramDataPoint) String() string { return proto.CompactTextString(m) }
func (*ExponentialHistogramDataPoint) ProtoMessage() {}
func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{11}
}
func (m *ExponentialHistogramDataPoint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExponentialHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExponentialHistogramDataPoint.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExponentialHistogramDataPoint) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExponentialHistogramDataPoint.Merge(m, src)
}
func (m *ExponentialHistogramDataPoint) XXX_Size() int {
return m.Size()
}
func (m *ExponentialHistogramDataPoint) XXX_DiscardUnknown() {
xxx_messageInfo_ExponentialHistogramDataPoint.DiscardUnknown(m)
}
var xxx_messageInfo_ExponentialHistogramDataPoint proto.InternalMessageInfo
type isExponentialHistogramDataPoint_Sum_ interface {
isExponentialHistogramDataPoint_Sum_()
MarshalTo([]byte) (int, error)
Size() int
}
type isExponentialHistogramDataPoint_Min_ interface {
isExponentialHistogramDataPoint_Min_()
MarshalTo([]byte) (int, error)
Size() int
}
type isExponentialHistogramDataPoint_Max_ interface {
isExponentialHistogramDataPoint_Max_()
MarshalTo([]byte) (int, error)
Size() int
}
type ExponentialHistogramDataPoint_Sum struct {
Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
}
type ExponentialHistogramDataPoint_Min struct {
Min float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
}
type ExponentialHistogramDataPoint_Max struct {
Max float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
}
func (*ExponentialHistogramDataPoint_Sum) isExponentialHistogramDataPoint_Sum_() {}
func (*ExponentialHistogramDataPoint_Min) isExponentialHistogramDataPoint_Min_() {}
func (*ExponentialHistogramDataPoint_Max) isExponentialHistogramDataPoint_Max_() {}
func (m *ExponentialHistogramDataPoint) GetSum_() isExponentialHistogramDataPoint_Sum_ {
if m != nil {
return m.Sum_
}
return nil
}
func (m *ExponentialHistogramDataPoint) GetMin_() isExponentialHistogramDataPoint_Min_ {
if m != nil {
return m.Min_
}
return nil
}
func (m *ExponentialHistogramDataPoint) GetMax_() isExponentialHistogramDataPoint_Max_ {
if m != nil {
return m.Max_
}
return nil
}
func (m *ExponentialHistogramDataPoint) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 {
if m != nil {
return m.StartTimeUnixNano
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetCount() uint64 {
if m != nil {
return m.Count
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetSum() float64 {
if x, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
return x.Sum
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetScale() int32 {
if m != nil {
return m.Scale
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetZeroCount() uint64 {
if m != nil {
return m.ZeroCount
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetPositive() ExponentialHistogramDataPoint_Buckets {
if m != nil {
return m.Positive
}
return ExponentialHistogramDataPoint_Buckets{}
}
func (m *ExponentialHistogramDataPoint) GetNegative() ExponentialHistogramDataPoint_Buckets {
if m != nil {
return m.Negative
}
return ExponentialHistogramDataPoint_Buckets{}
}
func (m *ExponentialHistogramDataPoint) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
func (m *ExponentialHistogramDataPoint) GetMin() float64 {
if x, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
return x.Min
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetMax() float64 {
if x, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
return x.Max
}
return 0
}
func (m *ExponentialHistogramDataPoint) GetZeroThreshold() float64 {
if m != nil {
return m.ZeroThreshold
}
return 0
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*ExponentialHistogramDataPoint) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*ExponentialHistogramDataPoint_Sum)(nil),
(*ExponentialHistogramDataPoint_Min)(nil),
(*ExponentialHistogramDataPoint_Max)(nil),
}
}
// Buckets are a set of bucket counts, encoded in a contiguous array
// of counts.
type ExponentialHistogramDataPoint_Buckets struct {
// Offset is the bucket index of the first entry in the bucket_counts array.
//
// Note: This uses a varint encoding as a simple form of compression.
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
// bucket_counts is an array of count values, where bucket_counts[i] carries
// the count of the bucket at index (offset+i). bucket_counts[i] is the count
// of values greater than base^(offset+i) and less than or equal to
// base^(offset+i+1).
//
// Note: By contrast, the explicit HistogramDataPoint uses
// fixed64. This field is expected to have many buckets,
// especially zeros, so uint64 has been selected to ensure
// varint encoding.
BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
}
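// Worked example (illustrative): with Offset = -2 and BucketCounts = [3, 0, 5],
// the point carries a count of 3 in bucket index -2, 0 in index -1 and 5 in
// index 0; bucket_counts[i] always describes bucket (Offset + i), so sparse
// leading buckets cost nothing beyond the offset.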
func (m *ExponentialHistogramDataPoint_Buckets) Reset() { *m = ExponentialHistogramDataPoint_Buckets{} }
func (m *ExponentialHistogramDataPoint_Buckets) String() string { return proto.CompactTextString(m) }
func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{11, 0}
}
func (m *ExponentialHistogramDataPoint_Buckets) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ExponentialHistogramDataPoint_Buckets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ExponentialHistogramDataPoint_Buckets) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Merge(m, src)
}
func (m *ExponentialHistogramDataPoint_Buckets) XXX_Size() int {
return m.Size()
}
func (m *ExponentialHistogramDataPoint_Buckets) XXX_DiscardUnknown() {
xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.DiscardUnknown(m)
}
var xxx_messageInfo_ExponentialHistogramDataPoint_Buckets proto.InternalMessageInfo
func (m *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
if m != nil {
return m.Offset
}
return 0
}
func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
if m != nil {
return m.BucketCounts
}
return nil
}
// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric. The count and sum fields represent
// cumulative values.
type SummaryDataPoint struct {
// The set of key/value pairs that uniquely identify the timeseries to
// which this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"`
// StartTimeUnixNano is optional but strongly encouraged; see the
// detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
// TimeUnixNano is required, see the detailed comments above Metric.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// count is the number of values in the population. Must be non-negative.
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
// sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
// events, and is assumed to be monotonic over the values of these events.
// Negative events *can* be recorded, but sum should not be filled out when
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
// see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
// (Optional) list of values at different quantiles of the distribution calculated
// from the current snapshot. The quantiles must be strictly increasing.
QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
// Flags that apply to this specific data point. See DataPointFlags
// for the available flags and their meaning.
Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (m *SummaryDataPoint) Reset() { *m = SummaryDataPoint{} }
func (m *SummaryDataPoint) String() string { return proto.CompactTextString(m) }
func (*SummaryDataPoint) ProtoMessage() {}
func (*SummaryDataPoint) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{12}
}
func (m *SummaryDataPoint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SummaryDataPoint.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *SummaryDataPoint) XXX_Merge(src proto.Message) {
xxx_messageInfo_SummaryDataPoint.Merge(m, src)
}
func (m *SummaryDataPoint) XXX_Size() int {
return m.Size()
}
func (m *SummaryDataPoint) XXX_DiscardUnknown() {
xxx_messageInfo_SummaryDataPoint.DiscardUnknown(m)
}
var xxx_messageInfo_SummaryDataPoint proto.InternalMessageInfo
func (m *SummaryDataPoint) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *SummaryDataPoint) GetStartTimeUnixNano() uint64 {
if m != nil {
return m.StartTimeUnixNano
}
return 0
}
func (m *SummaryDataPoint) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *SummaryDataPoint) GetCount() uint64 {
if m != nil {
return m.Count
}
return 0
}
func (m *SummaryDataPoint) GetSum() float64 {
if m != nil {
return m.Sum
}
return 0
}
func (m *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile {
if m != nil {
return m.QuantileValues
}
return nil
}
func (m *SummaryDataPoint) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
// Represents the value at a given quantile of a distribution.
//
// To record Min and Max values, the following conventions are used:
// - The 1.0 quantile is equivalent to the maximum value observed.
// - The 0.0 quantile is equivalent to the minimum value observed.
//
// See the following issue for more context:
// https://github.com/open-telemetry/opentelemetry-proto/issues/125
type SummaryDataPoint_ValueAtQuantile struct {
// The quantile of a distribution. Must be in the interval
// [0.0, 1.0].
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
// The value at the given quantile of a distribution.
//
// Quantile values must NOT be negative.
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
}
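// Example (illustrative): a p99 latency of 0.25s would be represented as
// &SummaryDataPoint_ValueAtQuantile{Quantile: 0.99, Value: 0.25}; per the
// conventions above, the observed minimum and maximum map to quantiles 0.0
// and 1.0 respectively.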
func (m *SummaryDataPoint_ValueAtQuantile) Reset() { *m = SummaryDataPoint_ValueAtQuantile{} }
func (m *SummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) }
func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{12, 0}
}
func (m *SummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *SummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Merge(m, src)
}
func (m *SummaryDataPoint_ValueAtQuantile) XXX_Size() int {
return m.Size()
}
func (m *SummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() {
xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.DiscardUnknown(m)
}
var xxx_messageInfo_SummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo
func (m *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 {
if m != nil {
return m.Quantile
}
return 0
}
func (m *SummaryDataPoint_ValueAtQuantile) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
type Exemplar struct {
// The set of key/value pairs that were filtered out by the aggregator, but
// recorded alongside the original measurement. Only key/value pairs that were
// filtered out by the aggregator should be included.
FilteredAttributes []v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes"`
// time_unix_nano is the exact time when this exemplar was recorded.
//
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// The value of the measurement that was recorded. An exemplar is
// considered invalid when one of the recognized value fields is not present
// inside this oneof.
//
// Types that are valid to be assigned to Value:
// *Exemplar_AsDouble
// *Exemplar_AsInt
Value isExemplar_Value `protobuf_oneof:"value"`
// (Optional) Span ID of the exemplar trace.
// span_id may be missing if the measurement is not recorded inside a trace
// or if the trace is not sampled.
SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
// (Optional) Trace ID of the exemplar trace.
// trace_id may be missing if the measurement is not recorded inside a trace
// or if the trace is not sampled.
TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_3c3112f9fa006917, []int{13}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
type isExemplar_Value interface {
isExemplar_Value()
MarshalTo([]byte) (int, error)
Size() int
}
type Exemplar_AsDouble struct {
AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"`
}
type Exemplar_AsInt struct {
AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"`
}
func (*Exemplar_AsDouble) isExemplar_Value() {}
func (*Exemplar_AsInt) isExemplar_Value() {}
func (m *Exemplar) GetValue() isExemplar_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *Exemplar) GetFilteredAttributes() []v11.KeyValue {
if m != nil {
return m.FilteredAttributes
}
return nil
}
func (m *Exemplar) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *Exemplar) GetAsDouble() float64 {
if x, ok := m.GetValue().(*Exemplar_AsDouble); ok {
return x.AsDouble
}
return 0
}
func (m *Exemplar) GetAsInt() int64 {
if x, ok := m.GetValue().(*Exemplar_AsInt); ok {
return x.AsInt
}
return 0
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Exemplar) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Exemplar_AsDouble)(nil),
(*Exemplar_AsInt)(nil),
}
}
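// Reading an exemplar value therefore means switching on the oneof
// (illustrative sketch):
//
//	switch v := ex.Value.(type) {
//	case *Exemplar_AsDouble:
//		_ = v.AsDouble
//	case *Exemplar_AsInt:
//		_ = v.AsInt
//	}
//
// or simply calling ex.GetAsDouble() / ex.GetAsInt(), each of which returns 0
// when the other case is set.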
func init() {
proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
proto.RegisterEnum("opentelemetry.proto.metrics.v1.DataPointFlags", DataPointFlags_name, DataPointFlags_value)
proto.RegisterType((*MetricsData)(nil), "opentelemetry.proto.metrics.v1.MetricsData")
proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics")
proto.RegisterType((*ScopeMetrics)(nil), "opentelemetry.proto.metrics.v1.ScopeMetrics")
proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric")
proto.RegisterType((*Gauge)(nil), "opentelemetry.proto.metrics.v1.Gauge")
proto.RegisterType((*Sum)(nil), "opentelemetry.proto.metrics.v1.Sum")
proto.RegisterType((*Histogram)(nil), "opentelemetry.proto.metrics.v1.Histogram")
proto.RegisterType((*ExponentialHistogram)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogram")
proto.RegisterType((*Summary)(nil), "opentelemetry.proto.metrics.v1.Summary")
proto.RegisterType((*NumberDataPoint)(nil), "opentelemetry.proto.metrics.v1.NumberDataPoint")
proto.RegisterType((*HistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.HistogramDataPoint")
proto.RegisterType((*ExponentialHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint")
proto.RegisterType((*ExponentialHistogramDataPoint_Buckets)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets")
proto.RegisterType((*SummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint")
proto.RegisterType((*SummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile")
proto.RegisterType((*Exemplar)(nil), "opentelemetry.proto.metrics.v1.Exemplar")
}
func init() {
proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917)
}
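// The init functions above register the enums, message types and the raw file
// descriptor under their fully qualified proto names, so that
// reflection-driven consumers (for example proto.MessageType lookups) can
// resolve these types at runtime.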
var fileDescriptor_3c3112f9fa006917 = []byte{
// 1568 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x4f, 0x1b, 0x49,
0x16, 0x77, 0xfb, 0xdb, 0xcf, 0x06, 0x9c, 0x5a, 0x96, 0xb4, 0x58, 0xe1, 0x38, 0xce, 0x26, 0xb0,
0xd9, 0xc8, 0x5e, 0xc8, 0x6a, 0x3f, 0x0e, 0x91, 0x62, 0x63, 0x03, 0x26, 0x80, 0x49, 0xd9, 0x20,
0x25, 0x8a, 0xd2, 0x2a, 0xec, 0xc2, 0xb4, 0xd2, 0xdd, 0xe5, 0xed, 0xae, 0x46, 0xb0, 0xff, 0xc1,
0x4a, 0x7b, 0xc8, 0xdf, 0xb1, 0xca, 0x6d, 0x4f, 0x73, 0x9b, 0x63, 0x8e, 0x99, 0xdb, 0x68, 0x34,
0x8a, 0x46, 0xe4, 0x30, 0x23, 0xcd, 0x3f, 0x31, 0xaa, 0xea, 0x6e, 0xfc, 0x81, 0x89, 0xc9, 0xc7,
0x21, 0x39, 0xb9, 0xea, 0xd5, 0x7b, 0xbf, 0x7a, 0xaf, 0xde, 0xef, 0xd5, 0x2b, 0x37, 0xdc, 0x63,
0x3d, 0x6a, 0x71, 0x6a, 0x50, 0x93, 0x72, 0xfb, 0xb4, 0xd4, 0xb3, 0x19, 0x67, 0x25, 0x31, 0xd6,
0xdb, 0x4e, 0xe9, 0x78, 0x39, 0x18, 0x16, 0xe5, 0x02, 0xca, 0x0d, 0x69, 0x7b, 0xc2, 0x62, 0xa0,
0x72, 0xbc, 0x3c, 0x3f, 0xdb, 0x65, 0x5d, 0xe6, 0x61, 0x88, 0x91, 0xa7, 0x30, 0x7f, 0x77, 0xdc,
0x1e, 0x6d, 0x66, 0x9a, 0xcc, 0x12, 0x5b, 0x78, 0x23, 0x5f, 0xb7, 0x38, 0x4e, 0xd7, 0xa6, 0x0e,
0x73, 0xed, 0x36, 0x15, 0xda, 0xc1, 0xd8, 0xd3, 0x2f, 0xe8, 0x90, 0xde, 0xf6, 0xf6, 0xaf, 0x12,
0x4e, 0xd0, 0x53, 0xc8, 0x06, 0x0a, 0x9a, 0xef, 0x97, 0xaa, 0xe4, 0x23, 0x4b, 0xe9, 0x95, 0x52,
0xf1, 0xfd, 0xbe, 0x17, 0xb1, 0x6f, 0xe7, 0xc3, 0xe1, 0x19, 0x7b, 0x58, 0x50, 0xf8, 0x26, 0x0c,
0x33, 0x23, 0x4a, 0xa8, 0x0b, 0x6a, 0x87, 0xf6, 0x6c, 0xda, 0x26, 0x9c, 0x76, 0x34, 0xa7, 0xcd,
0x7a, 0xfd, 0x7d, 0x7f, 0x49, 0xc8, 0x8d, 0xef, 0x4d, 0xda, 0xb8, 0x29, 0xac, 0x82, 0x5d, 0xe7,
0xfa, 0x70, 0x83, 0x72, 0xf4, 0x08, 0x92, 0x81, 0x3f, 0xaa, 0x92, 0x57, 0x96, 0xd2, 0x2b, 0x7f,
0x1a, 0x8b, 0x7b, 0x7e, 0x3c, 0x03, 0x11, 0x55, 0xa2, 0xaf, 0xdf, 0xde, 0x08, 0xe1, 0x73, 0x00,
0xf4, 0x18, 0xa6, 0x86, 0x5d, 0x0d, 0x7f, 0x84, 0xa7, 0x19, 0x67, 0xd0, 0xbf, 0x05, 0x00, 0xa7,
0x7d, 0x44, 0x4d, 0xa2, 0xb9, 0xb6, 0xa1, 0x46, 0xf2, 0xca, 0x52, 0x0a, 0xa7, 0x3c, 0xc9, 0x9e,
0x6d, 0x14, 0xbe, 0x55, 0x20, 0x33, 0x14, 0x4f, 0x03, 0x62, 0xd2, 0xde, 0x0f, 0xe6, 0xfe, 0xd8,
0xad, 0x7d, 0x66, 0x1c, 0x2f, 0x17, 0xeb, 0x96, 0xc3, 0x6d, 0xd7, 0xa4, 0x16, 0x27, 0x5c, 0x67,
0x96, 0x84, 0xf2, 0xc3, 0xf2, 0x70, 0xd0, 0x43, 0x48, 0x0c, 0x47, 0x73, 0x67, 0x52, 0x34, 0x9e,
0x2b, 0x38, 0x30, 0x9b, 0x14, 0xc2, 0xab, 0x28, 0xc4, 0x3d, 0x13, 0x84, 0x20, 0x6a, 0x11, 0xd3,
0xf3, 0x3d, 0x85, 0xe5, 0x18, 0xe5, 0x21, 0xdd, 0xa1, 0x4e, 0xdb, 0xd6, 0x7b, 0xc2, 0x41, 0x35,
0x2c, 0x97, 0x06, 0x45, 0xc2, 0xca, 0xb5, 0x74, 0xee, 0x23, 0xcb, 0x31, 0x7a, 0x00, 0xb1, 0x2e,
0x71, 0xbb, 0x54, 0x8d, 0xc9, 0x63, 0xb8, 0x3d, 0xc9, 0xe7, 0x75, 0xa1, 0xbc, 0x11, 0xc2, 0x9e,
0x15, 0xfa, 0x3b, 0x44, 0x1c, 0xd7, 0x54, 0x13, 0xd2, 0xf8, 0xd6, 0xc4, 0xf4, 0xb9, 0xe6, 0x46,
0x08, 0x0b, 0x0b, 0x54, 0x87, 0xd4, 0x91, 0xee, 0x70, 0xd6, 0xb5, 0x89, 0xa9, 0xa6, 0xde, 0xc3,
0xa7, 0x01, 0xf3, 0x8d, 0xc0, 0x60, 0x23, 0x84, 0xfb, 0xd6, 0xe8, 0x05, 0xfc, 0x9e, 0x9e, 0xf4,
0x98, 0x45, 0x2d, 0xae, 0x13, 0x43, 0xeb, 0xc3, 0x82, 0x84, 0xfd, 0xeb, 0x24, 0xd8, 0x5a, 0xdf,
0x78, 0x70, 0x87, 0x59, 0x3a, 0x46, 0x8e, 0x56, 0x21, 0xe1, 0xb8, 0xa6, 0x49, 0xec, 0x53, 0x35,
0x2d, 0xe1, 0x17, 0xaf, 0x10, 0xb4, 0x50, 0xdf, 0x08, 0xe1, 0xc0, 0x12, 0xd5, 0x21, 0x69, 0x52,
0x4e, 0x3a, 0x84, 0x13, 0x35, 0x23, 0xb9, 0xb2, 0x38, 0x81, 0x7e, 0x8f, 0xe8, 0xe9, 0x3e, 0x31,
0xdc, 0xf3, 0x4a, 0x0a, 0xcc, 0x2b, 0x71, 0x88, 0x8a, 0xdf, 0xcd, 0x68, 0x32, 0x9a, 0x8d, 0x6d,
0x46, 0x93, 0xf1, 0x6c, 0x62, 0x33, 0x9a, 0x4c, 0x66, 0x53, 0x85, 0x27, 0x10, 0x93, 0xc9, 0x42,
0xbb, 0x90, 0x16, 0x2a, 0x5a, 0x8f, 0xe9, 0x16, 0xbf, 0xf2, 0x6d, 0xb4, 0xe3, 0x9a, 0x07, 0xd4,
0x16, 0x77, 0xda, 0xae, 0xb0, 0xc3, 0xd0, 0x09, 0x86, 0x4e, 0xe1, 0x57, 0x05, 0x22, 0x4d, 0xd7,
0xfc, 0xfc, 0xc8, 0x88, 0xc1, 0x75, 0xd2, 0xed, 0xda, 0xb4, 0x2b, 0xab, 0x4c, 0xe3, 0xd4, 0xec,
0x31, 0x9b, 0x18, 0x3a, 0x3f, 0x95, 0x84, 0x9e, 0x5e, 0xf9, 0xdb, 0x24, 0xf4, 0x72, 0xdf, 0xbc,
0xd5, 0xb7, 0xc6, 0x73, 0x64, 0xac, 0x1c, 0xdd, 0x84, 0x8c, 0xee, 0x68, 0x26, 0xb3, 0x18, 0x67,
0x96, 0xde, 0x96, 0xb5, 0x91, 0xc4, 0x69, 0xdd, 0xd9, 0x0e, 0x44, 0x85, 0xef, 0x14, 0x48, 0xf5,
0x09, 0xd0, 0x1c, 0x17, 0xf3, 0xca, 0x95, 0xa9, 0xfb, 0x65, 0x84, 0x5d, 0xf8, 0x59, 0x81, 0xd9,
0x71, 0xbc, 0x47, 0xcf, 0xc7, 0x85, 0xf7, 0xe0, 0x63, 0x4a, 0xe8, 0x0b, 0x89, 0xf4, 0x19, 0x24,
0xfc, 0x0a, 0x44, 0x8f, 0xc7, 0xc5, 0xf6, 0x97, 0x2b, 0xd6, 0xef, 0xf8, 0x4a, 0x38, 0x0b, 0xc3,
0xcc, 0x08, 0x9f, 0xd1, 0x36, 0x00, 0xe1, 0xdc, 0xd6, 0x0f, 0x5c, 0x4e, 0x1d, 0x35, 0xf1, 0x31,
0xf5, 0x3d, 0x00, 0x80, 0x4a, 0x30, 0xeb, 0x70, 0x62, 0x73, 0x8d, 0xeb, 0x26, 0xd5, 0x5c, 0x4b,
0x3f, 0xd1, 0x2c, 0x62, 0x31, 0x79, 0x5c, 0x71, 0x7c, 0x4d, 0xae, 0xb5, 0x74, 0x93, 0xee, 0x59,
0xfa, 0xc9, 0x0e, 0xb1, 0x18, 0xfa, 0x23, 0x4c, 0x8f, 0xa8, 0x46, 0xa4, 0x6a, 0x86, 0x0f, 0x6a,
0x2d, 0x40, 0x8a, 0x38, 0x5a, 0x87, 0xb9, 0x07, 0x06, 0x55, 0xa3, 0x79, 0x65, 0x49, 0xd9, 0x08,
0xe1, 0x24, 0x71, 0xaa, 0x52, 0x82, 0xae, 0x43, 0x9c, 0x38, 0x9a, 0x6e, 0x71, 0x35, 0x9e, 0x57,
0x96, 0xb2, 0xe2, 0xc6, 0x27, 0x4e, 0xdd, 0xe2, 0x68, 0x0b, 0x52, 0xf4, 0x84, 0x9a, 0x3d, 0x83,
0xd8, 0x8e, 0x1a, 0x93, 0xc1, 0x2d, 0x4d, 0xa6, 0x87, 0x67, 0xe0, 0x47, 0xd7, 0x07, 0x40, 0xb3,
0x10, 0x3b, 0x34, 0x48, 0xd7, 0x51, 0x93, 0x79, 0x65, 0x69, 0x0a, 0x7b, 0x93, 0x4a, 0x02, 0x62,
0xc7, 0xe2, 0x34, 0x36, 0xa3, 0x49, 0x25, 0x1b, 0x2e, 0xfc, 0x18, 0x01, 0x74, 0x91, 0x56, 0x23,
0xe7, 0x9c, 0xfa, 0x42, 0xcf, 0x79, 0x16, 0x62, 0x6d, 0xe6, 0x5a, 0x5c, 0x9e, 0x71, 0x1c, 0x7b,
0x13, 0x84, 0xbc, 0xbe, 0x19, 0xf3, 0xcf, 0x5d, 0xb6, 0xc4, 0x5b, 0x30, 0x75, 0xe0, 0xb6, 0x5f,
0x50, 0xae, 0x49, 0x1d, 0x47, 0x8d, 0xe7, 0x23, 0x02, 0xce, 0x13, 0xae, 0x4a, 0x19, 0x5a, 0x84,
0x19, 0x7a, 0xd2, 0x33, 0xf4, 0xb6, 0xce, 0xb5, 0x03, 0xe6, 0x5a, 0x1d, 0x8f, 0x61, 0x0a, 0x9e,
0x0e, 0xc4, 0x15, 0x29, 0x1d, 0xce, 0x53, 0xf2, 0xb3, 0xe5, 0x09, 0x06, 0xf2, 0x24, 0xa2, 0x30,
0x75, 0x4b, 0x36, 0x42, 0x65, 0x43, 0xc1, 0x62, 0x22, 0x65, 0xe4, 0x44, 0xcd, 0x48, 0x59, 0x18,
0x8b, 0x89, 0x68, 0x52, 0x8e, 0x6b, 0x6a, 0xe2, 0xd7, 0xd4, 0x2d, 0xef, 0x97, 0x9c, 0x68, 0x7e,
0x7a, 0xff, 0x13, 0x87, 0x85, 0xf7, 0x5e, 0x20, 0x23, 0x99, 0x56, 0xbe, 0xfa, 0x4c, 0xcf, 0x8a,
0xb7, 0x27, 0x31, 0xa8, 0xac, 0xad, 0x6b, 0xd8, 0x9b, 0x88, 0xe7, 0xdf, 0xbf, 0xa9, 0xcd, 0xbc,
0xec, 0xcb, 0x27, 0x55, 0x1c, 0xa7, 0x84, 0x44, 0xa6, 0x1e, 0x75, 0x21, 0xd9, 0x63, 0x8e, 0xce,
0xf5, 0x63, 0x2a, 0xab, 0x25, 0xbd, 0x52, 0xfb, 0xa4, 0x6b, 0xb9, 0x58, 0x91, 0xbc, 0x72, 0x82,
0x27, 0x45, 0x00, 0x2e, 0x36, 0xb2, 0xe4, 0x45, 0x7a, 0x4c, 0xfd, 0x97, 0xd9, 0xe7, 0xdd, 0x28,
0x00, 0xbf, 0x84, 0x54, 0x43, 0xc4, 0x4d, 0x7f, 0x2a, 0x71, 0x7d, 0x8a, 0x66, 0xc6, 0x50, 0x74,
0x6a, 0x80, 0xa2, 0xe8, 0x36, 0x4c, 0xcb, 0xc3, 0xe7, 0x47, 0x36, 0x75, 0x8e, 0x98, 0xd1, 0x51,
0xa7, 0xc5, 0x32, 0x9e, 0x12, 0xd2, 0x56, 0x20, 0x9c, 0x5f, 0x83, 0x84, 0x1f, 0x0d, 0x9a, 0x83,
0x38, 0x3b, 0x3c, 0x74, 0x28, 0x97, 0xaf, 0xf0, 0x6b, 0xd8, 0x9f, 0x5d, 0x2c, 0x63, 0xf1, 0x6f,
0x20, 0x3a, 0x5c, 0xc6, 0x97, 0x55, 0x44, 0xe1, 0x55, 0x04, 0xb2, 0xa3, 0x0d, 0xe7, 0x2b, 0x69,
0x28, 0xe3, 0xe9, 0x9f, 0x1d, 0xa0, 0xbf, 0x47, 0x7e, 0x1d, 0x66, 0xfe, 0xe5, 0x12, 0x8b, 0xeb,
0x06, 0xd5, 0xe4, 0x2d, 0xef, 0x5d, 0x74, 0xe9, 0x95, 0x87, 0x1f, 0xda, 0x89, 0x8b, 0x32, 0xc2,
0x32, 0x7f, 0xec, 0xc3, 0xe1, 0xe9, 0x00, 0x58, 0x2e, 0x5c, 0xd2, 0x5d, 0xe6, 0x57, 0x61, 0x66,
0xc4, 0x10, 0xcd, 0x43, 0x32, 0x30, 0x95, 0xd9, 0x54, 0xf0, 0xf9, 0x5c, 0x80, 0x48, 0x37, 0xe5,
0xf9, 0x28, 0x78, 0xa8, 0x33, 0xbd, 0x8c, 0x40, 0x32, 0xe0, 0x1e, 0x7a, 0x0e, 0xbf, 0x3b, 0xd4,
0x0d, 0x4e, 0x6d, 0xda, 0xd1, 0x3e, 0x35, 0x5f, 0x28, 0x40, 0x2a, 0xf7, 0xf3, 0x76, 0x31, 0x0d,
0xe1, 0x49, 0x7d, 0x3d, 0x72, 0xf5, 0xbe, 0xfe, 0x04, 0x12, 0x4e, 0x8f, 0x58, 0x9a, 0xde, 0x91,
0x09, 0xcc, 0x54, 0x1e, 0x0a, 0x47, 0x7e, 0x78, 0x7b, 0xe3, 0x1f, 0x5d, 0x36, 0xe2, 0xbb, 0xce,
0x4a, 0x6d, 0x66, 0x18, 0xb4, 0xcd, 0x99, 0x5d, 0xea, 0x89, 0xd7, 0x50, 0x49, 0xb7, 0x38, 0xb5,
0x2d, 0x62, 0x94, 0xc4, 0xac, 0xd8, 0xec, 0x11, 0xab, 0x5e, 0xc5, 0x71, 0x01, 0x58, 0xef, 0xa0,
0x67, 0x90, 0xe4, 0x36, 0x69, 0x53, 0x81, 0x1d, 0x93, 0xd8, 0x65, 0x1f, 0xfb, 0x9f, 0x1f, 0x8e,
0xdd, 0x12, 0x48, 0xf5, 0x2a, 0x4e, 0x48, 0xc8, 0x7a, 0x67, 0xe4, 0xb1, 0x70, 0xf7, 0xbf, 0x0a,
0xcc, 0x8d, 0x7f, 0x22, 0xa2, 0x45, 0xb8, 0x55, 0x5e, 0x5f, 0xc7, 0xb5, 0xf5, 0x72, 0xab, 0xde,
0xd8, 0xd1, 0x5a, 0xb5, 0xed, 0xdd, 0x06, 0x2e, 0x6f, 0xd5, 0x5b, 0x4f, 0xb4, 0xbd, 0x9d, 0xe6,
0x6e, 0x6d, 0xb5, 0xbe, 0x56, 0xaf, 0x55, 0xb3, 0x21, 0x74, 0x13, 0x16, 0x2e, 0x53, 0xac, 0xd6,
0xb6, 0x5a, 0xe5, 0xac, 0x82, 0xee, 0x40, 0xe1, 0x32, 0x95, 0xd5, 0xbd, 0xed, 0xbd, 0xad, 0x72,
0xab, 0xbe, 0x5f, 0xcb, 0x86, 0xef, 0x3e, 0x87, 0xe9, 0x73, 0xbe, 0xae, 0xc9, 0xfb, 0xed, 0x06,
0xfc, 0xa1, 0x5a, 0x6e, 0x95, 0xb5, 0xdd, 0x46, 0x7d, 0xa7, 0xa5, 0xad, 0x6d, 0x95, 0xd7, 0x9b,
0x5a, 0xb5, 0xa1, 0xed, 0x34, 0x5a, 0xda, 0x5e, 0xb3, 0x96, 0x0d, 0xa1, 0x3f, 0xc3, 0xe2, 0x05,
0x85, 0x9d, 0x86, 0x86, 0x6b, 0xab, 0x0d, 0x5c, 0xad, 0x55, 0xb5, 0xfd, 0xf2, 0xd6, 0x5e, 0x4d,
0xdb, 0x2e, 0x37, 0x1f, 0x65, 0x95, 0xca, 0xff, 0x95, 0xd7, 0x67, 0x39, 0xe5, 0xcd, 0x59, 0x4e,
0xf9, 0xe9, 0x2c, 0xa7, 0xbc, 0x7c, 0x97, 0x0b, 0xbd, 0x79, 0x97, 0x0b, 0x7d, 0xff, 0x2e, 0x17,
0x82, 0x9b, 0x3a, 0x9b, 0x50, 0x51, 0x95, 0x8c, 0xff, 0x35, 0x64, 0x57, 0x2c, 0xec, 0x2a, 0x4f,
0x6b, 0x1f, 0x9c, 0x0f, 0xef, 0x03, 0x59, 0x97, 0x5a, 0x03, 0xdf, 0xec, 0xfe, 0x17, 0xce, 0x35,
0x7a, 0xd4, 0x6a, 0x9d, 0x83, 0x48, 0x78, 0xff, 0x73, 0x87, 0x53, 0xdc, 0x5f, 0x3e, 0x88, 0x4b,
0xab, 0xfb, 0xbf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x00, 0xa3, 0x78, 0x2c, 0xfd, 0x13, 0x00, 0x00,
}
func (m *MetricsData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *MetricsData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *MetricsData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceMetrics) > 0 {
for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
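// Note on the marshaling strategy used throughout this file: Marshal first
// computes the exact encoded size, allocates the buffer once, and
// MarshalToSizedBuffer then fills it backwards, starting at i = len(dAtA) and
// emitting fields in descending field-number order. Because every field is
// written right-to-left, the finished buffer reads left-to-right in ascending
// field order without intermediate copies or resizes.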
func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.DeprecatedScopeMetrics) > 0 {
for iNdEx := len(m.DeprecatedScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DeprecatedScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xc2
}
}
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.ScopeMetrics) > 0 {
for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ScopeMetrics) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScopeMetrics) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ScopeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.Metrics) > 0 {
for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Metric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Metadata) > 0 {
for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x62
}
}
if m.Data != nil {
{
size := m.Data.Size()
i -= size
if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if len(m.Unit) > 0 {
i -= len(m.Unit)
copy(dAtA[i:], m.Unit)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit)))
i--
dAtA[i] = 0x1a
}
if len(m.Description) > 0 {
i -= len(m.Description)
copy(dAtA[i:], m.Description)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Metric_Gauge) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric_Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Gauge != nil {
{
size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
return len(dAtA) - i, nil
}
func (m *Metric_Sum) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Sum != nil {
{
size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
return len(dAtA) - i, nil
}
func (m *Metric_Histogram) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric_Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Histogram != nil {
{
size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
}
return len(dAtA) - i, nil
}
func (m *Metric_ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric_ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.ExponentialHistogram != nil {
{
size, err := m.ExponentialHistogram.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x52
}
return len(dAtA) - i, nil
}
func (m *Metric_Summary) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric_Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Summary != nil {
{
size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x5a
}
return len(dAtA) - i, nil
}
func (m *Gauge) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Gauge) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.DataPoints) > 0 {
for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Sum) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Sum) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.IsMonotonic {
i--
if m.IsMonotonic {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x18
}
if m.AggregationTemporality != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
i--
dAtA[i] = 0x10
}
if len(m.DataPoints) > 0 {
for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Histogram) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.AggregationTemporality != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
i--
dAtA[i] = 0x10
}
if len(m.DataPoints) > 0 {
for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ExponentialHistogram) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.AggregationTemporality != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality))
i--
dAtA[i] = 0x10
}
if len(m.DataPoints) > 0 {
for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Summary) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Summary) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.DataPoints) > 0 {
for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *NumberDataPoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NumberDataPoint) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *NumberDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Flags != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
i--
dAtA[i] = 0x40
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
}
if m.Value != nil {
{
size := m.Value.Size()
i -= size
if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x19
}
if m.StartTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
i--
dAtA[i] = 0x11
}
return len(dAtA) - i, nil
}
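// The single byte written after each fixed64 value above is the protobuf tag:
// (field_number << 3) | wire_type. For example 0x19 = (3 << 3) | 1 marks
// time_unix_nano (field 3, wire type 1 = 64-bit) and 0x11 = (2 << 3) | 1
// marks start_time_unix_nano. The tag is emitted last chronologically but,
// since the buffer is filled backwards, it lands in front of the payload in
// the finished output.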
func (m *NumberDataPoint_AsDouble) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *NumberDataPoint_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
i--
dAtA[i] = 0x21
return len(dAtA) - i, nil
}
func (m *NumberDataPoint_AsInt) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *NumberDataPoint_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
i--
dAtA[i] = 0x31
return len(dAtA) - i, nil
}
func (m *HistogramDataPoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *HistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Max_ != nil {
{
size := m.Max_.Size()
i -= size
if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if m.Min_ != nil {
{
size := m.Min_.Size()
i -= size
if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if m.Flags != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
i--
dAtA[i] = 0x50
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
}
}
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
}
}
if len(m.ExplicitBounds) > 0 {
for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- {
f8 := math.Float64bits(float64(m.ExplicitBounds[iNdEx]))
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f8))
}
i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8))
i--
dAtA[i] = 0x3a
}
if len(m.BucketCounts) > 0 {
for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx]))
}
i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8))
i--
dAtA[i] = 0x32
}
if m.Sum_ != nil {
{
size := m.Sum_.Size()
i -= size
if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if m.Count != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
i--
dAtA[i] = 0x21
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x19
}
if m.StartTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
i--
dAtA[i] = 0x11
}
return len(dAtA) - i, nil
}
func (m *HistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
i--
dAtA[i] = 0x29
return len(dAtA) - i, nil
}
func (m *HistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
i--
dAtA[i] = 0x59
return len(dAtA) - i, nil
}
func (m *HistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *HistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
i--
dAtA[i] = 0x61
return len(dAtA) - i, nil
}
func (m *ExponentialHistogramDataPoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExponentialHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.ZeroThreshold != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
i--
dAtA[i] = 0x71
}
if m.Max_ != nil {
{
size := m.Max_.Size()
i -= size
if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if m.Min_ != nil {
{
size := m.Min_.Size()
i -= size
if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x5a
}
}
if m.Flags != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
i--
dAtA[i] = 0x50
}
{
size, err := m.Negative.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
{
size, err := m.Positive.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
if m.ZeroCount != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount))
i--
dAtA[i] = 0x39
}
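// scale is a sint32, so it is zigzag-encoded below: (n << 1) ^ (n >> 31) maps
// signed values to small unsigned varints (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3,
// ...), keeping negative scales compact on the wire.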
if m.Scale != 0 {
i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31))))
i--
dAtA[i] = 0x30
}
if m.Sum_ != nil {
{
size := m.Sum_.Size()
i -= size
if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
if m.Count != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
i--
dAtA[i] = 0x21
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x19
}
if m.StartTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
i--
dAtA[i] = 0x11
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ExponentialHistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
i--
dAtA[i] = 0x29
return len(dAtA) - i, nil
}
func (m *ExponentialHistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min))))
i--
dAtA[i] = 0x61
return len(dAtA) - i, nil
}
func (m *ExponentialHistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max))))
i--
dAtA[i] = 0x69
return len(dAtA) - i, nil
}
func (m *ExponentialHistogramDataPoint_Buckets) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ExponentialHistogramDataPoint_Buckets) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.BucketCounts) > 0 {
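// Packed repeated varints cannot be written strictly backwards because each
// element's encoded length is only known after encoding it, so the counts are
// varint-encoded forwards into the scratch buffer dAtA12, the used prefix is
// copied in front of the current write position, and a byte-length prefix is
// added. Ten bytes per element is the worst case for a uint64 varint.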
dAtA12 := make([]byte, len(m.BucketCounts)*10)
var j11 int
for _, num := range m.BucketCounts {
for num >= 1<<7 {
dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j11++
}
dAtA12[j11] = uint8(num)
j11++
}
i -= j11
copy(dAtA[i:], dAtA12[:j11])
i = encodeVarintMetrics(dAtA, i, uint64(j11))
i--
dAtA[i] = 0x12
}
if m.Offset != 0 {
i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *SummaryDataPoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SummaryDataPoint) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Flags != 0 {
i = encodeVarintMetrics(dAtA, i, uint64(m.Flags))
i--
dAtA[i] = 0x40
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
}
if len(m.QuantileValues) > 0 {
for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
}
if m.Sum != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
i--
dAtA[i] = 0x29
}
if m.Count != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
i--
dAtA[i] = 0x21
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x19
}
if m.StartTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
i--
dAtA[i] = 0x11
}
return len(dAtA) - i, nil
}
func (m *SummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Value != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
i--
dAtA[i] = 0x11
}
if m.Quantile != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile))))
i--
dAtA[i] = 0x9
}
return len(dAtA) - i, nil
}
func (m *Exemplar) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.FilteredAttributes) > 0 {
for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
}
if m.Value != nil {
{
size := m.Value.Size()
i -= size
if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
}
}
{
size := m.TraceId.Size()
i -= size
if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
{
size := m.SpanId.Size()
i -= size
if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x11
}
return len(dAtA) - i, nil
}
func (m *Exemplar_AsDouble) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Exemplar_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
i--
dAtA[i] = 0x19
return len(dAtA) - i, nil
}
func (m *Exemplar_AsInt) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Exemplar_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
i--
dAtA[i] = 0x31
return len(dAtA) - i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
offset -= sovMetrics(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
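// encodeVarintMetrics writes v as a base-128 varint ending just before the
// given offset: it steps offset back by the encoded length, emits 7 bits per
// byte (low bits first, continuation bit 0x80 on all but the last byte) and
// returns the new start index. Worked example: v = 300 (0b1_0010_1100)
// encodes as the two bytes 0xAC 0x02.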
func (m *MetricsData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceMetrics) > 0 {
for _, e := range m.ResourceMetrics {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
return n
}
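// The Size methods below mirror the marshalers exactly: for a
// length-delimited field with a field number below 16, the expression
// 1 + l + sovMetrics(uint64(l)) accounts for one tag byte, the varint-encoded
// length and the payload itself, while fixed64 fields cost a flat 9 bytes
// (tag + 8). Marshal relies on these totals to allocate the output buffer
// exactly once.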
func (m *ResourceMetrics) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Resource.Size()
n += 1 + l + sovMetrics(uint64(l))
if len(m.ScopeMetrics) > 0 {
for _, e := range m.ScopeMetrics {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
if len(m.DeprecatedScopeMetrics) > 0 {
for _, e := range m.DeprecatedScopeMetrics {
l = e.Size()
n += 2 + l + sovMetrics(uint64(l))
}
}
return n
}
func (m *ScopeMetrics) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Scope.Size()
n += 1 + l + sovMetrics(uint64(l))
if len(m.Metrics) > 0 {
for _, e := range m.Metrics {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Metric) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
l = len(m.Description)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
l = len(m.Unit)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
if m.Data != nil {
n += m.Data.Size()
}
if len(m.Metadata) > 0 {
for _, e := range m.Metadata {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
return n
}
func (m *Metric_Gauge) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Gauge != nil {
l = m.Gauge.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Metric_Sum) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Sum != nil {
l = m.Sum.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Metric_Histogram) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Histogram != nil {
l = m.Histogram.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Metric_ExponentialHistogram) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.ExponentialHistogram != nil {
l = m.ExponentialHistogram.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Metric_Summary) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Summary != nil {
l = m.Summary.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func (m *Gauge) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.DataPoints) > 0 {
for _, e := range m.DataPoints {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
return n
}
func (m *Sum) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.DataPoints) > 0 {
for _, e := range m.DataPoints {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.AggregationTemporality != 0 {
n += 1 + sovMetrics(uint64(m.AggregationTemporality))
}
if m.IsMonotonic {
n += 2
}
return n
}
func (m *Histogram) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.DataPoints) > 0 {
for _, e := range m.DataPoints {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.AggregationTemporality != 0 {
n += 1 + sovMetrics(uint64(m.AggregationTemporality))
}
return n
}
func (m *ExponentialHistogram) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.DataPoints) > 0 {
for _, e := range m.DataPoints {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.AggregationTemporality != 0 {
n += 1 + sovMetrics(uint64(m.AggregationTemporality))
}
return n
}
func (m *Summary) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.DataPoints) > 0 {
for _, e := range m.DataPoints {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
return n
}
func (m *NumberDataPoint) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.StartTimeUnixNano != 0 {
n += 9
}
if m.TimeUnixNano != 0 {
n += 9
}
if m.Value != nil {
n += m.Value.Size()
}
if len(m.Exemplars) > 0 {
for _, e := range m.Exemplars {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.Flags != 0 {
n += 1 + sovMetrics(uint64(m.Flags))
}
return n
}
func (m *NumberDataPoint_AsDouble) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *NumberDataPoint_AsInt) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *HistogramDataPoint) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.StartTimeUnixNano != 0 {
n += 9
}
if m.TimeUnixNano != 0 {
n += 9
}
if m.Count != 0 {
n += 9
}
if m.Sum_ != nil {
n += m.Sum_.Size()
}
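// Packed fixed64 fields (bucket_counts, explicit_bounds) occupy one tag byte,
// a varint length prefix and 8 bytes per element, hence the
// 1 + sovMetrics(len*8) + len*8 formulas below.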
if len(m.BucketCounts) > 0 {
n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8
}
if len(m.ExplicitBounds) > 0 {
n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8
}
if len(m.Exemplars) > 0 {
for _, e := range m.Exemplars {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.Flags != 0 {
n += 1 + sovMetrics(uint64(m.Flags))
}
if m.Min_ != nil {
n += m.Min_.Size()
}
if m.Max_ != nil {
n += m.Max_.Size()
}
return n
}
func (m *HistogramDataPoint_Sum) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *HistogramDataPoint_Min) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *HistogramDataPoint_Max) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *ExponentialHistogramDataPoint) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.StartTimeUnixNano != 0 {
n += 9
}
if m.TimeUnixNano != 0 {
n += 9
}
if m.Count != 0 {
n += 9
}
if m.Sum_ != nil {
n += m.Sum_.Size()
}
if m.Scale != 0 {
n += 1 + sozMetrics(uint64(m.Scale))
}
if m.ZeroCount != 0 {
n += 9
}
l = m.Positive.Size()
n += 1 + l + sovMetrics(uint64(l))
l = m.Negative.Size()
n += 1 + l + sovMetrics(uint64(l))
if m.Flags != 0 {
n += 1 + sovMetrics(uint64(m.Flags))
}
if len(m.Exemplars) > 0 {
for _, e := range m.Exemplars {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.Min_ != nil {
n += m.Min_.Size()
}
if m.Max_ != nil {
n += m.Max_.Size()
}
if m.ZeroThreshold != 0 {
n += 9
}
return n
}
func (m *ExponentialHistogramDataPoint_Sum) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *ExponentialHistogramDataPoint_Min) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *ExponentialHistogramDataPoint_Max) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *ExponentialHistogramDataPoint_Buckets) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Offset != 0 {
n += 1 + sozMetrics(uint64(m.Offset))
}
if len(m.BucketCounts) > 0 {
l = 0
for _, e := range m.BucketCounts {
l += sovMetrics(uint64(e))
}
n += 1 + sovMetrics(uint64(l)) + l
}
return n
}
func (m *SummaryDataPoint) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.StartTimeUnixNano != 0 {
n += 9
}
if m.TimeUnixNano != 0 {
n += 9
}
if m.Count != 0 {
n += 9
}
if m.Sum != 0 {
n += 9
}
if len(m.QuantileValues) > 0 {
for _, e := range m.QuantileValues {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
if m.Flags != 0 {
n += 1 + sovMetrics(uint64(m.Flags))
}
return n
}
func (m *SummaryDataPoint_ValueAtQuantile) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Quantile != 0 {
n += 9
}
if m.Value != 0 {
n += 9
}
return n
}
func (m *Exemplar) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.TimeUnixNano != 0 {
n += 9
}
if m.Value != nil {
n += m.Value.Size()
}
l = m.SpanId.Size()
n += 1 + l + sovMetrics(uint64(l))
l = m.TraceId.Size()
n += 1 + l + sovMetrics(uint64(l))
if len(m.FilteredAttributes) > 0 {
for _, e := range m.FilteredAttributes {
l = e.Size()
n += 1 + l + sovMetrics(uint64(l))
}
}
return n
}
func (m *Exemplar_AsDouble) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func (m *Exemplar_AsInt) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
n += 9
return n
}
func sovMetrics(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozMetrics(x uint64) (n int) {
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
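// sovMetrics is the varint size: one byte per 7 significant bits, with x|1
// guaranteeing at least one byte for zero. sozMetrics zigzag-encodes first so
// small negative values (e.g. a negative Scale) stay small on the wire.
//
// exampleVarintSizes is an illustrative sketch (hypothetical, not part of
// the generated API) of the resulting byte counts.
func exampleVarintSizes() []int {
neg := int64(-1) // zigzag maps -1 to 1 on the wire
return []int{
sovMetrics(0),   // 1: zero still needs a byte
sovMetrics(127), // 1: largest single-byte varint
sovMetrics(128), // 2
sozMetrics(uint64(neg)), // 1
}
}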
func (m *MetricsData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: MetricsData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
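// Every Unmarshal in this file shares this loop shape: read a varint key,
// split it into fieldNum (key >> 3) and wireType (key & 7), dispatch known
// fields, and hand anything else to skipMetrics so unknown fields are
// tolerated rather than rejected.
//
// exampleUnmarshalMinimal is an illustrative sketch (hypothetical, not part
// of the generated API): it decodes a MetricsData holding a single empty
// ResourceMetrics (field 1, wire type 2, zero length).
func exampleUnmarshalMinimal() (*MetricsData, error) {
raw := []byte{0x0A, 0x00} // key = 1<<3|2, then a zero length prefix
m := &MetricsData{}
err := m.Unmarshal(raw) // m.ResourceMetrics gains one empty element
return m, err
}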
func (m *ResourceMetrics) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{})
if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 1000:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeprecatedScopeMetrics = append(m.DeprecatedScopeMetrics, &ScopeMetrics{})
if err := m.DeprecatedScopeMetrics[len(m.DeprecatedScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ScopeMetrics) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Metrics = append(m.Metrics, &Metric{})
if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Metric) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Metric: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Description = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Unit = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Gauge{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Data = &Metric_Gauge{v}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Sum{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Data = &Metric_Sum{v}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Histogram{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Data = &Metric_Histogram{v}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &ExponentialHistogram{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Data = &Metric_ExponentialHistogram{v}
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Summary{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Data = &Metric_Summary{v}
iNdEx = postIndex
case 12:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Metadata = append(m.Metadata, v11.KeyValue{})
if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
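// Each data case above wraps the decoded message (Gauge, Sum, Histogram,
// ExponentialHistogram, or Summary) and stores it in the m.Data oneof, so if
// several data fields appear on the wire, the last one decoded wins.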
func (m *Gauge) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Sum) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Sum: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
m.AggregationTemporality = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.IsMonotonic = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Histogram) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DataPoints = append(m.DataPoints, &HistogramDataPoint{})
if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
m.AggregationTemporality = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExponentialHistogram) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{})
if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
m.AggregationTemporality = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Summary) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Summary: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DataPoints = append(m.DataPoints, &SummaryDataPoint{})
if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *NumberDataPoint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
m.StartTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 4:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = &NumberDataPoint_AsDouble{float64(math.Float64frombits(v))}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Exemplars = append(m.Exemplars, Exemplar{})
if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var v int64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = &NumberDataPoint_AsInt{v}
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Flags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *HistogramDataPoint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
m.StartTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 4:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
m.Count = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 5:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Sum_ = &HistogramDataPoint_Sum{float64(math.Float64frombits(v))}
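// BucketCounts accepts both encodings of a repeated fixed64 field: wire
// type 1 carries a single 8-byte element, while wire type 2 carries a
// packed run of 8-byte little-endian elements.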
case 6:
if wireType == 1 {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.BucketCounts = append(m.BucketCounts, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
elementCount := packedLen / 8
if elementCount != 0 && len(m.BucketCounts) == 0 {
m.BucketCounts = make([]uint64, 0, elementCount)
}
for iNdEx < postIndex {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.BucketCounts = append(m.BucketCounts, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
case 7:
if wireType == 1 {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.ExplicitBounds = append(m.ExplicitBounds, v2)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
elementCount := packedLen / 8
if elementCount != 0 && len(m.ExplicitBounds) == 0 {
m.ExplicitBounds = make([]float64, 0, elementCount)
}
for iNdEx < postIndex {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
v2 := float64(math.Float64frombits(v))
m.ExplicitBounds = append(m.ExplicitBounds, v2)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
}
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Exemplars = append(m.Exemplars, Exemplar{})
if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Flags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Min_ = &HistogramDataPoint_Min{float64(math.Float64frombits(v))}
case 12:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Max_ = &HistogramDataPoint_Max{float64(math.Float64frombits(v))}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExponentialHistogramDataPoint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
m.StartTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 4:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
m.Count = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 5:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Sum_ = &ExponentialHistogramDataPoint_Sum{float64(math.Float64frombits(v))}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
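// zigzag-decode the sint32 Scale: wire values 0, 1, 2, 3 map to 0, -1, 1, -2.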
v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
m.Scale = v
case 7:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
}
m.ZeroCount = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.ZeroCount = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Positive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Negative.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Flags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Exemplars = append(m.Exemplars, Exemplar{})
if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 12:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Min_ = &ExponentialHistogramDataPoint_Min{float64(math.Float64frombits(v))}
case 13:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Max_ = &ExponentialHistogramDataPoint_Max{float64(math.Float64frombits(v))}
case 14:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.ZeroThreshold = float64(math.Float64frombits(v))
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ExponentialHistogramDataPoint_Buckets) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Buckets: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Buckets: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
m.Offset = v
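// BucketCounts here is a repeated varint field: a packed run is pre-scanned
// for terminator bytes (< 0x80) to count elements and size the slice before
// decoding.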
case 2:
if wireType == 0 {
var v uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.BucketCounts = append(m.BucketCounts, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
elementCount := 0
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
elementCount++
}
}
if elementCount != 0 && len(m.BucketCounts) == 0 {
m.BucketCounts = make([]uint64, 0, elementCount)
}
for iNdEx < postIndex {
var v uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.BucketCounts = append(m.BucketCounts, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SummaryDataPoint) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
m.StartTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 4:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
m.Count = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 5:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Sum = float64(math.Float64frombits(v))
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{})
if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Flags |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Quantile = float64(math.Float64frombits(v))
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = float64(math.Float64frombits(v))
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Exemplar) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = &Exemplar_AsDouble{float64(math.Float64frombits(v))}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var v int64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = &Exemplar_AsInt{v}
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.FilteredAttributes = append(m.FilteredAttributes, v11.KeyValue{})
if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMetrics(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthMetrics
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupMetrics
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthMetrics
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
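// exampleSkipUnknownVarint is an illustrative sketch, not generated code: it
// shows how the default cases in Unmarshal use skipMetrics to step over one
// complete unknown field. For the bytes {0x08, 0x96, 0x01} (field 1, wire type
// 0, varint value 150), skipMetrics consumes the key and the varint payload and
// reports a width of 3 bytes.
func exampleSkipUnknownVarint() (int, error) {
raw := []byte{0x08, 0x96, 0x01}
return skipMetrics(raw) // 3, nil
}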
var (
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/profiles/v1development/profiles.proto
package v1development
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Specifies the method of aggregating metric values, either DELTA (change since last report)
// or CUMULATIVE (total since a fixed start time).
type AggregationTemporality int32
const (
// UNSPECIFIED is the default AggregationTemporality; it MUST NOT be used.
AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
// DELTA is an AggregationTemporality for a profiler which reports
// changes since the last report time. Successive metrics contain aggregation of
// values from continuous and non-overlapping intervals.
//
// The values for a DELTA metric are based only on the time interval
// associated with one measurement cycle. There is no dependency on
// previous measurements, as is the case for CUMULATIVE metrics.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// DELTA metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0+1 to
// t_0+2 with a value of 2.
AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
// CUMULATIVE is an AggregationTemporality for a profiler which
// reports changes since a fixed start time. This means that current values
// of a CUMULATIVE metric depend on all previous measurements since the
// start time. Because of this, the sender is required to retain this state
// in some form. If this state is lost or invalidated, the CUMULATIVE metric
// values MUST be reset, and a new fixed start time following the last
// reported measurement time MUST be used.
//
// For example, consider a system measuring the number of requests that
// it receives and reports the sum of these requests every second as a
// CUMULATIVE metric:
//
// 1. The system starts receiving at time=t_0.
// 2. A request is received, the system measures 1 request.
// 3. A request is received, the system measures 1 request.
// 4. A request is received, the system measures 1 request.
// 5. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+1 with a value of 3.
// 6. A request is received, the system measures 1 request.
// 7. A request is received, the system measures 1 request.
// 8. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_0 to
// t_0+2 with a value of 5.
// 9. The system experiences a fault and loses state.
// 10. The system recovers and resumes receiving at time=t_1.
// 11. A request is received, the system measures 1 request.
// 12. The 1 second collection cycle ends. A metric is exported for the
// number of requests received over the interval of time t_1 to
// t_1+1 with a value of 1.
//
// Note: even though it is valid to use CUMULATIVE when reporting changes
// since the last report time, it is not recommended.
AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
)
var AggregationTemporality_name = map[int32]string{
0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
1: "AGGREGATION_TEMPORALITY_DELTA",
2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
}
var AggregationTemporality_value = map[string]int32{
"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
"AGGREGATION_TEMPORALITY_DELTA": 1,
"AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
}
func (x AggregationTemporality) String() string {
return proto.EnumName(AggregationTemporality_name, int32(x))
}
func (AggregationTemporality) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{0}
}
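// exampleAggregationTemporalityNames is an illustrative sketch, not generated
// code: the enum round-trips between values and names through the registered
// name/value maps and the generated String method.
func exampleAggregationTemporalityNames() (string, int32) {
name := AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA.String() // "AGGREGATION_TEMPORALITY_DELTA"
value := AggregationTemporality_value["AGGREGATION_TEMPORALITY_CUMULATIVE"] // 2
return name, value
}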
// ProfilesDictionary represents the profiles data shared across the
// entire message being sent.
//
// Note that all fields in this message MUST have a zero value encoded as the first element.
// This allows for _index fields pointing into the dictionary to use a 0 pointer value
// to indicate 'null' / 'not set'. Unless otherwise defined, a 'zero value' message value
// is one with all default field values, so as to minimize wire encoded size.
type ProfilesDictionary struct {
// Mappings from address ranges to the image/binary/library mapped
// into that address range referenced by locations via Location.mapping_index.
MappingTable []*Mapping `protobuf:"bytes,1,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"`
// Locations referenced by samples via Stack.location_indices.
LocationTable []*Location `protobuf:"bytes,2,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"`
// Functions referenced by locations via Line.function_index.
FunctionTable []*Function `protobuf:"bytes,3,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"`
// Links referenced by samples via Sample.link_index.
LinkTable []*Link `protobuf:"bytes,4,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"`
// A common table for strings referenced by various messages.
// string_table[0] must always be "".
StringTable []string `protobuf:"bytes,5,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"`
// A common table for attributes referenced by various messages.
// It is a collection of key/value pairs. Note, global attributes
// like server name can be set using the resource API. Examples of attributes:
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
// "/http/server_latency": 300
// "abc.com/myattribute": true
// "abc.com/score": 10.239
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
AttributeTable []*KeyValueAndUnit `protobuf:"bytes,6,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table,omitempty"`
// Stacks referenced by samples via Sample.stack_index.
StackTable []*Stack `protobuf:"bytes,7,rep,name=stack_table,json=stackTable,proto3" json:"stack_table,omitempty"`
}
func (m *ProfilesDictionary) Reset() { *m = ProfilesDictionary{} }
func (m *ProfilesDictionary) String() string { return proto.CompactTextString(m) }
func (*ProfilesDictionary) ProtoMessage() {}
func (*ProfilesDictionary) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{0}
}
func (m *ProfilesDictionary) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProfilesDictionary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProfilesDictionary.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ProfilesDictionary) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProfilesDictionary.Merge(m, src)
}
func (m *ProfilesDictionary) XXX_Size() int {
return m.Size()
}
func (m *ProfilesDictionary) XXX_DiscardUnknown() {
xxx_messageInfo_ProfilesDictionary.DiscardUnknown(m)
}
var xxx_messageInfo_ProfilesDictionary proto.InternalMessageInfo
func (m *ProfilesDictionary) GetMappingTable() []*Mapping {
if m != nil {
return m.MappingTable
}
return nil
}
func (m *ProfilesDictionary) GetLocationTable() []*Location {
if m != nil {
return m.LocationTable
}
return nil
}
func (m *ProfilesDictionary) GetFunctionTable() []*Function {
if m != nil {
return m.FunctionTable
}
return nil
}
func (m *ProfilesDictionary) GetLinkTable() []*Link {
if m != nil {
return m.LinkTable
}
return nil
}
func (m *ProfilesDictionary) GetStringTable() []string {
if m != nil {
return m.StringTable
}
return nil
}
func (m *ProfilesDictionary) GetAttributeTable() []*KeyValueAndUnit {
if m != nil {
return m.AttributeTable
}
return nil
}
func (m *ProfilesDictionary) GetStackTable() []*Stack {
if m != nil {
return m.StackTable
}
return nil
}
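// exampleMinimalDictionary is an illustrative sketch, not generated code: per
// the ProfilesDictionary documentation above, every table MUST encode its zero
// value as the first element so that a 0 index can mean 'null' / 'not set', and
// string_table[0] must always be "".
func exampleMinimalDictionary() *ProfilesDictionary {
return &ProfilesDictionary{
MappingTable: []*Mapping{{}}, // mapping_table[0] is the null mapping
LocationTable: []*Location{{}},
FunctionTable: []*Function{{}},
LinkTable: []*Link{{}},
StringTable: []string{""}, // string_table[0] must be ""
AttributeTable: []*KeyValueAndUnit{{}},
StackTable: []*Stack{{}},
}
}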
// ProfilesData represents the profiles data that can be stored in persistent storage,
// OR can be embedded by other protocols that transfer OTLP profiles data but do not
// implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type ProfilesData struct {
// An array of ResourceProfiles.
// For data coming from an SDK profiler, this array will typically contain one
// element. Host-level profilers will usually create one ResourceProfile per
// container, as well as one additional ResourceProfile grouping all samples
// from non-containerized processes.
// Other resource groupings are possible as well and clarified via
// Resource.attributes and semantic conventions.
// Tools that visualize profiles should prefer displaying
// resource_profiles[0].scope_profiles[0].profiles[0] by default.
ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"`
// One instance of ProfilesDictionary.
Dictionary ProfilesDictionary `protobuf:"bytes,2,opt,name=dictionary,proto3" json:"dictionary"`
}
func (m *ProfilesData) Reset() { *m = ProfilesData{} }
func (m *ProfilesData) String() string { return proto.CompactTextString(m) }
func (*ProfilesData) ProtoMessage() {}
func (*ProfilesData) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{1}
}
func (m *ProfilesData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ProfilesData) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProfilesData.Merge(m, src)
}
func (m *ProfilesData) XXX_Size() int {
return m.Size()
}
func (m *ProfilesData) XXX_DiscardUnknown() {
xxx_messageInfo_ProfilesData.DiscardUnknown(m)
}
var xxx_messageInfo_ProfilesData proto.InternalMessageInfo
func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles {
if m != nil {
return m.ResourceProfiles
}
return nil
}
func (m *ProfilesData) GetDictionary() ProfilesDictionary {
if m != nil {
return m.Dictionary
}
return ProfilesDictionary{}
}
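// exampleProfilesData is an illustrative sketch, not generated code: a
// ProfilesData message carries the resource-scoped profiles alongside exactly
// one shared ProfilesDictionary.
func exampleProfilesData(rp *ResourceProfiles, dict ProfilesDictionary) *ProfilesData {
return &ProfilesData{
ResourceProfiles: []*ResourceProfiles{rp},
Dictionary: dict,
}
}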
// A collection of ScopeProfiles from a Resource.
type ResourceProfiles struct {
// The resource for the profiles in this message.
// If this field is not set then no resource info is known.
Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
// A list of ScopeProfiles that originate from a resource.
ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_profiles" field which have their own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} }
func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) }
func (*ResourceProfiles) ProtoMessage() {}
func (*ResourceProfiles) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{2}
}
func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ResourceProfiles) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceProfiles.Merge(m, src)
}
func (m *ResourceProfiles) XXX_Size() int {
return m.Size()
}
func (m *ResourceProfiles) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceProfiles.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo
func (m *ResourceProfiles) GetResource() v1.Resource {
if m != nil {
return m.Resource
}
return v1.Resource{}
}
func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles {
if m != nil {
return m.ScopeProfiles
}
return nil
}
func (m *ResourceProfiles) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A collection of Profiles produced by an InstrumentationScope.
type ScopeProfiles struct {
// The instrumentation scope information for the profiles in this message.
// Semantically, when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
// A list of Profiles that originate from an instrumentation scope.
Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the profile data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all profiles in the "profiles" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} }
func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) }
func (*ScopeProfiles) ProtoMessage() {}
func (*ScopeProfiles) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{3}
}
func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ScopeProfiles) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScopeProfiles.Merge(m, src)
}
func (m *ScopeProfiles) XXX_Size() int {
return m.Size()
}
func (m *ScopeProfiles) XXX_DiscardUnknown() {
xxx_messageInfo_ScopeProfiles.DiscardUnknown(m)
}
var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo
func (m *ScopeProfiles) GetScope() v11.InstrumentationScope {
if m != nil {
return m.Scope
}
return v11.InstrumentationScope{}
}
func (m *ScopeProfiles) GetProfiles() []*Profile {
if m != nil {
return m.Profiles
}
return nil
}
func (m *ScopeProfiles) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// Represents a complete profile, including sample types, samples, mappings to
// binaries, stacks, locations, functions, string table, and additional
// metadata. It modifies and annotates pprof Profile with OpenTelemetry
// specific fields.
//
// Note that whilst fields in this message retain the name and field id from pprof in most cases
// for ease of understanding data migration, it is not intended that pprof:Profile and
// OpenTelemetry:Profile encoding be wire compatible.
type Profile struct {
// The type and unit of all Sample.values in this profile.
// For a cpu or off-cpu profile this might be:
// ["cpu","nanoseconds"] or ["off_cpu","nanoseconds"]
// For a heap profile, this might be:
// ["allocated_objects","count"] or ["allocated_space","bytes"],
SampleType ValueType `protobuf:"bytes,1,opt,name=sample_type,json=sampleType,proto3" json:"sample_type"`
// The set of samples recorded in this profile.
Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"`
// Time of collection (UTC) represented as nanoseconds past the epoch.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// Duration of the profile, if a duration makes sense.
DurationNano uint64 `protobuf:"varint,4,opt,name=duration_nano,json=durationNano,proto3" json:"duration_nano,omitempty"`
// The kind of events between sampled occurrences,
// e.g. ["cpu","cycles"] or ["heap","bytes"].
PeriodType ValueType `protobuf:"bytes,5,opt,name=period_type,json=periodType,proto3" json:"period_type"`
// The number of events between sampled occurrences.
Period int64 `protobuf:"varint,6,opt,name=period,proto3" json:"period,omitempty"`
// Free-form text associated with the profile. The text is displayed as is
// to the user by the tools that read profiles (e.g. by pprof). This field
// should not be used to store any machine-readable information, it is only
// for human-friendly content. The profile must stay functional if this field
// is cleaned.
CommentStrindices []int32 `protobuf:"varint,7,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"`
// A globally unique identifier for a profile. The ID is a 16-byte array. An ID with
// all zeroes is considered invalid. It may be used for deduplication and signal
// correlation purposes. It is acceptable to treat two profiles with different values
// in this field as not equal, even if they represented the same object at an earlier
// time.
// This field is optional; an ID may be assigned to an ID-less profile in a later step.
ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,8,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"`
// dropped_attributes_count is the number of attributes that were discarded. Attributes
// can be discarded because their keys are too long or because there are too many
// attributes. If this value is 0, then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,9,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present]
OriginalPayloadFormat string `protobuf:"bytes,10,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"`
// Original payload can be stored in this field. This can be useful for users who want to get the original payload.
// Formats such as JFR are highly extensible and can contain more information than what is defined in this spec.
// Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload.
// If the original payload is in pprof format, it SHOULD NOT be included in this field.
// The field is optional, however if it is present then equivalent converted data should be populated in other fields
// of this message as far as is practicable.
OriginalPayload []byte `protobuf:"bytes,11,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"`
// References to attributes in attribute_table. [optional]
AttributeIndices []int32 `protobuf:"varint,12,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
}
func (m *Profile) Reset() { *m = Profile{} }
func (m *Profile) String() string { return proto.CompactTextString(m) }
func (*Profile) ProtoMessage() {}
func (*Profile) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{4}
}
func (m *Profile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Profile.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Profile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Profile.Merge(m, src)
}
func (m *Profile) XXX_Size() int {
return m.Size()
}
func (m *Profile) XXX_DiscardUnknown() {
xxx_messageInfo_Profile.DiscardUnknown(m)
}
var xxx_messageInfo_Profile proto.InternalMessageInfo
func (m *Profile) GetSampleType() ValueType {
if m != nil {
return m.SampleType
}
return ValueType{}
}
func (m *Profile) GetSample() []*Sample {
if m != nil {
return m.Sample
}
return nil
}
func (m *Profile) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *Profile) GetDurationNano() uint64 {
if m != nil {
return m.DurationNano
}
return 0
}
func (m *Profile) GetPeriodType() ValueType {
if m != nil {
return m.PeriodType
}
return ValueType{}
}
func (m *Profile) GetPeriod() int64 {
if m != nil {
return m.Period
}
return 0
}
func (m *Profile) GetCommentStrindices() []int32 {
if m != nil {
return m.CommentStrindices
}
return nil
}
func (m *Profile) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
func (m *Profile) GetOriginalPayloadFormat() string {
if m != nil {
return m.OriginalPayloadFormat
}
return ""
}
func (m *Profile) GetOriginalPayload() []byte {
if m != nil {
return m.OriginalPayload
}
return nil
}
func (m *Profile) GetAttributeIndices() []int32 {
if m != nil {
return m.AttributeIndices
}
return nil
}
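// exampleCPUProfile is an illustrative sketch, not generated code: a minimal
// CPU profile skeleton. The cpuStrIdx and nanosStrIdx parameters are assumed to
// reference "cpu" and "nanoseconds" entries the caller has already interned in
// the dictionary string table.
func exampleCPUProfile(cpuStrIdx, nanosStrIdx int32, startNanos uint64) *Profile {
return &Profile{
SampleType: ValueType{TypeStrindex: cpuStrIdx, UnitStrindex: nanosStrIdx},
TimeUnixNano: startNanos,
}
}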
// A pointer from a profile Sample to a trace Span.
// Connects a profile sample to a trace span, identified by unique trace and span IDs.
type Link struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
// A unique identifier for the linked span. The ID is an 8-byte array.
SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
}
func (m *Link) Reset() { *m = Link{} }
func (m *Link) String() string { return proto.CompactTextString(m) }
func (*Link) ProtoMessage() {}
func (*Link) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{5}
}
func (m *Link) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Link.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Link) XXX_Merge(src proto.Message) {
xxx_messageInfo_Link.Merge(m, src)
}
func (m *Link) XXX_Size() int {
return m.Size()
}
func (m *Link) XXX_DiscardUnknown() {
xxx_messageInfo_Link.DiscardUnknown(m)
}
var xxx_messageInfo_Link proto.InternalMessageInfo
// ValueType describes the type and units of a value, with an optional aggregation temporality.
type ValueType struct {
TypeStrindex int32 `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"`
UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}
func (m *ValueType) Reset() { *m = ValueType{} }
func (m *ValueType) String() string { return proto.CompactTextString(m) }
func (*ValueType) ProtoMessage() {}
func (*ValueType) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{6}
}
func (m *ValueType) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ValueType.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ValueType) XXX_Merge(src proto.Message) {
xxx_messageInfo_ValueType.Merge(m, src)
}
func (m *ValueType) XXX_Size() int {
return m.Size()
}
func (m *ValueType) XXX_DiscardUnknown() {
xxx_messageInfo_ValueType.DiscardUnknown(m)
}
var xxx_messageInfo_ValueType proto.InternalMessageInfo
func (m *ValueType) GetTypeStrindex() int32 {
if m != nil {
return m.TypeStrindex
}
return 0
}
func (m *ValueType) GetUnitStrindex() int32 {
if m != nil {
return m.UnitStrindex
}
return 0
}
func (m *ValueType) GetAggregationTemporality() AggregationTemporality {
if m != nil {
return m.AggregationTemporality
}
return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
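// exampleDeltaValueType is an illustrative sketch, not generated code: a
// delta-temporality ValueType with its type and unit resolved through assumed
// dictionary string-table indices.
func exampleDeltaValueType(typeStrIdx, unitStrIdx int32) ValueType {
return ValueType{
TypeStrindex: typeStrIdx,
UnitStrindex: unitStrIdx,
AggregationTemporality: AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
}
}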
// Each Sample records values encountered in some program context. The program
// context is typically a stack trace, perhaps augmented with auxiliary
// information like the thread-id or some indicator of a higher-level request
// being handled.
//
// A Sample MUST have at least one values or timestamps_unix_nano entry. If
// both fields are populated, they MUST contain the same number of elements, and
// the elements at the same index MUST refer to the same event.
//
// Examples of different ways of representing a sample with the total value of 10:
//
// Report of a stacktrace at 10 timestamps (consumers must assume the value is 1 for each point):
//
// values: []
// timestamps_unix_nano: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
//
// Report of a stacktrace with an aggregated value without timestamps:
//
// values: [10]
// timestamps_unix_nano: []
//
// Report of a stacktrace at 4 timestamps where each point records a specific value:
//
// values: [2, 2, 3, 3]
// timestamps_unix_nano: [1, 2, 3, 4]
type Sample struct {
// Reference to stack in ProfilesDictionary.stack_table.
StackIndex int32 `protobuf:"varint,1,opt,name=stack_index,json=stackIndex,proto3" json:"stack_index,omitempty"`
// The type and unit of each value is defined by Profile.sample_type.
Values []int64 `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"`
// References to attributes in ProfilesDictionary.attribute_table. [optional]
AttributeIndices []int32 `protobuf:"varint,3,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
// Reference to link in ProfilesDictionary.link_table. [optional]
// It can be unset / set to 0 if no link exists, as link_table[0] is always a 'null' default value.
LinkIndex int32 `protobuf:"varint,4,opt,name=link_index,json=linkIndex,proto3" json:"link_index,omitempty"`
// Timestamps associated with Sample represented in nanoseconds. These
// timestamps should fall within the Profile's time range.
TimestampsUnixNano []uint64 `protobuf:"fixed64,5,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{7}
}
func (m *Sample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Sample) XXX_Merge(src proto.Message) {
xxx_messageInfo_Sample.Merge(m, src)
}
func (m *Sample) XXX_Size() int {
return m.Size()
}
func (m *Sample) XXX_DiscardUnknown() {
xxx_messageInfo_Sample.DiscardUnknown(m)
}
var xxx_messageInfo_Sample proto.InternalMessageInfo
func (m *Sample) GetStackIndex() int32 {
if m != nil {
return m.StackIndex
}
return 0
}
func (m *Sample) GetValues() []int64 {
if m != nil {
return m.Values
}
return nil
}
func (m *Sample) GetAttributeIndices() []int32 {
if m != nil {
return m.AttributeIndices
}
return nil
}
func (m *Sample) GetLinkIndex() int32 {
if m != nil {
return m.LinkIndex
}
return 0
}
func (m *Sample) GetTimestampsUnixNano() []uint64 {
if m != nil {
return m.TimestampsUnixNano
}
return nil
}
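// exampleSampleEncodings is an illustrative sketch, not generated code: the
// three equivalent encodings of a sample with a total value of 10 described in
// the Sample documentation above.
func exampleSampleEncodings() []*Sample {
return []*Sample{
{TimestampsUnixNano: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, // value 1 per timestamp
{Values: []int64{10}}, // aggregated value, no timestamps
{Values: []int64{2, 2, 3, 3}, TimestampsUnixNano: []uint64{1, 2, 3, 4}}, // specific value per timestamp
}
}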
// Describes the mapping of a binary in memory, including its address range,
// file offset, and metadata like the build ID.
type Mapping struct {
// Address at which the binary (or DLL) is loaded into memory.
MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"`
// The limit of the address range occupied by this mapping.
MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"`
// Offset in the binary that corresponds to the first mapped address.
FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"`
// The object this entry is loaded from. This can be a filename on
// disk for the main binary and shared libraries, or virtual
// abstractions like "[vdso]".
FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
// References to attributes in ProfilesDictionary.attribute_table. [optional]
AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
}
func (m *Mapping) Reset() { *m = Mapping{} }
func (m *Mapping) String() string { return proto.CompactTextString(m) }
func (*Mapping) ProtoMessage() {}
func (*Mapping) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{8}
}
func (m *Mapping) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Mapping.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Mapping) XXX_Merge(src proto.Message) {
xxx_messageInfo_Mapping.Merge(m, src)
}
func (m *Mapping) XXX_Size() int {
return m.Size()
}
func (m *Mapping) XXX_DiscardUnknown() {
xxx_messageInfo_Mapping.DiscardUnknown(m)
}
var xxx_messageInfo_Mapping proto.InternalMessageInfo
func (m *Mapping) GetMemoryStart() uint64 {
if m != nil {
return m.MemoryStart
}
return 0
}
func (m *Mapping) GetMemoryLimit() uint64 {
if m != nil {
return m.MemoryLimit
}
return 0
}
func (m *Mapping) GetFileOffset() uint64 {
if m != nil {
return m.FileOffset
}
return 0
}
func (m *Mapping) GetFilenameStrindex() int32 {
if m != nil {
return m.FilenameStrindex
}
return 0
}
func (m *Mapping) GetAttributeIndices() []int32 {
if m != nil {
return m.AttributeIndices
}
return nil
}
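// exampleLibcMapping is an illustrative sketch, not generated code: a mapping
// for a shared library loaded into a hypothetical address range, where
// filenameStrIdx is assumed to reference a path such as "/usr/lib/libc.so.6"
// in the dictionary string table.
func exampleLibcMapping(filenameStrIdx int32) *Mapping {
return &Mapping{
MemoryStart: 0x7f0000000000,
MemoryLimit: 0x7f0000200000,
FileOffset: 0,
FilenameStrindex: filenameStrIdx,
}
}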
// A Stack represents a stack trace as a list of locations.
type Stack struct {
// References to locations in ProfilesDictionary.location_table.
// The first location is the leaf frame.
LocationIndices []int32 `protobuf:"varint,1,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"`
}
func (m *Stack) Reset() { *m = Stack{} }
func (m *Stack) String() string { return proto.CompactTextString(m) }
func (*Stack) ProtoMessage() {}
func (*Stack) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{9}
}
func (m *Stack) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Stack.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Stack) XXX_Merge(src proto.Message) {
xxx_messageInfo_Stack.Merge(m, src)
}
func (m *Stack) XXX_Size() int {
return m.Size()
}
func (m *Stack) XXX_DiscardUnknown() {
xxx_messageInfo_Stack.DiscardUnknown(m)
}
var xxx_messageInfo_Stack proto.InternalMessageInfo
func (m *Stack) GetLocationIndices() []int32 {
if m != nil {
return m.LocationIndices
}
return nil
}
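// exampleStack is an illustrative sketch, not generated code: a Stack is just a
// leaf-first list of indices into ProfilesDictionary.location_table.
func exampleStack(leafLocIdx, callerLocIdx int32) *Stack {
return &Stack{LocationIndices: []int32{leafLocIdx, callerLocIdx}}
}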
// Describes function and line table debug information.
type Location struct {
// Reference to mapping in ProfilesDictionary.mapping_table.
// It can be unset / set to 0 if the mapping is unknown or not applicable for
// this profile type, as mapping_table[0] is always a 'null' default mapping.
MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3" json:"mapping_index,omitempty"`
// The instruction address for this location, if available. It
// should be within [Mapping.memory_start...Mapping.memory_limit]
// for the corresponding mapping. A non-leaf address may be in the
// middle of a call instruction. It is up to display tools to find
// the beginning of the instruction if necessary.
Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"`
// Multiple Line entries indicate this location has inlined functions,
// where the last entry represents the caller into which the
// preceding entries were inlined.
//
// E.g., if memcpy() is inlined into printf:
// line[0].function_name == "memcpy"
// line[1].function_name == "printf"
Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"`
// References to attributes in ProfilesDictionary.attribute_table. [optional]
AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{10}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return m.Size()
}
func (m *Location) XXX_DiscardUnknown() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
func (m *Location) GetMappingIndex() int32 {
if m != nil {
return m.MappingIndex
}
return 0
}
func (m *Location) GetAddress() uint64 {
if m != nil {
return m.Address
}
return 0
}
func (m *Location) GetLine() []*Line {
if m != nil {
return m.Line
}
return nil
}
func (m *Location) GetAttributeIndices() []int32 {
if m != nil {
return m.AttributeIndices
}
return nil
}
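// exampleInlinedLocation is an illustrative sketch, not generated code: a
// location where memcpy was inlined into printf, following the Line ordering
// documented above (inlined callee first, caller last). The function indices
// are assumed to reference entries in ProfilesDictionary.function_table.
func exampleInlinedLocation(memcpyFnIdx, printfFnIdx int32) *Location {
return &Location{
Line: []*Line{
{FunctionIndex: memcpyFnIdx}, // line[0].function_name == "memcpy"
{FunctionIndex: printfFnIdx}, // line[1].function_name == "printf"
},
}
}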
// Details a specific line in a source code, linked to a function.
type Line struct {
// Reference to function in ProfilesDictionary.function_table.
FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"`
// Line number in source code. 0 means unset.
Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"`
// Column number in source code. 0 means unset.
Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"`
}
func (m *Line) Reset() { *m = Line{} }
func (m *Line) String() string { return proto.CompactTextString(m) }
func (*Line) ProtoMessage() {}
func (*Line) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{11}
}
func (m *Line) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Line.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Line) XXX_Merge(src proto.Message) {
xxx_messageInfo_Line.Merge(m, src)
}
func (m *Line) XXX_Size() int {
return m.Size()
}
func (m *Line) XXX_DiscardUnknown() {
xxx_messageInfo_Line.DiscardUnknown(m)
}
var xxx_messageInfo_Line proto.InternalMessageInfo
func (m *Line) GetFunctionIndex() int32 {
if m != nil {
return m.FunctionIndex
}
return 0
}
func (m *Line) GetLine() int64 {
if m != nil {
return m.Line
}
return 0
}
func (m *Line) GetColumn() int64 {
if m != nil {
return m.Column
}
return 0
}
// Describes a function, including its human-readable name, system name,
// source file, and starting line number in the source.
type Function struct {
// Function name. Empty string if not available.
NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"`
// Function name, as identified by the system. For instance,
// it can be a C++ mangled name. Empty string if not available.
SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"`
// Source file containing the function. Empty string if not available.
FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"`
// Line number in source file. 0 means unset.
StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"`
}
func (m *Function) Reset() { *m = Function{} }
func (m *Function) String() string { return proto.CompactTextString(m) }
func (*Function) ProtoMessage() {}
func (*Function) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{12}
}
func (m *Function) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Function.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Function) XXX_Merge(src proto.Message) {
xxx_messageInfo_Function.Merge(m, src)
}
func (m *Function) XXX_Size() int {
return m.Size()
}
func (m *Function) XXX_DiscardUnknown() {
xxx_messageInfo_Function.DiscardUnknown(m)
}
var xxx_messageInfo_Function proto.InternalMessageInfo
func (m *Function) GetNameStrindex() int32 {
if m != nil {
return m.NameStrindex
}
return 0
}
func (m *Function) GetSystemNameStrindex() int32 {
if m != nil {
return m.SystemNameStrindex
}
return 0
}
func (m *Function) GetFilenameStrindex() int32 {
if m != nil {
return m.FilenameStrindex
}
return 0
}
func (m *Function) GetStartLine() int64 {
if m != nil {
return m.StartLine
}
return 0
}
// A custom 'dictionary native' style of encoding attributes which is more convenient
// for profiles than opentelemetry.proto.common.v1.KeyValue.
// Specifically, it uses the string table for keys and allows optional unit information.
type KeyValueAndUnit struct {
KeyStrindex int32 `protobuf:"varint,1,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"`
Value v11.AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value"`
UnitStrindex int32 `protobuf:"varint,3,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
}
func (m *KeyValueAndUnit) Reset() { *m = KeyValueAndUnit{} }
func (m *KeyValueAndUnit) String() string { return proto.CompactTextString(m) }
func (*KeyValueAndUnit) ProtoMessage() {}
func (*KeyValueAndUnit) Descriptor() ([]byte, []int) {
return fileDescriptor_ddd0cf081a2fe76f, []int{13}
}
func (m *KeyValueAndUnit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *KeyValueAndUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_KeyValueAndUnit.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *KeyValueAndUnit) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyValueAndUnit.Merge(m, src)
}
func (m *KeyValueAndUnit) XXX_Size() int {
return m.Size()
}
func (m *KeyValueAndUnit) XXX_DiscardUnknown() {
xxx_messageInfo_KeyValueAndUnit.DiscardUnknown(m)
}
var xxx_messageInfo_KeyValueAndUnit proto.InternalMessageInfo
func (m *KeyValueAndUnit) GetKeyStrindex() int32 {
if m != nil {
return m.KeyStrindex
}
return 0
}
func (m *KeyValueAndUnit) GetValue() v11.AnyValue {
if m != nil {
return m.Value
}
return v11.AnyValue{}
}
func (m *KeyValueAndUnit) GetUnitStrindex() int32 {
if m != nil {
return m.UnitStrindex
}
return 0
}
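// exampleLatencyAttribute is an illustrative sketch, not generated code: a
// KeyValueAndUnit references its key and optional unit through dictionary
// string-table indices instead of inline strings; keyIdx and msUnitIdx are
// assumed to reference e.g. "http.server.latency" and "ms" entries in the
// string table, and the oneof wrapper follows the gogo pattern used above.
func exampleLatencyAttribute(keyIdx, msUnitIdx int32) KeyValueAndUnit {
return KeyValueAndUnit{
KeyStrindex: keyIdx,
Value: v11.AnyValue{Value: &v11.AnyValue_IntValue{IntValue: 300}},
UnitStrindex: msUnitIdx,
}
}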
func init() {
proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value)
proto.RegisterType((*ProfilesDictionary)(nil), "opentelemetry.proto.profiles.v1development.ProfilesDictionary")
proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData")
proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles")
proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles")
proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile")
proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link")
proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType")
proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample")
proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping")
proto.RegisterType((*Stack)(nil), "opentelemetry.proto.profiles.v1development.Stack")
proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location")
proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line")
proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function")
proto.RegisterType((*KeyValueAndUnit)(nil), "opentelemetry.proto.profiles.v1development.KeyValueAndUnit")
}
func init() {
proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f)
}
var fileDescriptor_ddd0cf081a2fe76f = []byte{
// 1475 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x97, 0x5f, 0x6f, 0x13, 0xc7,
0x16, 0xc0, 0xb3, 0xf1, 0xbf, 0xf8, 0xd8, 0x4e, 0xcc, 0x88, 0x1b, 0x2c, 0x24, 0x82, 0x31, 0xf7,
0x5e, 0x4c, 0xae, 0x48, 0x48, 0xb8, 0xad, 0x40, 0x54, 0x55, 0x9d, 0x38, 0xa0, 0x85, 0x90, 0xa4,
0x13, 0x07, 0x95, 0x16, 0x69, 0x3b, 0xf1, 0x4e, 0xdc, 0x55, 0x76, 0x67, 0x57, 0xbb, 0xe3, 0x08,
0xab, 0x5f, 0xa1, 0x0f, 0xfd, 0x04, 0xfd, 0x00, 0x95, 0xfa, 0x0d, 0x2a, 0xf5, 0x15, 0xf5, 0x89,
0xf6, 0xa1, 0x42, 0x7d, 0xa0, 0x15, 0xbc, 0xd0, 0x6f, 0x51, 0xcd, 0x9f, 0x5d, 0xff, 0x89, 0x23,
0xba, 0x55, 0x5f, 0xac, 0x9d, 0x33, 0x67, 0x7e, 0x67, 0xce, 0x99, 0x33, 0x67, 0x8e, 0xe1, 0x8e,
0x1f, 0x50, 0xc6, 0xa9, 0x4b, 0x3d, 0xca, 0xc3, 0xc1, 0x6a, 0x10, 0xfa, 0xdc, 0x17, 0xbf, 0x47,
0x8e, 0x4b, 0xa3, 0xd5, 0x93, 0x35, 0x9b, 0x9e, 0x50, 0xd7, 0x0f, 0x3c, 0xca, 0x78, 0x22, 0x5e,
0x91, 0x5a, 0x68, 0x79, 0x6c, 0xa9, 0x12, 0xae, 0x24, 0x3a, 0x63, 0x4b, 0x2f, 0x9e, 0xef, 0xf9,
0x3d, 0x5f, 0xc1, 0xc5, 0x97, 0x52, 0xbe, 0xb8, 0x3c, 0xcd, 0x78, 0xd7, 0xf7, 0x3c, 0x9f, 0xad,
0x9e, 0xac, 0xe9, 0x2f, 0xad, 0xbb, 0x32, 0x4d, 0x37, 0xa4, 0x91, 0xdf, 0x0f, 0xbb, 0x54, 0x68,
0xc7, 0xdf, 0x4a, 0xbf, 0xf1, 0x4b, 0x16, 0xd0, 0x9e, 0xde, 0x4c, 0xdb, 0xe9, 0x72, 0xc7, 0x67,
0x24, 0x1c, 0xa0, 0x4f, 0xa0, 0xe2, 0x91, 0x20, 0x70, 0x58, 0xcf, 0xe2, 0xe4, 0xd0, 0xa5, 0x35,
0xa3, 0x9e, 0x69, 0x96, 0xd6, 0x6f, 0xad, 0xfc, 0x75, 0x67, 0x56, 0x1e, 0x29, 0x00, 0x2e, 0x6b,
0x52, 0x47, 0x80, 0xd0, 0x67, 0x30, 0xef, 0xfa, 0x5d, 0x22, 0x0c, 0x69, 0xf4, 0xac, 0x44, 0xff,
0x3f, 0x0d, 0x7a, 0x5b, 0x13, 0x70, 0x25, 0x66, 0x25, 0xf0, 0xa3, 0x3e, 0xeb, 0x8e, 0xc0, 0x33,
0xe9, 0xe1, 0xf7, 0x34, 0x01, 0x57, 0x62, 0x96, 0x82, 0xef, 0x02, 0xb8, 0x0e, 0x3b, 0xd6, 0xe0,
0xac, 0x04, 0xdf, 0x4c, 0xb5, 0x6b, 0x87, 0x1d, 0xe3, 0xa2, 0x60, 0x28, 0xe0, 0x15, 0x28, 0x47,
0x3c, 0x1c, 0xc6, 0x38, 0x57, 0xcf, 0x34, 0x8b, 0xb8, 0xa4, 0x64, 0x4a, 0xc5, 0x86, 0x05, 0xc2,
0x79, 0xe8, 0x1c, 0xf6, 0x39, 0xd5, 0x5a, 0x79, 0x69, 0xf8, 0x6e, 0x1a, 0xc3, 0x0f, 0xe9, 0xe0,
0x31, 0x71, 0xfb, 0xb4, 0xc5, 0xec, 0x03, 0xe6, 0x70, 0x3c, 0x9f, 0x30, 0x95, 0x15, 0x0c, 0xa5,
0x88, 0x93, 0x6e, 0xec, 0x5a, 0x41, 0x5a, 0x58, 0x4b, 0x63, 0x61, 0x5f, 0x2c, 0xc7, 0x20, 0x29,
0x92, 0xd9, 0xf8, 0xcd, 0x80, 0x72, 0x92, 0x58, 0x84, 0x13, 0xe4, 0xc0, 0xb9, 0x38, 0xf7, 0xac,
0x98, 0xa2, 0xd3, 0xea, 0x83, 0x34, 0xa6, 0xb0, 0x86, 0xc4, 0x70, 0x5c, 0x0d, 0x27, 0x24, 0xc8,
0x06, 0xb0, 0x93, 0x5c, 0xae, 0xcd, 0xd6, 0x8d, 0x66, 0x69, 0xfd, 0xc3, 0x34, 0x36, 0x4e, 0xdf,
0x88, 0x8d, 0xec, 0xf3, 0x57, 0x97, 0x67, 0xf0, 0x08, 0xb7, 0xf1, 0xd6, 0x80, 0xea, 0xe4, 0x66,
0xd0, 0x43, 0x98, 0x8b, 0xb7, 0x53, 0x33, 0xa4, 0xe1, 0xeb, 0x53, 0x0d, 0x27, 0xd7, 0xf0, 0x64,
0x2d, 0xf1, 0x48, 0xdb, 0x48, 0x00, 0xe8, 0x73, 0x98, 0x8f, 0xba, 0x7e, 0x30, 0x12, 0x2f, 0x75,
0x57, 0xee, 0xa4, 0x3a, 0x1a, 0x41, 0x48, 0x82, 0x55, 0x89, 0x46, 0x87, 0xe8, 0x12, 0x40, 0xd4,
0xfd, 0x82, 0x7a, 0xc4, 0xea, 0x87, 0x6e, 0x2d, 0x53, 0x37, 0x9a, 0x45, 0x5c, 0x54, 0x92, 0x83,
0xd0, 0x7d, 0x90, 0x9f, 0x7b, 0x5b, 0xa8, 0xfe, 0x51, 0x68, 0xbc, 0x30, 0xa0, 0x32, 0xc6, 0x41,
0xbb, 0x90, 0x93, 0x24, 0xed, 0xe4, 0xf4, 0xc2, 0xa0, 0x2b, 0xd3, 0xc9, 0xda, 0x8a, 0xc9, 0x22,
0x1e, 0xf6, 0xc5, 0x7e, 0xe4, 0x6d, 0x95, 0x2c, 0xed, 0xae, 0xe2, 0xa0, 0x5d, 0x98, 0x9b, 0xf0,
0xf2, 0xd6, 0xdf, 0x38, 0x31, 0x9c, 0x40, 0xde, 0xe1, 0x5a, 0xe3, 0xa7, 0x1c, 0x14, 0xf4, 0x22,
0xf4, 0x14, 0x4a, 0x11, 0xf1, 0x02, 0x97, 0x5a, 0x7c, 0x90, 0xb8, 0xf4, 0x5e, 0x1a, 0xf3, 0xf2,
0x7a, 0x75, 0x06, 0x89, 0x53, 0xa0, 0x78, 0x42, 0x82, 0x1e, 0x40, 0x5e, 0x8d, 0xb4, 0x5f, 0xeb,
0xa9, 0x4e, 0x4f, 0xae, 0xc4, 0x9a, 0x80, 0xfe, 0x0d, 0xf3, 0xdc, 0xf1, 0xa8, 0xd5, 0x67, 0xce,
0x33, 0x8b, 0x11, 0xe6, 0x4b, 0xc7, 0xf2, 0xb8, 0x2c, 0xa4, 0x07, 0xcc, 0x79, 0xb6, 0x43, 0x98,
0x8f, 0xae, 0x42, 0xc5, 0xee, 0x87, 0xaa, 0xc6, 0x4a, 0xa5, 0x6c, 0xdd, 0x68, 0x66, 0x71, 0x39,
0x16, 0x4a, 0xa5, 0xa7, 0x50, 0x0a, 0x68, 0xe8, 0xf8, 0xb6, 0x72, 0x3a, 0xf7, 0x0f, 0x38, 0xad,
0x78, 0xd2, 0xe9, 0x45, 0xc8, 0xab, 0x51, 0x2d, 0x5f, 0x37, 0x9a, 0x19, 0xac, 0x47, 0xe8, 0x06,
0x20, 0x91, 0x15, 0x94, 0x71, 0x4b, 0xd6, 0x39, 0xdb, 0xe9, 0xd2, 0x48, 0x56, 0x9c, 0x1c, 0x3e,
0xa7, 0x67, 0xf6, 0x93, 0x09, 0x74, 0x08, 0xa0, 0x8d, 0x5b, 0x8e, 0x5d, 0x9b, 0xab, 0x1b, 0xcd,
0xf2, 0xc6, 0xa6, 0x30, 0xf6, 0xeb, 0xab, 0xcb, 0x77, 0x7b, 0xfe, 0xc4, 0x6e, 0x1d, 0xf1, 0x2c,
0xba, 0x2e, 0xed, 0x72, 0x3f, 0x5c, 0x0d, 0x6c, 0xc2, 0xc9, 0xaa, 0xc3, 0x38, 0x0d, 0x19, 0x71,
0x57, 0xc5, 0x28, 0xce, 0x12, 0xb3, 0x8d, 0x8b, 0x1a, 0x6b, 0xda, 0xe8, 0x36, 0xd4, 0xec, 0xd0,
0x0f, 0x02, 0x6a, 0x5b, 0x49, 0x5d, 0x8c, 0xac, 0xae, 0xdf, 0x67, 0xbc, 0x56, 0xac, 0x1b, 0xcd,
0x0a, 0x5e, 0xd4, 0xf3, 0xad, 0x64, 0x7a, 0x53, 0xcc, 0xa2, 0xf7, 0xe1, 0x82, 0x1f, 0x3a, 0x3d,
0x87, 0x11, 0xd7, 0x0a, 0xc8, 0xc0, 0xf5, 0x89, 0x6d, 0x1d, 0xf9, 0xa1, 0x47, 0x78, 0x0d, 0x64,
0xbe, 0xfd, 0x2b, 0x9e, 0xde, 0x53, 0xb3, 0xf7, 0xe4, 0x24, 0xba, 0x0e, 0xd5, 0xc9, 0x75, 0xb5,
0x92, 0xf0, 0x0d, 0x2f, 0x4c, 0x2c, 0x40, 0xff, 0x83, 0x73, 0xc3, 0x07, 0x20, 0x0e, 0x57, 0x59,
0x86, 0xab, 0x9a, 0x4c, 0x98, 0x4a, 0xde, 0xf8, 0xc1, 0x80, 0xac, 0x78, 0x64, 0xd0, 0x53, 0x98,
0xe3, 0x21, 0xe9, 0xca, 0xa0, 0x19, 0x32, 0x68, 0x2d, 0x1d, 0xb4, 0x3b, 0xe9, 0x83, 0xd6, 0x11,
0x24, 0xb3, 0x8d, 0x0b, 0x12, 0x69, 0xda, 0xe8, 0x09, 0x14, 0xa2, 0x80, 0x30, 0x01, 0x9f, 0x95,
0xf0, 0x8f, 0x34, 0xfc, 0x76, 0x7a, 0xf8, 0x7e, 0x40, 0x98, 0xd9, 0xc6, 0x79, 0x01, 0x34, 0xed,
0xc6, 0xcf, 0x06, 0x14, 0x93, 0xb4, 0x12, 0x79, 0x2c, 0x72, 0x53, 0x67, 0x0a, 0x7d, 0x26, 0x7d,
0xc9, 0xe1, 0xb2, 0x10, 0xee, 0x6b, 0x99, 0x50, 0xea, 0x33, 0x87, 0x0f, 0x95, 0x66, 0x95, 0x92,
0x10, 0x26, 0x4a, 0x5f, 0xc2, 0x05, 0xd2, 0xeb, 0x85, 0xb4, 0xa7, 0x1b, 0x0f, 0xea, 0x05, 0x7e,
0x48, 0x5c, 0x87, 0x0f, 0xe4, 0x05, 0x9a, 0x5f, 0xdf, 0x48, 0x93, 0xf8, 0xad, 0x21, 0xaa, 0x33,
0x24, 0xe1, 0x45, 0x32, 0x55, 0x2e, 0x8e, 0x25, 0xaf, 0xee, 0x31, 0xba, 0x1c, 0xbf, 0xb4, 0xa3,
0xfe, 0xa8, 0x67, 0xd3, 0x94, 0x1b, 0x5d, 0x84, 0xfc, 0x89, 0xf0, 0x5f, 0x15, 0xc1, 0x0c, 0xd6,
0xa3, 0xe9, 0x79, 0x90, 0x99, 0x9e, 0x07, 0xa2, 0xf4, 0xc9, 0x4e, 0x45, 0x19, 0xc9, 0x4a, 0x23,
0xb2, 0xef, 0x50, 0x36, 0x6e, 0xc2, 0x79, 0x51, 0x2e, 0x22, 0x4e, 0xbc, 0x20, 0x1a, 0x29, 0x25,
0xa2, 0xff, 0xc8, 0x63, 0x34, 0x9c, 0x8b, 0x0b, 0x4a, 0xe3, 0x47, 0x03, 0x0a, 0xba, 0x9d, 0x13,
0x5d, 0x8b, 0x47, 0x3d, 0x3f, 0x1c, 0x58, 0x11, 0x27, 0x21, 0x97, 0x3e, 0x64, 0x71, 0x49, 0xc9,
0xf6, 0x85, 0x68, 0x44, 0xc5, 0x75, 0x3c, 0x87, 0xcb, 0x13, 0x49, 0x54, 0xb6, 0x85, 0x48, 0x04,
0x42, 0xde, 0x6a, 0xff, 0xe8, 0x28, 0xa2, 0x5c, 0x1e, 0x42, 0x16, 0x83, 0x10, 0xed, 0x4a, 0x89,
0x70, 0x58, 0x8c, 0x18, 0xf1, 0x46, 0xce, 0x5f, 0xb9, 0x52, 0x8d, 0x27, 0x92, 0xe3, 0x9d, 0x1a,
0x9d, 0xdc, 0x19, 0xb7, 0x64, 0x1d, 0x72, 0xb2, 0x5d, 0x11, 0xd7, 0x30, 0x69, 0x45, 0xe3, 0x45,
0x86, 0x5c, 0xb4, 0x10, 0xcb, 0xe3, 0x35, 0xdf, 0x1b, 0x30, 0x17, 0x37, 0x9d, 0x22, 0xe3, 0xe2,
0xe6, 0x78, 0x2c, 0x2d, 0xb5, 0x50, 0x05, 0xb9, 0x06, 0x05, 0x62, 0xdb, 0x21, 0x8d, 0x22, 0xed,
0x7e, 0x3c, 0x44, 0x6d, 0xc8, 0xba, 0x0e, 0x8b, 0x5b, 0xd3, 0xb4, 0x1d, 0x24, 0xc5, 0x72, 0xf5,
0x74, 0x97, 0xb3, 0x67, 0xb8, 0xfc, 0x44, 0xd6, 0x05, 0x8a, 0xfe, 0x33, 0xd2, 0x1f, 0x8f, 0x6e,
0x3d, 0xe9, 0x74, 0xd5, 0xde, 0x91, 0xde, 0xe1, 0xac, 0x2c, 0xdd, 0xca, 0xde, 0x22, 0xe4, 0xbb,
0xbe, 0xdb, 0xf7, 0x98, 0x3c, 0xab, 0x0c, 0xd6, 0xa3, 0xc6, 0x77, 0x06, 0xcc, 0xc5, 0x1d, 0xb3,
0x88, 0xcc, 0xf8, 0x81, 0xe9, 0xc8, 0x8c, 0x1d, 0xd6, 0x4d, 0x38, 0x1f, 0x0d, 0x22, 0x4e, 0x3d,
0x6b, 0x5c, 0x57, 0xdd, 0x5b, 0xa4, 0xe6, 0x76, 0x26, 0x8e, 0xf7, 0x74, 0x2e, 0x64, 0xce, 0xc8,
0x05, 0xf1, 0xee, 0x8b, 0x2c, 0xb4, 0xa4, 0x0b, 0x59, 0xb9, 0xd9, 0xa2, 0x94, 0x88, 0x10, 0x34,
0xbe, 0x31, 0x60, 0x61, 0xa2, 0x1f, 0x16, 0xf9, 0x7a, 0x4c, 0x07, 0x93, 0xbb, 0x2e, 0x1d, 0xd3,
0x41, 0x42, 0xdd, 0x84, 0x9c, 0xbc, 0x89, 0xba, 0x9b, 0xbc, 0xf6, 0x8e, 0x7e, 0xa7, 0xc5, 0x94,
0x85, 0xb8, 0xc7, 0x91, 0x6b, 0x4f, 0x97, 0xaa, 0xcc, 0xe9, 0x52, 0xb5, 0xfc, 0x95, 0x01, 0x8b,
0xd3, 0x0b, 0x0c, 0xba, 0x06, 0x57, 0x5b, 0xf7, 0xef, 0xe3, 0xad, 0xfb, 0xad, 0x8e, 0xb9, 0xbb,
0x63, 0x75, 0xb6, 0x1e, 0xed, 0xed, 0xe2, 0xd6, 0xb6, 0xd9, 0x79, 0x62, 0x1d, 0xec, 0xec, 0xef,
0x6d, 0x6d, 0x9a, 0xf7, 0xcc, 0xad, 0x76, 0x75, 0x06, 0x5d, 0x81, 0x4b, 0x67, 0x29, 0xb6, 0xb7,
0xb6, 0x3b, 0xad, 0xaa, 0x81, 0xfe, 0x0b, 0x8d, 0xb3, 0x54, 0x36, 0x0f, 0x1e, 0x1d, 0x6c, 0xb7,
0x3a, 0xe6, 0xe3, 0xad, 0xea, 0xec, 0xc6, 0x4b, 0xe3, 0xf9, 0xeb, 0x25, 0xe3, 0xc5, 0xeb, 0x25,
0xe3, 0xf7, 0xd7, 0x4b, 0xc6, 0xd7, 0x6f, 0x96, 0x66, 0x5e, 0xbc, 0x59, 0x9a, 0x79, 0xf9, 0x66,
0x69, 0x06, 0x6e, 0x38, 0x7e, 0x8a, 0xe4, 0xdd, 0xa8, 0xc4, 0xcd, 0xe3, 0x9e, 0xd0, 0xda, 0x33,
0x3e, 0xfd, 0x38, 0xf5, 0x73, 0xa1, 0xfe, 0xcf, 0xf6, 0x28, 0x3b, 0xe3, 0xbf, 0xf7, 0xb7, 0xb3,
0xcb, 0xbb, 0x01, 0x65, 0x9d, 0x04, 0x28, 0x4d, 0x25, 0xed, 0xfc, 0xca, 0xe3, 0xb5, 0xf6, 0x50,
0xf9, 0x30, 0x2f, 0x69, 0xb7, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x93, 0x0f, 0x25, 0xda, 0xdd,
0x0f, 0x00, 0x00,
}
func (m *ProfilesDictionary) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProfilesDictionary) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProfilesDictionary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.StackTable) > 0 {
for iNdEx := len(m.StackTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.StackTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
}
}
if len(m.AttributeTable) > 0 {
for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
}
if len(m.StringTable) > 0 {
for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.StringTable[iNdEx])
copy(dAtA[i:], m.StringTable[iNdEx])
i = encodeVarintProfiles(dAtA, i, uint64(len(m.StringTable[iNdEx])))
i--
dAtA[i] = 0x2a
}
}
if len(m.LinkTable) > 0 {
for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
if len(m.FunctionTable) > 0 {
for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.LocationTable) > 0 {
for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.MappingTable) > 0 {
for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ProfilesData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.Dictionary.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.ResourceProfiles) > 0 {
for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.ScopeProfiles) > 0 {
for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.Profiles) > 0 {
for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Profile) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Profile) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
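// Packed repeated varints are staged in a scratch buffer sized at 10 bytes
// per element (the maximum length of a uint64 varint), then copied in front
// of the length prefix and field tag.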
if len(m.AttributeIndices) > 0 {
dAtA5 := make([]byte, len(m.AttributeIndices)*10)
var j4 int
for _, num1 := range m.AttributeIndices {
num := uint64(num1)
for num >= 1<<7 {
dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j4++
}
dAtA5[j4] = uint8(num)
j4++
}
i -= j4
copy(dAtA[i:], dAtA5[:j4])
i = encodeVarintProfiles(dAtA, i, uint64(j4))
i--
dAtA[i] = 0x62
}
if len(m.OriginalPayload) > 0 {
i -= len(m.OriginalPayload)
copy(dAtA[i:], m.OriginalPayload)
i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload)))
i--
dAtA[i] = 0x5a
}
if len(m.OriginalPayloadFormat) > 0 {
i -= len(m.OriginalPayloadFormat)
copy(dAtA[i:], m.OriginalPayloadFormat)
i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat)))
i--
dAtA[i] = 0x52
}
if m.DroppedAttributesCount != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x48
}
{
size := m.ProfileId.Size()
i -= size
if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
if len(m.CommentStrindices) > 0 {
dAtA7 := make([]byte, len(m.CommentStrindices)*10)
var j6 int
for _, num1 := range m.CommentStrindices {
num := uint64(num1)
for num >= 1<<7 {
dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j6++
}
dAtA7[j6] = uint8(num)
j6++
}
i -= j6
copy(dAtA[i:], dAtA7[:j6])
i = encodeVarintProfiles(dAtA, i, uint64(j6))
i--
dAtA[i] = 0x3a
}
if m.Period != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.Period))
i--
dAtA[i] = 0x30
}
{
size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
if m.DurationNano != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNano))
i--
dAtA[i] = 0x20
}
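// TimeUnixNano is a fixed64 field: 8 little-endian bytes under tag 0x19
// (field 3, wire type 1).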
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x19
}
if len(m.Sample) > 0 {
for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.SampleType.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Link) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Link) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size := m.SpanId.Size()
i -= size
if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.TraceId.Size()
i -= size
if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ValueType) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ValueType) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.AggregationTemporality != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality))
i--
dAtA[i] = 0x18
}
if m.UnitStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
i--
dAtA[i] = 0x10
}
if m.TypeStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Sample) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
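// TimestampsUnixNano is packed fixed64, so the length prefix is always
// 8 bytes per element.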
if len(m.TimestampsUnixNano) > 0 {
for iNdEx := len(m.TimestampsUnixNano) - 1; iNdEx >= 0; iNdEx-- {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimestampsUnixNano[iNdEx]))
}
i = encodeVarintProfiles(dAtA, i, uint64(len(m.TimestampsUnixNano)*8))
i--
dAtA[i] = 0x2a
}
if m.LinkIndex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex))
i--
dAtA[i] = 0x20
}
if len(m.AttributeIndices) > 0 {
dAtA11 := make([]byte, len(m.AttributeIndices)*10)
var j10 int
for _, num1 := range m.AttributeIndices {
num := uint64(num1)
for num >= 1<<7 {
dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j10++
}
dAtA11[j10] = uint8(num)
j10++
}
i -= j10
copy(dAtA[i:], dAtA11[:j10])
i = encodeVarintProfiles(dAtA, i, uint64(j10))
i--
dAtA[i] = 0x1a
}
if len(m.Values) > 0 {
dAtA13 := make([]byte, len(m.Values)*10)
var j12 int
for _, num1 := range m.Values {
num := uint64(num1)
for num >= 1<<7 {
dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j12++
}
dAtA13[j12] = uint8(num)
j12++
}
i -= j12
copy(dAtA[i:], dAtA13[:j12])
i = encodeVarintProfiles(dAtA, i, uint64(j12))
i--
dAtA[i] = 0x12
}
if m.StackIndex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.StackIndex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Mapping) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Mapping) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.AttributeIndices) > 0 {
dAtA15 := make([]byte, len(m.AttributeIndices)*10)
var j14 int
for _, num1 := range m.AttributeIndices {
num := uint64(num1)
for num >= 1<<7 {
dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j14++
}
dAtA15[j14] = uint8(num)
j14++
}
i -= j14
copy(dAtA[i:], dAtA15[:j14])
i = encodeVarintProfiles(dAtA, i, uint64(j14))
i--
dAtA[i] = 0x2a
}
if m.FilenameStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
i--
dAtA[i] = 0x20
}
if m.FileOffset != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset))
i--
dAtA[i] = 0x18
}
if m.MemoryLimit != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit))
i--
dAtA[i] = 0x10
}
if m.MemoryStart != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Stack) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Stack) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Stack) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.LocationIndices) > 0 {
dAtA17 := make([]byte, len(m.LocationIndices)*10)
var j16 int
for _, num1 := range m.LocationIndices {
num := uint64(num1)
for num >= 1<<7 {
dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j16++
}
dAtA17[j16] = uint8(num)
j16++
}
i -= j16
copy(dAtA[i:], dAtA17[:j16])
i = encodeVarintProfiles(dAtA, i, uint64(j16))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Location) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.AttributeIndices) > 0 {
dAtA19 := make([]byte, len(m.AttributeIndices)*10)
var j18 int
for _, num1 := range m.AttributeIndices {
num := uint64(num1)
for num >= 1<<7 {
dAtA19[j18] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
j18++
}
dAtA19[j18] = uint8(num)
j18++
}
i -= j18
copy(dAtA[i:], dAtA19[:j18])
i = encodeVarintProfiles(dAtA, i, uint64(j18))
i--
dAtA[i] = 0x22
}
if len(m.Line) > 0 {
for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if m.Address != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.Address))
i--
dAtA[i] = 0x10
}
if m.MappingIndex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Line) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Line) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Column != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.Column))
i--
dAtA[i] = 0x18
}
if m.Line != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.Line))
i--
dAtA[i] = 0x10
}
if m.FunctionIndex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *Function) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Function) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.StartLine != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine))
i--
dAtA[i] = 0x20
}
if m.FilenameStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex))
i--
dAtA[i] = 0x18
}
if m.SystemNameStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex))
i--
dAtA[i] = 0x10
}
if m.NameStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *KeyValueAndUnit) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *KeyValueAndUnit) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *KeyValueAndUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.UnitStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex))
i--
dAtA[i] = 0x18
}
{
size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintProfiles(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if m.KeyStrindex != 0 {
i = encodeVarintProfiles(dAtA, i, uint64(m.KeyStrindex))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
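// encodeVarintProfiles writes v as a protobuf varint ending immediately
// before offset and returns the index of its first byte.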
func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int {
offset -= sovProfiles(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
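// Size returns the exact number of bytes Marshal will produce. Every field
// here uses a single-byte tag (field numbers are all <= 15), so each
// length-delimited field costs 1 + len(payload) + sovProfiles(len) bytes.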
func (m *ProfilesDictionary) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.MappingTable) > 0 {
for _, e := range m.MappingTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.LocationTable) > 0 {
for _, e := range m.LocationTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.FunctionTable) > 0 {
for _, e := range m.FunctionTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.LinkTable) > 0 {
for _, e := range m.LinkTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.StringTable) > 0 {
for _, s := range m.StringTable {
l = len(s)
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.AttributeTable) > 0 {
for _, e := range m.AttributeTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.StackTable) > 0 {
for _, e := range m.StackTable {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
return n
}
func (m *ProfilesData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceProfiles) > 0 {
for _, e := range m.ResourceProfiles {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
l = m.Dictionary.Size()
n += 1 + l + sovProfiles(uint64(l))
return n
}
func (m *ResourceProfiles) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Resource.Size()
n += 1 + l + sovProfiles(uint64(l))
if len(m.ScopeProfiles) > 0 {
for _, e := range m.ScopeProfiles {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovProfiles(uint64(l))
}
return n
}
func (m *ScopeProfiles) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Scope.Size()
n += 1 + l + sovProfiles(uint64(l))
if len(m.Profiles) > 0 {
for _, e := range m.Profiles {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovProfiles(uint64(l))
}
return n
}
func (m *Profile) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.SampleType.Size()
n += 1 + l + sovProfiles(uint64(l))
if len(m.Sample) > 0 {
for _, e := range m.Sample {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if m.TimeUnixNano != 0 {
n += 9
}
if m.DurationNano != 0 {
n += 1 + sovProfiles(uint64(m.DurationNano))
}
l = m.PeriodType.Size()
n += 1 + l + sovProfiles(uint64(l))
if m.Period != 0 {
n += 1 + sovProfiles(uint64(m.Period))
}
if len(m.CommentStrindices) > 0 {
l = 0
for _, e := range m.CommentStrindices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
l = m.ProfileId.Size()
n += 1 + l + sovProfiles(uint64(l))
if m.DroppedAttributesCount != 0 {
n += 1 + sovProfiles(uint64(m.DroppedAttributesCount))
}
l = len(m.OriginalPayloadFormat)
if l > 0 {
n += 1 + l + sovProfiles(uint64(l))
}
l = len(m.OriginalPayload)
if l > 0 {
n += 1 + l + sovProfiles(uint64(l))
}
if len(m.AttributeIndices) > 0 {
l = 0
for _, e := range m.AttributeIndices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
return n
}
func (m *Link) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.TraceId.Size()
n += 1 + l + sovProfiles(uint64(l))
l = m.SpanId.Size()
n += 1 + l + sovProfiles(uint64(l))
return n
}
func (m *ValueType) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.TypeStrindex != 0 {
n += 1 + sovProfiles(uint64(m.TypeStrindex))
}
if m.UnitStrindex != 0 {
n += 1 + sovProfiles(uint64(m.UnitStrindex))
}
if m.AggregationTemporality != 0 {
n += 1 + sovProfiles(uint64(m.AggregationTemporality))
}
return n
}
func (m *Sample) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.StackIndex != 0 {
n += 1 + sovProfiles(uint64(m.StackIndex))
}
if len(m.Values) > 0 {
l = 0
for _, e := range m.Values {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
if len(m.AttributeIndices) > 0 {
l = 0
for _, e := range m.AttributeIndices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
if m.LinkIndex != 0 {
n += 1 + sovProfiles(uint64(m.LinkIndex))
}
if len(m.TimestampsUnixNano) > 0 {
n += 1 + sovProfiles(uint64(len(m.TimestampsUnixNano)*8)) + len(m.TimestampsUnixNano)*8
}
return n
}
func (m *Mapping) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.MemoryStart != 0 {
n += 1 + sovProfiles(uint64(m.MemoryStart))
}
if m.MemoryLimit != 0 {
n += 1 + sovProfiles(uint64(m.MemoryLimit))
}
if m.FileOffset != 0 {
n += 1 + sovProfiles(uint64(m.FileOffset))
}
if m.FilenameStrindex != 0 {
n += 1 + sovProfiles(uint64(m.FilenameStrindex))
}
if len(m.AttributeIndices) > 0 {
l = 0
for _, e := range m.AttributeIndices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
return n
}
func (m *Stack) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.LocationIndices) > 0 {
l = 0
for _, e := range m.LocationIndices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
return n
}
func (m *Location) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.MappingIndex != 0 {
n += 1 + sovProfiles(uint64(m.MappingIndex))
}
if m.Address != 0 {
n += 1 + sovProfiles(uint64(m.Address))
}
if len(m.Line) > 0 {
for _, e := range m.Line {
l = e.Size()
n += 1 + l + sovProfiles(uint64(l))
}
}
if len(m.AttributeIndices) > 0 {
l = 0
for _, e := range m.AttributeIndices {
l += sovProfiles(uint64(e))
}
n += 1 + sovProfiles(uint64(l)) + l
}
return n
}
func (m *Line) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.FunctionIndex != 0 {
n += 1 + sovProfiles(uint64(m.FunctionIndex))
}
if m.Line != 0 {
n += 1 + sovProfiles(uint64(m.Line))
}
if m.Column != 0 {
n += 1 + sovProfiles(uint64(m.Column))
}
return n
}
func (m *Function) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.NameStrindex != 0 {
n += 1 + sovProfiles(uint64(m.NameStrindex))
}
if m.SystemNameStrindex != 0 {
n += 1 + sovProfiles(uint64(m.SystemNameStrindex))
}
if m.FilenameStrindex != 0 {
n += 1 + sovProfiles(uint64(m.FilenameStrindex))
}
if m.StartLine != 0 {
n += 1 + sovProfiles(uint64(m.StartLine))
}
return n
}
func (m *KeyValueAndUnit) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.KeyStrindex != 0 {
n += 1 + sovProfiles(uint64(m.KeyStrindex))
}
l = m.Value.Size()
n += 1 + l + sovProfiles(uint64(l))
if m.UnitStrindex != 0 {
n += 1 + sovProfiles(uint64(m.UnitStrindex))
}
return n
}
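// sovProfiles reports the encoded size of x as a varint: one byte per 7 bits,
// with a minimum of one byte (the x|1 guards the zero case).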
func sovProfiles(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
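// sozProfiles reports the varint size of x after zigzag encoding, the
// representation protobuf uses for sint-typed fields.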
func sozProfiles(x uint64) (n int) {
return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
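// Unmarshal decodes the protobuf wire format in place: each iteration reads
// one key varint, splits it into field number and wire type, dispatches on
// the field number, and skips any field it does not recognize.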
func (m *ProfilesDictionary) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProfilesDictionary: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProfilesDictionary: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MappingTable = append(m.MappingTable, &Mapping{})
if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LocationTable = append(m.LocationTable, &Location{})
if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.FunctionTable = append(m.FunctionTable, &Function{})
if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LinkTable = append(m.LinkTable, &Link{})
if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AttributeTable = append(m.AttributeTable, &KeyValueAndUnit{})
if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StackTable = append(m.StackTable, &Stack{})
if err := m.StackTable[len(m.StackTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
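// Unknown fields are skipped rather than rejected, so newer producers remain
// readable by this decoder.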
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProfilesData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{})
if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Dictionary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ResourceProfiles) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{})
if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ScopeProfiles) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Profiles = append(m.Profiles, &Profile{})
if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Profile) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Profile: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SampleType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Sample = append(m.Sample, &Sample{})
if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType)
}
m.DurationNano = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DurationNano |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
}
m.Period = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Period |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
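// Repeated varint fields accept both encodings: unpacked (wire type 0, one
// value per key) and packed (wire type 2, a length-delimited run of varints).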
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.CommentStrindices = append(m.CommentStrindices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
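// Count the varints in the packed payload first (each one ends with a byte
// < 0x80) so the slice can be allocated at the right capacity in one step.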
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.CommentStrindices) == 0 {
m.CommentStrindices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.CommentStrindices = append(m.CommentStrindices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
}
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...)
if m.OriginalPayload == nil {
m.OriginalPayload = []byte{}
}
iNdEx = postIndex
case 12:
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.AttributeIndices) == 0 {
m.AttributeIndices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Link) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Link: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ValueType) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ValueType: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
}
m.TypeStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TypeStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
m.UnitStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.UnitStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
m.AggregationTemporality = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Sample) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Sample: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType)
}
m.StackIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.StackIndex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType == 0 {
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Values = append(m.Values, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.Values) == 0 {
m.Values = make([]int64, 0, elementCount)
}
for iNdEx < postIndex {
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Values = append(m.Values, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
case 3:
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.AttributeIndices) == 0 {
m.AttributeIndices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
}
m.LinkIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.LinkIndex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType == 1 {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
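// A packed fixed64 payload holds exactly 8 bytes per element.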
elementCount := packedLen / 8
if elementCount != 0 && len(m.TimestampsUnixNano) == 0 {
m.TimestampsUnixNano = make([]uint64, 0, elementCount)
}
for iNdEx < postIndex {
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.TimestampsUnixNano = append(m.TimestampsUnixNano, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Mapping) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Mapping: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
}
m.MemoryStart = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.MemoryStart |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
}
m.MemoryLimit = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.MemoryLimit |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
}
m.FileOffset = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FileOffset |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
m.FilenameStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FilenameStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.AttributeIndices) == 0 {
m.AttributeIndices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Stack) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Stack: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Stack: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.LocationIndices = append(m.LocationIndices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.LocationIndices) == 0 {
m.LocationIndices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.LocationIndices = append(m.LocationIndices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Location) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Location: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
}
m.MappingIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.MappingIndex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
m.Address = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Address |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
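// Nested message: decode the Line from its length-delimited sub-slice.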
m.Line = append(m.Line, &Line{})
if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType == 0 {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.AttributeIndices) == 0 {
m.AttributeIndices = make([]int32, 0, elementCount)
}
for iNdEx < postIndex {
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.AttributeIndices = append(m.AttributeIndices, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Line) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Line: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
}
m.FunctionIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FunctionIndex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
m.Line = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Line |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
}
m.Column = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Column |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Function) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Function: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
}
m.NameStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.NameStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
}
m.SystemNameStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.SystemNameStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
m.FilenameStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.FilenameStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
}
m.StartLine = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.StartLine |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *KeyValueAndUnit) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValueAndUnit: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValueAndUnit: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
}
m.KeyStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.KeyStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthProfiles
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthProfiles
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
m.UnitStrindex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProfiles
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.UnitStrindex |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipProfiles(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthProfiles
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
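// skipProfiles returns the number of bytes occupied by the unknown field that
// starts at dAtA[0], so Unmarshal can step over fields it does not recognize.
// Group wire types (3 = start group, 4 = end group) are tracked with a depth
// counter; the field only ends once depth returns to zero.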
func skipProfiles(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfiles
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfiles
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProfiles
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthProfiles
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupProfiles
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthProfiles
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/resource/v1/resource.proto
package v1
import (
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Resource information.
type Resource struct {
// Set of attributes that describe the resource.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"`
// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
// no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Set of entities that participate in this Resource.
//
// Note: keys in the references MUST exist in attributes of this message.
//
// Status: [Development]
EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"`
}
func (m *Resource) Reset() { *m = Resource{} }
func (m *Resource) String() string { return proto.CompactTextString(m) }
func (*Resource) ProtoMessage() {}
func (*Resource) Descriptor() ([]byte, []int) {
return fileDescriptor_446f73eacf88f3f5, []int{0}
}
func (m *Resource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
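// XXX_Marshal is invoked by the proto runtime. The deterministic branch
// delegates to the reflection-based marshaler (which produces stable output,
// e.g. sorted map keys), while the default branch takes the fast hand-rolled
// MarshalToSizedBuffer path below. The same pattern repeats for every message
// in this file.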
func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Resource) XXX_Merge(src proto.Message) {
xxx_messageInfo_Resource.Merge(m, src)
}
func (m *Resource) XXX_Size() int {
return m.Size()
}
func (m *Resource) XXX_DiscardUnknown() {
xxx_messageInfo_Resource.DiscardUnknown(m)
}
var xxx_messageInfo_Resource proto.InternalMessageInfo
func (m *Resource) GetAttributes() []v1.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *Resource) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
func (m *Resource) GetEntityRefs() []*v1.EntityRef {
if m != nil {
return m.EntityRefs
}
return nil
}
func init() {
proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource")
}
func init() {
proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5)
}
var fileDescriptor_446f73eacf88f3f5 = []byte{
// 334 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
0x10, 0xc6, 0xb3, 0xfa, 0xe7, 0x4f, 0x59, 0xf1, 0x12, 0x4a, 0x09, 0x1e, 0xa2, 0x78, 0xa9, 0xf4,
0xb0, 0x21, 0xed, 0xa5, 0xd7, 0x5a, 0x5a, 0x28, 0xa5, 0x54, 0x42, 0xf1, 0xd0, 0x8b, 0xc4, 0x38,
0x86, 0x40, 0xdc, 0x09, 0x9b, 0x89, 0xe0, 0x5b, 0xf4, 0x39, 0xfa, 0x02, 0x7d, 0x05, 0x8f, 0x1e,
0x7b, 0x92, 0xa2, 0x2f, 0x52, 0xb2, 0x31, 0xa9, 0x2d, 0x82, 0xb7, 0x6f, 0xe7, 0xfb, 0xe6, 0x37,
0xc3, 0x2c, 0x17, 0x98, 0x80, 0x24, 0x88, 0x61, 0x06, 0xa4, 0x16, 0x4e, 0xa2, 0x90, 0xd0, 0x51,
0x90, 0x62, 0xa6, 0x02, 0x70, 0xe6, 0x6e, 0xa5, 0x85, 0xb6, 0xcc, 0xf6, 0xaf, 0x7c, 0x51, 0x14,
0x55, 0x66, 0xee, 0xb6, 0x4e, 0x43, 0x0c, 0xb1, 0xc0, 0xe4, 0xaa, 0x48, 0xb4, 0x2e, 0x0e, 0x8d,
0x09, 0x70, 0x36, 0x43, 0x99, 0x0f, 0x29, 0x54, 0x91, 0xed, 0xae, 0x19, 0x3f, 0xf1, 0x76, 0x44,
0xf3, 0x89, 0x73, 0x9f, 0x48, 0x45, 0xe3, 0x8c, 0x20, 0xb5, 0x58, 0xa7, 0xde, 0x6b, 0x5c, 0x9e,
0x8b, 0x43, 0x4b, 0xec, 0x18, 0x73, 0x57, 0x3c, 0xc2, 0x62, 0xe8, 0xc7, 0x19, 0xf4, 0xff, 0x2d,
0xd7, 0x6d, 0xc3, 0xdb, 0x03, 0x98, 0xd7, 0xdc, 0x9a, 0x28, 0x4c, 0x12, 0x98, 0x8c, 0x7e, 0xaa,
0xa3, 0x00, 0x33, 0x49, 0x56, 0xad, 0xc3, 0x7a, 0x4d, 0xef, 0x6c, 0xe7, 0xdf, 0x54, 0xf6, 0x6d,
0xee, 0x9a, 0x0f, 0xbc, 0x01, 0x92, 0x22, 0x5a, 0x8c, 0x14, 0x4c, 0x53, 0xab, 0xae, 0x37, 0xe9,
0x1d, 0xd9, 0xe4, 0x4e, 0x77, 0x78, 0x30, 0xf5, 0x38, 0x94, 0x32, 0xed, 0x7f, 0xb0, 0xe5, 0xc6,
0x66, 0xab, 0x8d, 0xcd, 0xbe, 0x36, 0x36, 0x7b, 0xdb, 0xda, 0xc6, 0x6a, 0x6b, 0x1b, 0x9f, 0x5b,
0xdb, 0xe0, 0xdd, 0x08, 0xc5, 0x91, 0x0b, 0xf7, 0x9b, 0xe5, 0x71, 0x06, 0xb9, 0x35, 0x60, 0xaf,
0xf7, 0xe1, 0xdf, 0xa6, 0x28, 0x3f, 0x6e, 0x1c, 0x43, 0x40, 0xa8, 0x9c, 0x64, 0xe2, 0x93, 0xef,
0x44, 0x92, 0x40, 0x49, 0x3f, 0x76, 0xf4, 0x4b, 0x53, 0x43, 0x90, 0xfb, 0x5f, 0xfd, 0x5e, 0x6b,
0x3f, 0x27, 0x20, 0x5f, 0x2a, 0x8a, 0xe6, 0x8b, 0x72, 0x9a, 0x18, 0xba, 0xe3, 0xff, 0xba, 0xef,
0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x10, 0xa9, 0xec, 0x36, 0x02, 0x00, 0x00,
}
func (m *Resource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Resource) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
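// MarshalToSizedBuffer fills dAtA back-to-front, emitting fields in reverse
// field-number order. Writing backwards means each nested message's size is
// known before its length prefix is written, avoiding a second sizing pass.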
func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.EntityRefs) > 0 {
for iNdEx := len(m.EntityRefs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.EntityRefs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintResource(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if m.DroppedAttributesCount != 0 {
i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x10
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintResource(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
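// encodeVarintResource writes v as a base-128 varint immediately before
// offset and returns the index of its first byte. Each byte carries 7 payload
// bits and the high bit marks continuation; for example, 300 encodes as the
// two bytes 0xAC, 0x02.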
func encodeVarintResource(dAtA []byte, offset int, v uint64) int {
offset -= sovResource(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Resource) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovResource(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovResource(uint64(m.DroppedAttributesCount))
}
if len(m.EntityRefs) > 0 {
for _, e := range m.EntityRefs {
l = e.Size()
n += 1 + l + sovResource(uint64(l))
}
}
return n
}
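// sovResource returns the number of bytes the varint encoding of x occupies:
// one byte per started group of 7 significant bits. The x|1 keeps Len64 from
// returning 0, so x == 0 still counts as one byte.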
func sovResource(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
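// sozResource returns the varint size of the zigzag encoding of x, as used
// for sint64 fields: zigzag maps 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, so values
// of small magnitude stay small on the wire.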
func sozResource(x uint64) (n int) {
return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Resource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowResource
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Resource: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowResource
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthResource
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthResource
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v1.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowResource
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowResource
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthResource
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthResource
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EntityRefs = append(m.EntityRefs, &v1.EntityRef{})
if err := m.EntityRefs[len(m.EntityRefs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipResource(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthResource
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
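// skipResource mirrors skipProfiles above: it measures the unknown field
// starting at dAtA[0] so Resource.Unmarshal can skip over it.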
func skipResource(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowResource
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowResource
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowResource
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthResource
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupResource
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthResource
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowResource = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group")
)
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: opentelemetry/proto/trace/v1/trace.proto
package v1
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
math_bits "math/bits"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data"
v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// SpanFlags represents constants used to interpret the
// Span.flags field, which is protobuf 'fixed32' type and is to
// be used as bit-fields. Each non-zero value defined in this enum is
// a bit-mask. To extract the bit-field, for example, use an
// expression like:
//
// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Note that Span flags were introduced in version 1.1 of the
// OpenTelemetry protocol. Older Span producers do not set this
// field, consequently consumers should not rely on the absence of a
// particular flag bit to indicate the presence of a particular feature.
type SpanFlags int32
const (
// The zero value for the enum. Should not be used for comparisons.
// Instead use bitwise "and" with the appropriate mask as shown above.
SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
// Bits 0-7 are used for trace flags.
SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
)
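// Example (illustrative sketch, not part of the generated API): applying the
// masks above to a span's Flags field, where `span` is any *Span as defined
// later in this file.
//
//	w3cTraceFlags := span.Flags & uint32(SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
//	parentIsRemoteKnown := span.Flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
//	parentIsRemote := span.Flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0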
var SpanFlags_name = map[int32]string{
0: "SPAN_FLAGS_DO_NOT_USE",
255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
}
var SpanFlags_value = map[string]int32{
"SPAN_FLAGS_DO_NOT_USE": 0,
"SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
"SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
"SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
}
func (x SpanFlags) String() string {
return proto.EnumName(SpanFlags_name, int32(x))
}
func (SpanFlags) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{0}
}
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type Span_SpanKind int32
const (
// Unspecified. Do NOT use as default.
// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
// Indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
// Indicates that the span covers server-side handling of an RPC or other
// remote network request.
Span_SPAN_KIND_SERVER Span_SpanKind = 2
// Indicates that the span describes a request to some remote service.
Span_SPAN_KIND_CLIENT Span_SpanKind = 3
// Indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans. A PRODUCER span ends when the message is accepted
// by the broker, while the logical processing of the message might span a much longer time.
Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
// Indicates that the span describes a consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship
// between producer and consumer spans.
Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
)
var Span_SpanKind_name = map[int32]string{
0: "SPAN_KIND_UNSPECIFIED",
1: "SPAN_KIND_INTERNAL",
2: "SPAN_KIND_SERVER",
3: "SPAN_KIND_CLIENT",
4: "SPAN_KIND_PRODUCER",
5: "SPAN_KIND_CONSUMER",
}
var Span_SpanKind_value = map[string]int32{
"SPAN_KIND_UNSPECIFIED": 0,
"SPAN_KIND_INTERNAL": 1,
"SPAN_KIND_SERVER": 2,
"SPAN_KIND_CLIENT": 3,
"SPAN_KIND_PRODUCER": 4,
"SPAN_KIND_CONSUMER": 5,
}
func (x Span_SpanKind) String() string {
return proto.EnumName(Span_SpanKind_name, int32(x))
}
func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{3, 0}
}
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type Status_StatusCode int32
const (
// The default status.
Status_STATUS_CODE_UNSET Status_StatusCode = 0
// The Span has been validated by an Application developer or Operator to
// have completed successfully.
Status_STATUS_CODE_OK Status_StatusCode = 1
// The Span contains an error.
Status_STATUS_CODE_ERROR Status_StatusCode = 2
)
var Status_StatusCode_name = map[int32]string{
0: "STATUS_CODE_UNSET",
1: "STATUS_CODE_OK",
2: "STATUS_CODE_ERROR",
}
var Status_StatusCode_value = map[string]int32{
"STATUS_CODE_UNSET": 0,
"STATUS_CODE_OK": 1,
"STATUS_CODE_ERROR": 2,
}
func (x Status_StatusCode) String() string {
return proto.EnumName(Status_StatusCode_name, int32(x))
}
func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{4, 0}
}
// TracesData represents the traces data that can be stored in a persistent storage,
// OR can be embedded by other protocols that transfer OTLP traces data but do
// not implement the OTLP protocol.
//
// The main difference between this message and the collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// the OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type TracesData struct {
// An array of ResourceSpans.
// For data coming from a single resource this array will typically contain
// one element. Intermediary nodes that receive data from multiple origins
// typically batch the data before forwarding it further, and in that case this
// array will contain multiple elements.
ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
}
func (m *TracesData) Reset() { *m = TracesData{} }
func (m *TracesData) String() string { return proto.CompactTextString(m) }
func (*TracesData) ProtoMessage() {}
func (*TracesData) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{0}
}
func (m *TracesData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TracesData.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *TracesData) XXX_Merge(src proto.Message) {
xxx_messageInfo_TracesData.Merge(m, src)
}
func (m *TracesData) XXX_Size() int {
return m.Size()
}
func (m *TracesData) XXX_DiscardUnknown() {
xxx_messageInfo_TracesData.DiscardUnknown(m)
}
var xxx_messageInfo_TracesData proto.InternalMessageInfo
func (m *TracesData) GetResourceSpans() []*ResourceSpans {
if m != nil {
return m.ResourceSpans
}
return nil
}
// A collection of ScopeSpans from a Resource.
type ResourceSpans struct {
DeprecatedScopeSpans []*ScopeSpans `protobuf:"bytes,1000,rep,name=deprecated_scope_spans,json=deprecatedScopeSpans,proto3" json:"deprecated_scope_spans,omitempty"`
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"`
// A list of ScopeSpans that originate from a resource.
ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the resource data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to the data in the "resource" field. It does not apply
// to the data in the "scope_spans" field which have their own schema_url field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ResourceSpans) Reset() { *m = ResourceSpans{} }
func (m *ResourceSpans) String() string { return proto.CompactTextString(m) }
func (*ResourceSpans) ProtoMessage() {}
func (*ResourceSpans) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{1}
}
func (m *ResourceSpans) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ResourceSpans) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceSpans.Merge(m, src)
}
func (m *ResourceSpans) XXX_Size() int {
return m.Size()
}
func (m *ResourceSpans) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceSpans.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo
func (m *ResourceSpans) GetDeprecatedScopeSpans() []*ScopeSpans {
if m != nil {
return m.DeprecatedScopeSpans
}
return nil
}
func (m *ResourceSpans) GetResource() v1.Resource {
if m != nil {
return m.Resource
}
return v1.Resource{}
}
func (m *ResourceSpans) GetScopeSpans() []*ScopeSpans {
if m != nil {
return m.ScopeSpans
}
return nil
}
func (m *ResourceSpans) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
// The instrumentation scope information for the spans in this message.
// Semantically, when InstrumentationScope isn't set, it is equivalent to
// an empty instrumentation scope name (unknown).
Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"`
// A list of Spans that originate from an instrumentation scope.
Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
// The Schema URL, if known. This is the identifier of the Schema that the span data
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
// This schema_url applies to all spans and span events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
func (m *ScopeSpans) Reset() { *m = ScopeSpans{} }
func (m *ScopeSpans) String() string { return proto.CompactTextString(m) }
func (*ScopeSpans) ProtoMessage() {}
func (*ScopeSpans) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{2}
}
func (m *ScopeSpans) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ScopeSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ScopeSpans.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ScopeSpans) XXX_Merge(src proto.Message) {
xxx_messageInfo_ScopeSpans.Merge(m, src)
}
func (m *ScopeSpans) XXX_Size() int {
return m.Size()
}
func (m *ScopeSpans) XXX_DiscardUnknown() {
xxx_messageInfo_ScopeSpans.DiscardUnknown(m)
}
var xxx_messageInfo_ScopeSpans proto.InternalMessageInfo
func (m *ScopeSpans) GetScope() v11.InstrumentationScope {
if m != nil {
return m.Scope
}
return v11.InstrumentationScope{}
}
func (m *ScopeSpans) GetSpans() []*Span {
if m != nil {
return m.Spans
}
return nil
}
func (m *ScopeSpans) GetSchemaUrl() string {
if m != nil {
return m.SchemaUrl
}
return ""
}
// A Span represents a single operation performed by a single component of the system.
//
// The next available field id is 17.
type Span struct {
// A unique identifier for a trace. All spans from the same trace share
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
// A unique identifier for a span within a trace, assigned when the span
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
// other than 8 bytes is considered invalid (empty string in OTLP/JSON
// is zero-length and thus is also invalid).
//
// This field is required.
SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
// trace_state conveys information about request position in multiple distributed tracing graphs.
// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
// See also https://github.com/w3c/distributed-tracing for more details about this field.
TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
// The `span_id` of this span's parent span. If this is a root span, then this
// field must be empty. The ID is an 8-byte array.
ParentSpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"parent_span_id"`
// Flags, a bit field.
//
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
// Context specification. To read the 8-bit W3C trace flag, use
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Bits 8 and 9 represent the 3 states of whether a span's parent
// is remote. The states are (unknown, is not remote, is remote).
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
//
// When creating span messages, if the message is logically forwarded from another source
// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
// be copied as-is. If creating from a source that does not have an equivalent flags field
// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
// be set to zero.
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
//
// [Optional].
Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
// A description of the span's operation.
//
// For example, the name can be a qualified method name or a file name
// and a line number where the operation is called. A best practice is to use
// the same display name at the same call point in an application.
// This makes it easier to correlate spans in different traces.
//
// This field is semantically required to be set to non-empty string.
// Empty value is equivalent to an unknown span name.
//
// This field is required.
Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
// Distinguishes between spans generated in a particular context. For example,
// two spans with the same name may be distinguished using `CLIENT` (caller)
// and `SERVER` (callee) to identify queueing latency associated with the span.
Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
// start_time_unix_nano is the start time of the span. On the client side, this is the time
// kept by the local machine where the span execution starts. On the server side, this
// is the time when the server's application handler starts running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
// end_time_unix_nano is the end time of the span. On the client side, this is the time
// kept by the local machine where the span execution ends. On the server side, this
// is the time when the server application handler stops running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
// attributes is a collection of key/value pairs. Note, global attributes
// like server name can be set using the resource API. Examples of attributes:
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
// "/http/server_latency": 300
// "example.com/myattribute": true
// "example.com/score": 10.239
//
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"`
// dropped_attributes_count is the number of attributes that were discarded. Attributes
// can be discarded because their keys are too long or because there are too many
// attributes. If this value is 0, then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// events is a collection of Event items.
Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
// dropped_events_count is the number of dropped events. If the value is 0, then no
// events were dropped.
DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
// links is a collection of Links, which are references from this span to a span
// in the same or different trace.
Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
// dropped_links_count is the number of dropped links after the maximum size was
// enforced. If this value is 0, then no links were dropped.
DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
// An optional final status for this span. Semantically when Status isn't set, it means
// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"`
}
func (m *Span) Reset() { *m = Span{} }
func (m *Span) String() string { return proto.CompactTextString(m) }
func (*Span) ProtoMessage() {}
func (*Span) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{3}
}
func (m *Span) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Span.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Span) XXX_Merge(src proto.Message) {
xxx_messageInfo_Span.Merge(m, src)
}
func (m *Span) XXX_Size() int {
return m.Size()
}
func (m *Span) XXX_DiscardUnknown() {
xxx_messageInfo_Span.DiscardUnknown(m)
}
var xxx_messageInfo_Span proto.InternalMessageInfo
func (m *Span) GetTraceState() string {
if m != nil {
return m.TraceState
}
return ""
}
func (m *Span) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
func (m *Span) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Span) GetKind() Span_SpanKind {
if m != nil {
return m.Kind
}
return Span_SPAN_KIND_UNSPECIFIED
}
func (m *Span) GetStartTimeUnixNano() uint64 {
if m != nil {
return m.StartTimeUnixNano
}
return 0
}
func (m *Span) GetEndTimeUnixNano() uint64 {
if m != nil {
return m.EndTimeUnixNano
}
return 0
}
func (m *Span) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *Span) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
func (m *Span) GetEvents() []*Span_Event {
if m != nil {
return m.Events
}
return nil
}
func (m *Span) GetDroppedEventsCount() uint32 {
if m != nil {
return m.DroppedEventsCount
}
return 0
}
func (m *Span) GetLinks() []*Span_Link {
if m != nil {
return m.Links
}
return nil
}
func (m *Span) GetDroppedLinksCount() uint32 {
if m != nil {
return m.DroppedLinksCount
}
return 0
}
func (m *Span) GetStatus() Status {
if m != nil {
return m.Status
}
return Status{}
}
// Event is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
type Span_Event struct {
// time_unix_nano is the time the event occurred.
TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
// name of the event.
// This field is semantically required to be set to non-empty string.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// attributes is a collection of attribute key/value pairs on the event.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"`
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}
func (m *Span_Event) Reset() { *m = Span_Event{} }
func (m *Span_Event) String() string { return proto.CompactTextString(m) }
func (*Span_Event) ProtoMessage() {}
func (*Span_Event) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{3, 0}
}
func (m *Span_Event) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Span_Event) XXX_Merge(src proto.Message) {
xxx_messageInfo_Span_Event.Merge(m, src)
}
func (m *Span_Event) XXX_Size() int {
return m.Size()
}
func (m *Span_Event) XXX_DiscardUnknown() {
xxx_messageInfo_Span_Event.DiscardUnknown(m)
}
var xxx_messageInfo_Span_Event proto.InternalMessageInfo
func (m *Span_Event) GetTimeUnixNano() uint64 {
if m != nil {
return m.TimeUnixNano
}
return 0
}
func (m *Span_Event) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Span_Event) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *Span_Event) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
// A pointer from the current span to another span in the same trace or in a
// different trace. For example, this can be used in batching operations,
// where a single batch handler processes multiple requests from different
// traces, or when the handler receives a request from a different project.
type Span_Link struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"`
// A unique identifier for the linked span. The ID is an 8-byte array.
SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"`
// The trace_state associated with the link.
TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
// attributes is a collection of attribute key/value pairs on the link.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
//
// The attribute values SHOULD NOT contain empty values.
// The attribute values SHOULD NOT contain bytes values.
// The attribute values SHOULD NOT contain array values other than arrays of
// string values, bool values, int values, or double values.
// The attribute values SHOULD NOT contain kvlist values.
// The behavior of software that receives attributes containing such values can be unpredictable.
// These restrictions can change in a minor release.
// The restrictions take origin from the OpenTelemetry specification:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.47.0/specification/common/README.md#attribute.
Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"`
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Flags, a bit field.
//
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
// Context specification. To read the 8-bit W3C trace flag, use
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
//
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
//
// Bits 8 and 9 represent the 3 states of whether the link is remote.
// The states are (unknown, is not remote, is remote).
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
//
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
// When creating new spans, bits 10-31 (22 most significant bits) MUST be zero.
//
// [Optional].
Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
}
func (m *Span_Link) Reset() { *m = Span_Link{} }
func (m *Span_Link) String() string { return proto.CompactTextString(m) }
func (*Span_Link) ProtoMessage() {}
func (*Span_Link) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{3, 1}
}
func (m *Span_Link) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Span_Link) XXX_Merge(src proto.Message) {
xxx_messageInfo_Span_Link.Merge(m, src)
}
func (m *Span_Link) XXX_Size() int {
return m.Size()
}
func (m *Span_Link) XXX_DiscardUnknown() {
xxx_messageInfo_Span_Link.DiscardUnknown(m)
}
var xxx_messageInfo_Span_Link proto.InternalMessageInfo
func (m *Span_Link) GetTraceState() string {
if m != nil {
return m.TraceState
}
return ""
}
func (m *Span_Link) GetAttributes() []v11.KeyValue {
if m != nil {
return m.Attributes
}
return nil
}
func (m *Span_Link) GetDroppedAttributesCount() uint32 {
if m != nil {
return m.DroppedAttributesCount
}
return 0
}
func (m *Span_Link) GetFlags() uint32 {
if m != nil {
return m.Flags
}
return 0
}
// The Status type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs.
type Status struct {
// A developer-facing, human-readable error message.
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// The status code.
Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
return fileDescriptor_5c407ac9c675a601, []int{4}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Status) XXX_Merge(src proto.Message) {
xxx_messageInfo_Status.Merge(m, src)
}
func (m *Status) XXX_Size() int {
return m.Size()
}
func (m *Status) XXX_DiscardUnknown() {
xxx_messageInfo_Status.DiscardUnknown(m)
}
var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetCode() Status_StatusCode {
if m != nil {
return m.Code
}
return Status_STATUS_CODE_UNSET
}
func init() {
proto.RegisterEnum("opentelemetry.proto.trace.v1.SpanFlags", SpanFlags_name, SpanFlags_value)
proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value)
proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData")
proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans")
proto.RegisterType((*ScopeSpans)(nil), "opentelemetry.proto.trace.v1.ScopeSpans")
proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span")
proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event")
proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link")
proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status")
}
func init() {
proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601)
}
var fileDescriptor_5c407ac9c675a601 = []byte{
// 1112 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
0x14, 0xf6, 0x3a, 0x6b, 0x3b, 0x79, 0x49, 0xdc, 0xed, 0xe0, 0x56, 0x4b, 0x28, 0x8e, 0xb1, 0x0a,
0x98, 0x56, 0xb2, 0x49, 0x7b, 0x29, 0x07, 0x44, 0x1d, 0x7b, 0x03, 0x8b, 0x13, 0x3b, 0x9a, 0x5d,
0x47, 0x80, 0x90, 0x96, 0xad, 0x77, 0x6a, 0x56, 0xb1, 0x67, 0xad, 0xdd, 0x71, 0xd4, 0xde, 0xf8,
0x13, 0xb8, 0x22, 0x71, 0x47, 0x02, 0xce, 0xdc, 0xb8, 0x57, 0x9c, 0x7a, 0x44, 0x1c, 0x2a, 0x94,
0x5c, 0xf8, 0x2f, 0x8a, 0x66, 0x66, 0xd7, 0x5e, 0x47, 0x91, 0xd3, 0x48, 0xf4, 0xc2, 0x25, 0x99,
0x79, 0x3f, 0xbe, 0xef, 0x7b, 0x6f, 0xde, 0x8c, 0x17, 0x6a, 0xc1, 0x84, 0x50, 0x46, 0x46, 0x64,
0x4c, 0x58, 0xf8, 0xb4, 0x31, 0x09, 0x03, 0x16, 0x34, 0x58, 0xe8, 0x0e, 0x48, 0xe3, 0x64, 0x47,
0x2e, 0xea, 0xc2, 0x88, 0x6e, 0x2d, 0x44, 0x4a, 0x63, 0x5d, 0x06, 0x9c, 0xec, 0x6c, 0x95, 0x86,
0xc1, 0x30, 0x90, 0xd9, 0x7c, 0x25, 0xdd, 0x5b, 0x77, 0x2e, 0x42, 0x1f, 0x04, 0xe3, 0x71, 0x40,
0x39, 0xbc, 0x5c, 0xc5, 0xb1, 0xf5, 0x8b, 0x62, 0x43, 0x12, 0x05, 0xd3, 0x50, 0x8a, 0x49, 0xd6,
0x32, 0xbe, 0xfa, 0x0d, 0x80, 0xcd, 0xd9, 0xa3, 0xb6, 0xcb, 0x5c, 0x84, 0xa1, 0x98, 0xf8, 0x9d,
0x68, 0xe2, 0xd2, 0x48, 0x57, 0x2a, 0x2b, 0xb5, 0xf5, 0x7b, 0x77, 0xeb, 0xcb, 0x64, 0xd7, 0x71,
0x9c, 0x63, 0xf1, 0x14, 0xbc, 0x19, 0xa6, 0xb7, 0xd5, 0x9f, 0xb2, 0xb0, 0xb9, 0x10, 0x80, 0x1c,
0xb8, 0xe9, 0x91, 0x49, 0x48, 0x06, 0x2e, 0x23, 0x9e, 0x13, 0x0d, 0x82, 0x49, 0xc2, 0xf6, 0x4f,
0x41, 0xd0, 0xd5, 0x96, 0xd3, 0x59, 0x3c, 0x43, 0x72, 0x95, 0xe6, 0x40, 0x73, 0x2b, 0xea, 0xc0,
0x6a, 0xa2, 0x41, 0x57, 0x2a, 0x4a, 0x6d, 0xfd, 0xde, 0x07, 0x17, 0x22, 0xce, 0x7a, 0x91, 0xaa,
0x61, 0x57, 0x7d, 0xf6, 0x62, 0x3b, 0x83, 0x67, 0x00, 0xc8, 0x84, 0xf5, 0xb4, 0xc4, 0xec, 0x15,
0x15, 0x42, 0x34, 0xd7, 0xf5, 0x36, 0x40, 0x34, 0xf8, 0x96, 0x8c, 0x5d, 0x67, 0x1a, 0x8e, 0xf4,
0x95, 0x8a, 0x52, 0x5b, 0xc3, 0x6b, 0xd2, 0xd2, 0x0f, 0x47, 0xd5, 0xdf, 0x14, 0x80, 0x54, 0x15,
0x3d, 0xc8, 0x89, 0xdc, 0xb8, 0x84, 0xfb, 0x17, 0x52, 0xc6, 0x87, 0x7f, 0xb2, 0x53, 0x37, 0x69,
0xc4, 0xc2, 0xe9, 0x98, 0x50, 0xe6, 0x32, 0x3f, 0xa0, 0x02, 0x28, 0x2e, 0x46, 0xe2, 0xa0, 0x07,
0x90, 0x4b, 0xd7, 0x50, 0xbd, 0xa4, 0x86, 0x89, 0x4b, 0xb1, 0x4c, 0xb8, 0x4c, 0xf8, 0xaf, 0x9b,
0xa0, 0xf2, 0x70, 0xf4, 0x35, 0xac, 0x8a, 0x7c, 0xc7, 0xf7, 0x84, 0xea, 0x8d, 0xdd, 0x26, 0x17,
0xf0, 0xd7, 0x8b, 0xed, 0x8f, 0x86, 0xc1, 0x39, 0x3a, 0x9f, 0xcf, 0xf0, 0x68, 0x44, 0x06, 0x2c,
0x08, 0x1b, 0x13, 0xcf, 0x65, 0x6e, 0xc3, 0xa7, 0x8c, 0x84, 0xd4, 0x1d, 0x35, 0xf8, 0xae, 0x2e,
0xe6, 0xd2, 0x6c, 0xe3, 0x82, 0x80, 0x34, 0x3d, 0xf4, 0x25, 0x14, 0xb8, 0x1c, 0x0e, 0x9e, 0x15,
0xe0, 0x0f, 0x63, 0xf0, 0x07, 0x57, 0x07, 0xe7, 0x72, 0xcd, 0x36, 0xce, 0x73, 0x40, 0xd3, 0x43,
0xdb, 0xb0, 0x2e, 0x85, 0x47, 0xcc, 0x65, 0x24, 0xae, 0x10, 0x84, 0xc9, 0xe2, 0x16, 0xf4, 0x18,
0x8a, 0x13, 0x37, 0x24, 0x94, 0x39, 0x89, 0x04, 0xf5, 0x3f, 0x92, 0xb0, 0x21, 0x71, 0x2d, 0x29,
0xa4, 0x04, 0xb9, 0xc7, 0x23, 0x77, 0x18, 0xe9, 0x5a, 0x45, 0xa9, 0x15, 0xb0, 0xdc, 0x20, 0x04,
0x2a, 0x75, 0xc7, 0x44, 0xcf, 0x09, 0x5d, 0x62, 0x8d, 0x3e, 0x01, 0xf5, 0xd8, 0xa7, 0x9e, 0x9e,
0xaf, 0x28, 0xb5, 0xe2, 0x65, 0x37, 0x94, 0xa3, 0x8b, 0x3f, 0x1d, 0x9f, 0x7a, 0x58, 0x24, 0xa2,
0x06, 0x94, 0x22, 0xe6, 0x86, 0xcc, 0x61, 0xfe, 0x98, 0x38, 0x53, 0xea, 0x3f, 0x71, 0xa8, 0x4b,
0x03, 0xbd, 0x50, 0x51, 0x6a, 0x79, 0x7c, 0x5d, 0xf8, 0x6c, 0x7f, 0x4c, 0xfa, 0xd4, 0x7f, 0xd2,
0x75, 0x69, 0x80, 0xee, 0x02, 0x22, 0xd4, 0x3b, 0x1f, 0xbe, 0x2a, 0xc2, 0xaf, 0x11, 0xea, 0x2d,
0x04, 0x1f, 0x00, 0xb8, 0x8c, 0x85, 0xfe, 0xa3, 0x29, 0x23, 0x91, 0xbe, 0x26, 0x26, 0xee, 0xfd,
0x4b, 0x46, 0xb8, 0x43, 0x9e, 0x1e, 0xb9, 0xa3, 0x69, 0x32, 0xb6, 0x29, 0x00, 0xf4, 0x00, 0x74,
0x2f, 0x0c, 0x26, 0x13, 0xe2, 0x39, 0x73, 0xab, 0x33, 0x08, 0xa6, 0x94, 0xe9, 0x50, 0x51, 0x6a,
0x9b, 0xf8, 0x66, 0xec, 0x6f, 0xce, 0xdc, 0x2d, 0xee, 0x45, 0x0f, 0x21, 0x4f, 0x4e, 0x08, 0x65,
0x91, 0xbe, 0xfe, 0x4a, 0x57, 0x97, 0x77, 0xca, 0xe0, 0x09, 0x38, 0xce, 0x43, 0x1f, 0x42, 0x29,
0xe1, 0x96, 0x96, 0x98, 0x77, 0x43, 0xf0, 0xa2, 0xd8, 0x27, 0x72, 0x62, 0xce, 0x8f, 0x21, 0x37,
0xf2, 0xe9, 0x71, 0xa4, 0x6f, 0x2e, 0xa9, 0x7b, 0x91, 0x72, 0xdf, 0xa7, 0xc7, 0x58, 0x66, 0xa1,
0x3a, 0xbc, 0x91, 0x10, 0x0a, 0x43, 0xcc, 0x57, 0x14, 0x7c, 0xd7, 0x63, 0x17, 0x4f, 0x88, 0xe9,
0x76, 0x21, 0xcf, 0xe7, 0x76, 0x1a, 0xe9, 0xd7, 0xc4, 0x53, 0x71, 0xfb, 0x12, 0x3e, 0x11, 0x1b,
0x37, 0x39, 0xce, 0xdc, 0xfa, 0x43, 0x81, 0x9c, 0x28, 0x01, 0xdd, 0x86, 0xe2, 0xb9, 0x23, 0x56,
0xc4, 0x11, 0x6f, 0xb0, 0xf4, 0xf9, 0x26, 0x23, 0x99, 0x4d, 0x8d, 0xe4, 0xe2, 0x99, 0xaf, 0xbc,
0xce, 0x33, 0x57, 0x97, 0x9d, 0xf9, 0xd6, 0xcb, 0x2c, 0xa8, 0xbc, 0x3f, 0xff, 0xe3, 0x07, 0x69,
0xb1, 0xd7, 0xea, 0xeb, 0xec, 0x75, 0x6e, 0xe9, 0xfd, 0x9a, 0xbd, 0x58, 0xf9, 0xd4, 0x8b, 0x55,
0xfd, 0x41, 0x81, 0xd5, 0xe4, 0xbd, 0x41, 0x6f, 0xc2, 0x0d, 0xeb, 0xb0, 0xd9, 0x75, 0x3a, 0x66,
0xb7, 0xed, 0xf4, 0xbb, 0xd6, 0xa1, 0xd1, 0x32, 0xf7, 0x4c, 0xa3, 0xad, 0x65, 0xd0, 0x4d, 0x40,
0x73, 0x97, 0xd9, 0xb5, 0x0d, 0xdc, 0x6d, 0xee, 0x6b, 0x0a, 0x2a, 0x81, 0x36, 0xb7, 0x5b, 0x06,
0x3e, 0x32, 0xb0, 0x96, 0x5d, 0xb4, 0xb6, 0xf6, 0x4d, 0xa3, 0x6b, 0x6b, 0x2b, 0x8b, 0x18, 0x87,
0xb8, 0xd7, 0xee, 0xb7, 0x0c, 0xac, 0xa9, 0x8b, 0xf6, 0x56, 0xaf, 0x6b, 0xf5, 0x0f, 0x0c, 0xac,
0xe5, 0xaa, 0xbf, 0x2b, 0x90, 0x97, 0x77, 0x00, 0xe9, 0x50, 0x18, 0x93, 0x28, 0x72, 0x87, 0xc9,
0x20, 0x27, 0x5b, 0xd4, 0x02, 0x75, 0x10, 0x78, 0xb2, 0xf3, 0xc5, 0x7b, 0x8d, 0x57, 0xb9, 0x51,
0xf1, 0xbf, 0x56, 0xe0, 0x11, 0x2c, 0x92, 0xab, 0x5d, 0x80, 0xb9, 0x0d, 0xdd, 0x80, 0xeb, 0x96,
0xdd, 0xb4, 0xfb, 0x96, 0xd3, 0xea, 0xb5, 0x0d, 0xde, 0x08, 0xc3, 0xd6, 0x32, 0x08, 0x41, 0x31,
0x6d, 0xee, 0x75, 0x34, 0xe5, 0x7c, 0xa8, 0x81, 0x71, 0x0f, 0x6b, 0xd9, 0xcf, 0xd5, 0x55, 0x45,
0xcb, 0xde, 0xf9, 0x51, 0x81, 0x35, 0xde, 0xdb, 0x3d, 0xf1, 0xdb, 0x90, 0x34, 0x77, 0x6f, 0xbf,
0xf9, 0xa9, 0xe5, 0xb4, 0x7b, 0x4e, 0xb7, 0x67, 0x3b, 0x7d, 0xcb, 0xd0, 0x32, 0xa8, 0x02, 0x6f,
0xa5, 0x5c, 0x36, 0x6e, 0xb6, 0x8c, 0x78, 0x7d, 0xd0, 0xb4, 0x3a, 0xda, 0x4b, 0x05, 0xdd, 0x81,
0x77, 0x53, 0x11, 0xad, 0x5e, 0xd7, 0x36, 0xbe, 0xb0, 0x9d, 0xcf, 0x9a, 0x96, 0x63, 0x5a, 0x0e,
0x36, 0x0e, 0x7a, 0xb6, 0x21, 0x63, 0xbf, 0xcb, 0xa2, 0xf7, 0xe0, 0x9d, 0x0b, 0x62, 0xcf, 0xc7,
0xa9, 0xbb, 0xbf, 0x28, 0xcf, 0x4e, 0xcb, 0xca, 0xf3, 0xd3, 0xb2, 0xf2, 0xf7, 0x69, 0x59, 0xf9,
0xfe, 0xac, 0x9c, 0x79, 0x7e, 0x56, 0xce, 0xfc, 0x79, 0x56, 0xce, 0xc0, 0xb6, 0x1f, 0x2c, 0x6d,
0xe4, 0xae, 0xfc, 0x18, 0x3d, 0xe4, 0xc6, 0x43, 0xe5, 0xab, 0xd6, 0x95, 0xaf, 0x91, 0xfc, 0xe0,
0x1d, 0x12, 0x3a, 0xfb, 0xfa, 0xfe, 0x39, 0x7b, 0xab, 0x37, 0x21, 0xd4, 0x9e, 0x41, 0x08, 0x70,
0x79, 0x97, 0xeb, 0x47, 0x3b, 0x8f, 0xf2, 0x22, 0xe3, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
0xfd, 0xbe, 0x84, 0xc3, 0xc3, 0x0b, 0x00, 0x00,
}
func (m *TracesData) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TracesData) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.ResourceSpans) > 0 {
for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
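// Editor's note: gogo marshaling fills the buffer from the end backwards,
// which is why Marshal slices dAtA[:n] after MarshalToSizedBuffer reports the
// number of bytes written. A minimal round-trip sketch using the types in this
// file (the schema URL is an illustrative value):
func exampleTracesDataRoundTrip() error {
	in := &TracesData{ResourceSpans: []*ResourceSpans{{SchemaUrl: "https://opentelemetry.io/schemas/1.5.0"}}}
	b, err := in.Marshal()
	if err != nil {
		return err
	}
	var out TracesData
	return out.Unmarshal(b)
}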
func (m *ResourceSpans) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
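// Editor's note: field 1000 (deprecated_scope_spans) needs a two-byte tag:
// (1000<<3)|2 = 0x1f42, whose varint encoding is 0xc2 0x3e. The bytes appear
// in reverse order below because the buffer is filled back-to-front.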
if len(m.DeprecatedScopeSpans) > 0 {
for iNdEx := len(m.DeprecatedScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DeprecatedScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3e
i--
dAtA[i] = 0xc2
}
}
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.ScopeSpans) > 0 {
for iNdEx := len(m.ScopeSpans) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.ScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *ScopeSpans) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ScopeSpans) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ScopeSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.SchemaUrl) > 0 {
i -= len(m.SchemaUrl)
copy(dAtA[i:], m.SchemaUrl)
i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl)))
i--
dAtA[i] = 0x1a
}
if len(m.Spans) > 0 {
for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Span) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Span) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
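// Editor's note: field 16 (flags, fixed32) has tag (16<<3)|5 = 133, whose
// varint encoding is 0x85 0x01; the two tag bytes appear in reverse order
// below because the buffer is filled back-to-front.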
if m.Flags != 0 {
i -= 4
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x85
}
{
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x7a
if m.DroppedLinksCount != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount))
i--
dAtA[i] = 0x70
}
if len(m.Links) > 0 {
for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6a
}
}
if m.DroppedEventsCount != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount))
i--
dAtA[i] = 0x60
}
if len(m.Events) > 0 {
for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x5a
}
}
if m.DroppedAttributesCount != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x50
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
}
}
if m.EndTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano))
i--
dAtA[i] = 0x41
}
if m.StartTimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
i--
dAtA[i] = 0x39
}
if m.Kind != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.Kind))
i--
dAtA[i] = 0x30
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x2a
}
{
size := m.ParentSpanId.Size()
i -= size
if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
if len(m.TraceState) > 0 {
i -= len(m.TraceState)
copy(dAtA[i:], m.TraceState)
i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
i--
dAtA[i] = 0x1a
}
{
size := m.SpanId.Size()
i -= size
if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.TraceId.Size()
i -= size
if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Span_Event) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.DroppedAttributesCount != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x20
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintTrace(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x12
}
if m.TimeUnixNano != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
i--
dAtA[i] = 0x9
}
return len(dAtA) - i, nil
}
func (m *Span_Link) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Flags != 0 {
i -= 4
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.Flags))
i--
dAtA[i] = 0x35
}
if m.DroppedAttributesCount != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount))
i--
dAtA[i] = 0x28
}
if len(m.Attributes) > 0 {
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
if len(m.TraceState) > 0 {
i -= len(m.TraceState)
copy(dAtA[i:], m.TraceState)
i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState)))
i--
dAtA[i] = 0x1a
}
{
size := m.SpanId.Size()
i -= size
if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.TraceId.Size()
i -= size
if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintTrace(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Status) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Status) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Code != 0 {
i = encodeVarintTrace(dAtA, i, uint64(m.Code))
i--
dAtA[i] = 0x18
}
if len(m.Message) > 0 {
i -= len(m.Message)
copy(dAtA[i:], m.Message)
i = encodeVarintTrace(dAtA, i, uint64(len(m.Message)))
i--
dAtA[i] = 0x12
}
return len(dAtA) - i, nil
}
func encodeVarintTrace(dAtA []byte, offset int, v uint64) int {
offset -= sovTrace(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
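// Editor's sketch: encodeVarintTrace writes the base-128 varint so that it
// ends at offset and returns the new start position, matching the backwards
// marshaling above. For example, 300 encodes as the two bytes 0xac 0x02:
func exampleEncodeVarint() []byte {
	buf := make([]byte, sovTrace(300)) // sovTrace(300) == 2
	encodeVarintTrace(buf, len(buf), 300)
	return buf // [0xac, 0x02]
}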
func (m *TracesData) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.ResourceSpans) > 0 {
for _, e := range m.ResourceSpans {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
return n
}
func (m *ResourceSpans) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Resource.Size()
n += 1 + l + sovTrace(uint64(l))
if len(m.ScopeSpans) > 0 {
for _, e := range m.ScopeSpans {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
if len(m.DeprecatedScopeSpans) > 0 {
for _, e := range m.DeprecatedScopeSpans {
l = e.Size()
n += 2 + l + sovTrace(uint64(l))
}
}
return n
}
func (m *ScopeSpans) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Scope.Size()
n += 1 + l + sovTrace(uint64(l))
if len(m.Spans) > 0 {
for _, e := range m.Spans {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
l = len(m.SchemaUrl)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
return n
}
func (m *Span) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.TraceId.Size()
n += 1 + l + sovTrace(uint64(l))
l = m.SpanId.Size()
n += 1 + l + sovTrace(uint64(l))
l = len(m.TraceState)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
l = m.ParentSpanId.Size()
n += 1 + l + sovTrace(uint64(l))
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
if m.Kind != 0 {
n += 1 + sovTrace(uint64(m.Kind))
}
if m.StartTimeUnixNano != 0 {
n += 9
}
if m.EndTimeUnixNano != 0 {
n += 9
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
}
if len(m.Events) > 0 {
for _, e := range m.Events {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
if m.DroppedEventsCount != 0 {
n += 1 + sovTrace(uint64(m.DroppedEventsCount))
}
if len(m.Links) > 0 {
for _, e := range m.Links {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
if m.DroppedLinksCount != 0 {
n += 1 + sovTrace(uint64(m.DroppedLinksCount))
}
l = m.Status.Size()
n += 1 + l + sovTrace(uint64(l))
if m.Flags != 0 {
n += 6
}
return n
}
func (m *Span_Event) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.TimeUnixNano != 0 {
n += 9
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
}
return n
}
func (m *Span_Link) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.TraceId.Size()
n += 1 + l + sovTrace(uint64(l))
l = m.SpanId.Size()
n += 1 + l + sovTrace(uint64(l))
l = len(m.TraceState)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
if len(m.Attributes) > 0 {
for _, e := range m.Attributes {
l = e.Size()
n += 1 + l + sovTrace(uint64(l))
}
}
if m.DroppedAttributesCount != 0 {
n += 1 + sovTrace(uint64(m.DroppedAttributesCount))
}
if m.Flags != 0 {
n += 5
}
return n
}
func (m *Status) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Message)
if l > 0 {
n += 1 + l + sovTrace(uint64(l))
}
if m.Code != 0 {
n += 1 + sovTrace(uint64(m.Code))
}
return n
}
func sovTrace(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTrace(x uint64) (n int) {
return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
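// Editor's sketch: sovTrace is the encoded byte length of an unsigned varint,
// ceil(bitlen(x)/7) with a minimum of one byte (the x|1 guards x == 0);
// sozTrace zigzag-maps signed values first so small negatives stay short.
func exampleVarintSizes() {
	_ = sovTrace(0)                 // 1 byte
	_ = sovTrace(127)               // 1 byte: fits in 7 bits
	_ = sovTrace(128)               // 2 bytes
	_ = sozTrace(uint64(int64(-1))) // zigzag maps -1 to 1: 1 byte
}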
func (m *TracesData) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TracesData: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ResourceSpans = append(m.ResourceSpans, &ResourceSpans{})
if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ResourceSpans) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ScopeSpans = append(m.ScopeSpans, &ScopeSpans{})
if err := m.ScopeSpans[len(m.ScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 1000:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeprecatedScopeSpans = append(m.DeprecatedScopeSpans, &ScopeSpans{})
if err := m.DeprecatedScopeSpans[len(m.DeprecatedScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ScopeSpans) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ScopeSpans: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ScopeSpans: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Spans = append(m.Spans, &Span{})
if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.SchemaUrl = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Span) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Span: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TraceState = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
m.Kind = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Kind |= Span_SpanKind(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
m.StartTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 8:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
}
m.EndTimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Events = append(m.Events, &Span_Event{})
if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
}
m.DroppedEventsCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedEventsCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Links = append(m.Links, &Span_Link{})
if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 14:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
}
m.DroppedLinksCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedLinksCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 15:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 16:
if wireType != 5 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
if (iNdEx + 4) > l {
return io.ErrUnexpectedEOF
}
m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
iNdEx += 4
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Span_Event) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Event: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
m.TimeUnixNano = 0
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Span_Link) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Link: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TraceState = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Attributes = append(m.Attributes, v11.KeyValue{})
if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
m.DroppedAttributesCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 5 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
m.Flags = 0
if (iNdEx + 4) > l {
return io.ErrUnexpectedEOF
}
m.Flags = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
iNdEx += 4
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Status) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Status: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTrace
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTrace
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
}
m.Code = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTrace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Code |= Status_StatusCode(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTrace(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTrace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTrace(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTrace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTrace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTrace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTrace
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTrace
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTrace
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
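// Editor's sketch: skipTrace consumes one complete field (tag plus payload,
// tracking group nesting via depth) and reports how many bytes to skip. For an
// unknown varint field 1 with value 1 it returns 2:
func exampleSkipUnknownField() (int, error) {
	return skipTrace([]byte{0x08, 0x01}) // 0x08 = field 1, wire type 0
}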
var (
ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group")
)
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package data // import "go.opentelemetry.io/collector/pdata/internal/data"
import (
"encoding/hex"
"errors"
"github.com/gogo/protobuf/proto"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const spanIDSize = 8
var (
errMarshalSpanID = errors.New("marshal: invalid buffer length for SpanID")
errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
)
// SpanID is a custom data type that is used for all span_id fields in OTLP
// Protobuf messages.
type SpanID [spanIDSize]byte
var _ proto.Sizer = (*SpanID)(nil)
// Size returns the size of the data to serialize.
func (sid SpanID) Size() int {
if sid.IsEmpty() {
return 0
}
return spanIDSize
}
// IsEmpty returns true if the ID consists entirely of zero bytes.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
// MarshalTo converts the span ID into a binary representation. Called by Protobuf serialization.
func (sid SpanID) MarshalTo(data []byte) (n int, err error) {
if sid.IsEmpty() {
return 0, nil
}
if len(data) < spanIDSize {
return 0, errMarshalSpanID
}
return copy(data, sid[:]), nil
}
// Unmarshal inflates this span ID from its binary representation. Called by Protobuf serialization.
func (sid *SpanID) Unmarshal(data []byte) error {
if len(data) == 0 {
*sid = [spanIDSize]byte{}
return nil
}
if len(data) != spanIDSize {
return errUnmarshalSpanID
}
copy(sid[:], data)
return nil
}
// MarshalJSONStream converts SpanID into a hex string.
func (sid SpanID) MarshalJSONStream(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(sid[:]))
}
// UnmarshalJSONIter decodes SpanID from hex string.
func (sid *SpanID) UnmarshalJSONIter(iter *json.Iterator) {
*sid = [spanIDSize]byte{}
unmarshalJSON(sid[:], iter)
}
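// Editor's sketch: round-tripping a SpanID through the Protobuf hooks above.
// An all-zero ID has Size() == 0 and marshals to no bytes; TraceID below
// behaves identically with a 16-byte payload.
func exampleSpanIDRoundTrip() error {
	id := SpanID{1, 2, 3, 4, 5, 6, 7, 8}
	buf := make([]byte, id.Size()) // 8 bytes for a non-empty ID
	if _, err := id.MarshalTo(buf); err != nil {
		return err
	}
	var out SpanID
	return out.Unmarshal(buf)
}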
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package data // import "go.opentelemetry.io/collector/pdata/internal/data"
import (
"encoding/hex"
"errors"
"github.com/gogo/protobuf/proto"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const traceIDSize = 16
var (
errMarshalTraceID = errors.New("marshal: invalid buffer length for TraceID")
errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
)
// TraceID is a custom data type that is used for all trace_id fields in OTLP
// Protobuf messages.
type TraceID [traceIDSize]byte
var _ proto.Sizer = (*TraceID)(nil)
// Size returns the size of the data to serialize.
func (tid TraceID) Size() int {
if tid.IsEmpty() {
return 0
}
return traceIDSize
}
// IsEmpty returns true if the ID consists entirely of zero bytes.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
// MarshalTo converts the trace ID into a binary representation. Called by Protobuf serialization.
func (tid TraceID) MarshalTo(data []byte) (n int, err error) {
if tid.IsEmpty() {
return 0, nil
}
if len(data) < traceIDSize {
return 0, errMarshalTraceID
}
return copy(data, tid[:]), nil
}
// Unmarshal inflates this trace ID from its binary representation. Called by Protobuf serialization.
func (tid *TraceID) Unmarshal(data []byte) error {
if len(data) == 0 {
*tid = [traceIDSize]byte{}
return nil
}
if len(data) != traceIDSize {
return errUnmarshalTraceID
}
copy(tid[:], data)
return nil
}
// MarshalJSONStream converts TraceID into a hex string.
func (tid TraceID) MarshalJSONStream(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(tid[:]))
}
// UnmarshalJSONIter decodes TraceID from hex string.
func (tid *TraceID) UnmarshalJSONIter(iter *json.Iterator) {
*tid = [traceIDSize]byte{}
unmarshalJSON(tid[:], iter)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolAnyValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue{}
},
}
ProtoPoolAnyValue_StringValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_StringValue{}
},
}
ProtoPoolAnyValue_BoolValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_BoolValue{}
},
}
ProtoPoolAnyValue_IntValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_IntValue{}
},
}
ProtoPoolAnyValue_DoubleValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_DoubleValue{}
},
}
ProtoPoolAnyValue_ArrayValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_ArrayValue{}
},
}
ProtoPoolAnyValue_KvlistValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_KvlistValue{}
},
}
ProtoPoolAnyValue_BytesValue = sync.Pool{
New: func() any {
return &otlpcommon.AnyValue_BytesValue{}
},
}
)
func NewAnyValue() *otlpcommon.AnyValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue{}
}
return protoPoolAnyValue.Get().(*otlpcommon.AnyValue)
}
func DeleteAnyValue(orig *otlpcommon.AnyValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
switch ov := orig.Value.(type) {
case *otlpcommon.AnyValue_StringValue:
if UseProtoPooling.IsEnabled() {
ov.StringValue = ""
ProtoPoolAnyValue_StringValue.Put(ov)
}
case *otlpcommon.AnyValue_BoolValue:
if UseProtoPooling.IsEnabled() {
ov.BoolValue = false
ProtoPoolAnyValue_BoolValue.Put(ov)
}
case *otlpcommon.AnyValue_IntValue:
if UseProtoPooling.IsEnabled() {
ov.IntValue = int64(0)
ProtoPoolAnyValue_IntValue.Put(ov)
}
case *otlpcommon.AnyValue_DoubleValue:
if UseProtoPooling.IsEnabled() {
ov.DoubleValue = float64(0)
ProtoPoolAnyValue_DoubleValue.Put(ov)
}
case *otlpcommon.AnyValue_ArrayValue:
DeleteArrayValue(ov.ArrayValue, true)
ov.ArrayValue = nil
ProtoPoolAnyValue_ArrayValue.Put(ov)
case *otlpcommon.AnyValue_KvlistValue:
DeleteKeyValueList(ov.KvlistValue, true)
ov.KvlistValue = nil
ProtoPoolAnyValue_KvlistValue.Put(ov)
case *otlpcommon.AnyValue_BytesValue:
if UseProtoPooling.IsEnabled() {
ov.BytesValue = nil
ProtoPoolAnyValue_BytesValue.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolAnyValue.Put(orig)
}
}
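// Editor's sketch of the intended lifecycle: NewAnyValue draws from the pool
// only while the UseProtoPooling gate is enabled, and DeleteAnyValue resets
// the message and returns the oneof wrapper (and, when nullable is true, the
// message itself) to the matching pools.
func exampleAnyValueLifecycle() {
	v := NewAnyValue()
	v.Value = &otlpcommon.AnyValue_IntValue{IntValue: 42}
	// ... use v ...
	DeleteAnyValue(v, true)
}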
func CopyAnyValue(dest, src *otlpcommon.AnyValue) {
// If copying to the same object, just return.
if src == dest {
return
}
switch t := src.Value.(type) {
case *otlpcommon.AnyValue_StringValue:
var ov *otlpcommon.AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
}
ov.StringValue = t.StringValue
dest.Value = ov
case *otlpcommon.AnyValue_BoolValue:
var ov *otlpcommon.AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
}
ov.BoolValue = t.BoolValue
dest.Value = ov
case *otlpcommon.AnyValue_IntValue:
var ov *otlpcommon.AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
}
ov.IntValue = t.IntValue
dest.Value = ov
case *otlpcommon.AnyValue_DoubleValue:
var ov *otlpcommon.AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
}
ov.DoubleValue = t.DoubleValue
dest.Value = ov
case *otlpcommon.AnyValue_ArrayValue:
var ov *otlpcommon.AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
CopyArrayValue(ov.ArrayValue, t.ArrayValue)
dest.Value = ov
case *otlpcommon.AnyValue_KvlistValue:
var ov *otlpcommon.AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
CopyKeyValueList(ov.KvlistValue, t.KvlistValue)
dest.Value = ov
case *otlpcommon.AnyValue_BytesValue:
var ov *otlpcommon.AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
}
ov.BytesValue = t.BytesValue
dest.Value = ov
}
}
func GenTestAnyValue() *otlpcommon.AnyValue {
orig := NewAnyValue()
orig.Value = &otlpcommon.AnyValue_BoolValue{BoolValue: true}
return orig
}
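// JSON encoding note: AnyValue is a proto oneof, so its JSON form is an
// object carrying exactly one variant field, e.g. {"stringValue":"x"} or
// {"boolValue":true}. Per the proto3 JSON mapping, 64-bit integers are
// typically rendered as strings; the exact formatting is delegated to the
// json.Stream writer used below.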
// MarshalJSONAnyValue marshals all properties from the current struct to the destination stream.
func MarshalJSONAnyValue(orig *otlpcommon.AnyValue, dest *json.Stream) {
dest.WriteObjectStart()
switch orig := orig.Value.(type) {
case *otlpcommon.AnyValue_StringValue:
dest.WriteObjectField("stringValue")
dest.WriteString(orig.StringValue)
case *otlpcommon.AnyValue_BoolValue:
dest.WriteObjectField("boolValue")
dest.WriteBool(orig.BoolValue)
case *otlpcommon.AnyValue_IntValue:
dest.WriteObjectField("intValue")
dest.WriteInt64(orig.IntValue)
case *otlpcommon.AnyValue_DoubleValue:
dest.WriteObjectField("doubleValue")
dest.WriteFloat64(orig.DoubleValue)
case *otlpcommon.AnyValue_ArrayValue:
if orig.ArrayValue != nil {
dest.WriteObjectField("arrayValue")
MarshalJSONArrayValue(orig.ArrayValue, dest)
}
case *otlpcommon.AnyValue_KvlistValue:
if orig.KvlistValue != nil {
dest.WriteObjectField("kvlistValue")
MarshalJSONKeyValueList(orig.KvlistValue, dest)
}
case *otlpcommon.AnyValue_BytesValue:
dest.WriteObjectField("bytesValue")
dest.WriteBytes(orig.BytesValue)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONAnyValue unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONAnyValue(orig *otlpcommon.AnyValue, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "stringValue", "string_value":
{
var ov *otlpcommon.AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
}
ov.StringValue = iter.ReadString()
orig.Value = ov
}
case "boolValue", "bool_value":
{
var ov *otlpcommon.AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
}
ov.BoolValue = iter.ReadBool()
orig.Value = ov
}
case "intValue", "int_value":
{
var ov *otlpcommon.AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
}
ov.IntValue = iter.ReadInt64()
orig.Value = ov
}
case "doubleValue", "double_value":
{
var ov *otlpcommon.AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
}
ov.DoubleValue = iter.ReadFloat64()
orig.Value = ov
}
case "arrayValue", "array_value":
{
var ov *otlpcommon.AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
UnmarshalJSONArrayValue(ov.ArrayValue, iter)
orig.Value = ov
}
case "kvlistValue", "kvlist_value":
{
var ov *otlpcommon.AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
UnmarshalJSONKeyValueList(ov.KvlistValue, iter)
orig.Value = ov
}
case "bytesValue", "bytes_value":
{
var ov *otlpcommon.AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
}
ov.BytesValue = iter.ReadBytes()
orig.Value = ov
}
default:
iter.Skip()
}
}
}
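// Sizing note: each length-delimited field costs one tag byte, a
// varint-encoded length, and the payload itself, hence the recurring
// "1 + proto.Sov(uint64(l)) + l" pattern below. Worked example: the string
// "hi" sizes as 1 (tag) + 1 (varint of length 2) + 2 (payload) = 4 bytes.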
func SizeProtoAnyValue(orig *otlpcommon.AnyValue) int {
var n int
var l int
_ = l
switch orig := orig.Value.(type) {
case nil:
// No value set; contributes nothing to the size.
case *otlpcommon.AnyValue_StringValue:
l = len(orig.StringValue)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpcommon.AnyValue_BoolValue:
n += 2
case *otlpcommon.AnyValue_IntValue:
n += 1 + proto.Sov(uint64(orig.IntValue))
case *otlpcommon.AnyValue_DoubleValue:
n += 9
case *otlpcommon.AnyValue_ArrayValue:
l = SizeProtoArrayValue(orig.ArrayValue)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpcommon.AnyValue_KvlistValue:
l = SizeProtoKeyValueList(orig.KvlistValue)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpcommon.AnyValue_BytesValue:
l = len(orig.BytesValue)
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
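// Marshaling note: encoding walks backwards from the end of buf so that a
// nested message's length is known before its length prefix is written. Each
// tag byte is (field_number << 3) | wire_type, e.g. 0xa = field 1 LEN,
// 0x10 = field 2 VARINT, 0x21 = field 4 I64, 0x3a = field 7 LEN.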
func MarshalProtoAnyValue(orig *otlpcommon.AnyValue, buf []byte) int {
pos := len(buf)
var l int
_ = l
switch orig := orig.Value.(type) {
case *otlpcommon.AnyValue_StringValue:
l = len(orig.StringValue)
pos -= l
copy(buf[pos:], orig.StringValue)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
case *otlpcommon.AnyValue_BoolValue:
pos--
if orig.BoolValue {
buf[pos] = 1
} else {
buf[pos] = 0
}
pos--
buf[pos] = 0x10
case *otlpcommon.AnyValue_IntValue:
pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue))
pos--
buf[pos] = 0x18
case *otlpcommon.AnyValue_DoubleValue:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue))
pos--
buf[pos] = 0x21
case *otlpcommon.AnyValue_ArrayValue:
l = MarshalProtoArrayValue(orig.ArrayValue, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
case *otlpcommon.AnyValue_KvlistValue:
l = MarshalProtoKeyValueList(orig.KvlistValue, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
case *otlpcommon.AnyValue_BytesValue:
l = len(orig.BytesValue)
pos -= l
copy(buf[pos:], orig.BytesValue)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
return len(buf) - pos
}
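// Unmarshaling note: the loop below consumes one (tag, value) pair per
// iteration, rejects fields that arrive with an unexpected wire type, and
// skips unknown field numbers via proto.ConsumeUnknown so payloads from
// newer schema versions still decode.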
func UnmarshalProtoAnyValue(orig *otlpcommon.AnyValue, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpcommon.AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
}
ov.StringValue = string(buf[startPos:pos])
orig.Value = ov
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
var ov *otlpcommon.AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
}
ov.BoolValue = num != 0
orig.Value = ov
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
var ov *otlpcommon.AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
}
ov.IntValue = int64(num)
orig.Value = ov
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpcommon.AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
}
ov.DoubleValue = math.Float64frombits(num)
orig.Value = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpcommon.AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
err = UnmarshalProtoArrayValue(ov.ArrayValue, buf[startPos:pos])
if err != nil {
return err
}
orig.Value = ov
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpcommon.AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
err = UnmarshalProtoKeyValueList(ov.KvlistValue, buf[startPos:pos])
if err != nil {
return err
}
orig.Value = ov
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpcommon.AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &otlpcommon.AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
}
if length != 0 {
ov.BytesValue = make([]byte, length)
copy(ov.BytesValue, buf[startPos:pos])
}
orig.Value = ov
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
type SliceWrapper struct {
orig *[]otlpcommon.AnyValue
state *State
}
func GetSliceOrig(ms SliceWrapper) *[]otlpcommon.AnyValue {
return ms.orig
}
func GetSliceState(ms SliceWrapper) *State {
return ms.state
}
func NewSliceWrapper(orig *[]otlpcommon.AnyValue, state *State) SliceWrapper {
return SliceWrapper{orig: orig, state: state}
}
func GenTestSliceWrapper() SliceWrapper {
orig := GenTestAnyValueSlice()
return NewSliceWrapper(&orig, NewState())
}
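// CopyAnyValueSlice copies src into dest, reusing dest's backing array when
// its capacity suffices (releasing any trailing elements) and allocating a
// new slice otherwise. Callers must use the returned slice in place of dest.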
func CopyAnyValueSlice(dest, src []otlpcommon.AnyValue) []otlpcommon.AnyValue {
var newDest []otlpcommon.AnyValue
if cap(dest) < len(src) {
newDest = make([]otlpcommon.AnyValue, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the remaining elements so the GC can free their memory.
// This happens when len(src) < len(dest) <= cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteAnyValue(&dest[i], false)
}
}
for i := range src {
CopyAnyValue(&newDest[i], &src[i])
}
return newDest
}
func GenTestAnyValueSlice() []otlpcommon.AnyValue {
orig := make([]otlpcommon.AnyValue, 5)
orig[1] = *GenTestAnyValue()
orig[3] = *GenTestAnyValue()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolArrayValue = sync.Pool{
New: func() any {
return &otlpcommon.ArrayValue{}
},
}
)
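// Pooling lifecycle (a sketch; assumes the UseProtoPooling feature gate is
// enabled, otherwise plain allocation is used):
//
//	av := NewArrayValue()      // taken from protoPoolArrayValue
//	// ... populate and use av ...
//	DeleteArrayValue(av, true) // reset and returned to the pool
//
// The nullable flag marks pool-owned pointers; values embedded by value in
// slices or parent structs are deleted with nullable=false and never Put back.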
func NewArrayValue() *otlpcommon.ArrayValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.ArrayValue{}
}
return protoPoolArrayValue.Get().(*otlpcommon.ArrayValue)
}
func DeleteArrayValue(orig *otlpcommon.ArrayValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Values {
DeleteAnyValue(&orig.Values[i], false)
}
orig.Reset()
if nullable {
protoPoolArrayValue.Put(orig)
}
}
func CopyArrayValue(dest, src *otlpcommon.ArrayValue) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Values = CopyAnyValueSlice(dest.Values, src.Values)
}
func GenTestArrayValue() *otlpcommon.ArrayValue {
orig := NewArrayValue()
orig.Values = GenTestAnyValueSlice()
return orig
}
// MarshalJSONArrayValue marshals all properties from the current struct to the destination stream.
func MarshalJSONArrayValue(orig *otlpcommon.ArrayValue, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
MarshalJSONAnyValue(&orig.Values[0], dest)
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
MarshalJSONAnyValue(&orig.Values[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONArrayValue unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONArrayValue(orig *otlpcommon.ArrayValue, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, otlpcommon.AnyValue{})
UnmarshalJSONAnyValue(&orig.Values[len(orig.Values)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoArrayValue(orig *otlpcommon.ArrayValue) int {
var n int
var l int
_ = l
for i := range orig.Values {
l = SizeProtoAnyValue(&orig.Values[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoArrayValue(orig *otlpcommon.ArrayValue, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Values) - 1; i >= 0; i-- {
l = MarshalProtoAnyValue(&orig.Values[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoArrayValue(orig *otlpcommon.ArrayValue, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Values = append(orig.Values, otlpcommon.AnyValue{})
err = UnmarshalProtoAnyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type ByteSliceWrapper struct {
orig *[]byte
state *State
}
func GetByteSliceOrig(ms ByteSliceWrapper) *[]byte {
return ms.orig
}
func GetByteSliceState(ms ByteSliceWrapper) *State {
return ms.state
}
func NewByteSliceWrapper(orig *[]byte, state *State) ByteSliceWrapper {
return ByteSliceWrapper{orig: orig, state: state}
}
func GenTestByteSliceWrapper() ByteSliceWrapper {
orig := GenTestByteSlice()
return NewByteSliceWrapper(&orig, NewState())
}
func CopyByteSlice(dst, src []byte) []byte {
return append(dst[:0], src...)
}
func GenTestByteSlice() []byte {
return []byte{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type EntityRefWrapper struct {
orig *otlpcommon.EntityRef
state *State
}
func GetEntityRefOrig(ms EntityRefWrapper) *otlpcommon.EntityRef {
return ms.orig
}
func GetEntityRefState(ms EntityRefWrapper) *State {
return ms.state
}
func NewEntityRefWrapper(orig *otlpcommon.EntityRef, state *State) EntityRefWrapper {
return EntityRefWrapper{orig: orig, state: state}
}
func GenTestEntityRefWrapper() EntityRefWrapper {
orig := GenTestEntityRef()
return NewEntityRefWrapper(orig, NewState())
}
var (
protoPoolEntityRef = sync.Pool{
New: func() any {
return &otlpcommon.EntityRef{}
},
}
)
func NewEntityRef() *otlpcommon.EntityRef {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.EntityRef{}
}
return protoPoolEntityRef.Get().(*otlpcommon.EntityRef)
}
func DeleteEntityRef(orig *otlpcommon.EntityRef, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolEntityRef.Put(orig)
}
}
func CopyEntityRef(dest, src *otlpcommon.EntityRef) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.SchemaUrl = src.SchemaUrl
dest.Type = src.Type
dest.IdKeys = CopyStringSlice(dest.IdKeys, src.IdKeys)
dest.DescriptionKeys = CopyStringSlice(dest.DescriptionKeys, src.DescriptionKeys)
}
func GenTestEntityRef() *otlpcommon.EntityRef {
orig := NewEntityRef()
orig.SchemaUrl = "test_schemaurl"
orig.Type = "test_type"
orig.IdKeys = GenTestStringSlice()
orig.DescriptionKeys = GenTestStringSlice()
return orig
}
// MarshalJSONEntityRef marshals all properties from the current struct to the destination stream.
func MarshalJSONEntityRef(orig *otlpcommon.EntityRef, dest *json.Stream) {
dest.WriteObjectStart()
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
if orig.Type != "" {
dest.WriteObjectField("type")
dest.WriteString(orig.Type)
}
if len(orig.IdKeys) > 0 {
dest.WriteObjectField("idKeys")
dest.WriteArrayStart()
dest.WriteString(orig.IdKeys[0])
for i := 1; i < len(orig.IdKeys); i++ {
dest.WriteMore()
dest.WriteString(orig.IdKeys[i])
}
dest.WriteArrayEnd()
}
if len(orig.DescriptionKeys) > 0 {
dest.WriteObjectField("descriptionKeys")
dest.WriteArrayStart()
dest.WriteString(orig.DescriptionKeys[0])
for i := 1; i < len(orig.DescriptionKeys); i++ {
dest.WriteMore()
dest.WriteString(orig.DescriptionKeys[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONEntityRef unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONEntityRef(orig *otlpcommon.EntityRef, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
case "type":
orig.Type = iter.ReadString()
case "idKeys", "id_keys":
for iter.ReadArray() {
orig.IdKeys = append(orig.IdKeys, iter.ReadString())
}
case "descriptionKeys", "description_keys":
for iter.ReadArray() {
orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString())
}
default:
iter.Skip()
}
}
}
func SizeProtoEntityRef(orig *otlpcommon.EntityRef) int {
var n int
var l int
_ = l
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Type)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.IdKeys {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.DescriptionKeys {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoEntityRef(orig *otlpcommon.EntityRef, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Type)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Type)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.IdKeys) - 1; i >= 0; i-- {
l = len(orig.IdKeys[i])
pos -= l
copy(buf[pos:], orig.IdKeys[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- {
l = len(orig.DescriptionKeys[i])
pos -= l
copy(buf[pos:], orig.DescriptionKeys[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
return len(buf) - pos
}
func UnmarshalProtoEntityRef(orig *otlpcommon.EntityRef, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Type = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos]))
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos]))
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
type EntityRefSliceWrapper struct {
orig *[]*otlpcommon.EntityRef
state *State
}
func GetEntityRefSliceOrig(ms EntityRefSliceWrapper) *[]*otlpcommon.EntityRef {
return ms.orig
}
func GetEntityRefSliceState(ms EntityRefSliceWrapper) *State {
return ms.state
}
func NewEntityRefSliceWrapper(orig *[]*otlpcommon.EntityRef, state *State) EntityRefSliceWrapper {
return EntityRefSliceWrapper{orig: orig, state: state}
}
func GenTestEntityRefSliceWrapper() EntityRefSliceWrapper {
orig := GenTestEntityRefSlice()
return NewEntityRefSliceWrapper(&orig, NewState())
}
func CopyEntityRefSlice(dest, src []*otlpcommon.EntityRef) []*otlpcommon.EntityRef {
var newDest []*otlpcommon.EntityRef
if cap(dest) < len(src) {
newDest = make([]*otlpcommon.EntityRef, len(src))
// Copy the old pointers to reuse them.
copy(newDest, dest)
// Add new pointers for the missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewEntityRef()
}
} else {
newDest = dest[:len(src)]
// Clean up the remaining elements so the GC can free their memory.
// This happens when len(src) < len(dest) <= cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteEntityRef(dest[i], true)
dest[i] = nil
}
// Add new pointers for the missing elements.
// This happens when len(dest) < len(src) <= cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewEntityRef()
}
}
for i := range src {
CopyEntityRef(newDest[i], src[i])
}
return newDest
}
func GenTestEntityRefSlice() []*otlpcommon.EntityRef {
orig := make([]*otlpcommon.EntityRef, 5)
orig[0] = NewEntityRef()
orig[1] = GenTestEntityRef()
orig[2] = NewEntityRef()
orig[3] = GenTestEntityRef()
orig[4] = NewEntityRef()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExemplar = sync.Pool{
New: func() any {
return &otlpmetrics.Exemplar{}
},
}
ProtoPoolExemplar_AsDouble = sync.Pool{
New: func() any {
return &otlpmetrics.Exemplar_AsDouble{}
},
}
ProtoPoolExemplar_AsInt = sync.Pool{
New: func() any {
return &otlpmetrics.Exemplar_AsInt{}
},
}
)
func NewExemplar() *otlpmetrics.Exemplar {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Exemplar{}
}
return protoPoolExemplar.Get().(*otlpmetrics.Exemplar)
}
func DeleteExemplar(orig *otlpmetrics.Exemplar, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.FilteredAttributes {
DeleteKeyValue(&orig.FilteredAttributes[i], false)
}
switch ov := orig.Value.(type) {
case *otlpmetrics.Exemplar_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolExemplar_AsDouble.Put(ov)
}
case *otlpmetrics.Exemplar_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolExemplar_AsInt.Put(ov)
}
}
DeleteSpanID(&orig.SpanId, false)
DeleteTraceID(&orig.TraceId, false)
orig.Reset()
if nullable {
protoPoolExemplar.Put(orig)
}
}
func CopyExemplar(dest, src *otlpmetrics.Exemplar) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.FilteredAttributes = CopyKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes)
dest.TimeUnixNano = src.TimeUnixNano
switch t := src.Value.(type) {
case *otlpmetrics.Exemplar_AsDouble:
var ov *otlpmetrics.Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
case *otlpmetrics.Exemplar_AsInt:
var ov *otlpmetrics.Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
}
dest.SpanId = src.SpanId
dest.TraceId = src.TraceId
}
func GenTestExemplar() *otlpmetrics.Exemplar {
orig := NewExemplar()
orig.FilteredAttributes = GenTestKeyValueSlice()
orig.TimeUnixNano = 1234567890
orig.Value = &otlpmetrics.Exemplar_AsInt{AsInt: int64(13)}
orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
return orig
}
// MarshalJSONExemplar marshals all properties from the current struct to the destination stream.
func MarshalJSONExemplar(orig *otlpmetrics.Exemplar, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.FilteredAttributes) > 0 {
dest.WriteObjectField("filteredAttributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.FilteredAttributes[0], dest)
for i := 1; i < len(orig.FilteredAttributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.FilteredAttributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
case *otlpmetrics.Exemplar_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
case *otlpmetrics.Exemplar_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
if orig.SpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("spanId")
MarshalJSONSpanID(&orig.SpanId, dest)
}
if orig.TraceId != data.TraceID([16]byte{}) {
dest.WriteObjectField("traceId")
MarshalJSONTraceID(&orig.TraceId, dest)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExemplar unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONExemplar(orig *otlpmetrics.Exemplar, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "filteredAttributes", "filtered_attributes":
for iter.ReadArray() {
orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], iter)
}
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "asDouble", "as_double":
{
var ov *otlpmetrics.Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
}
case "asInt", "as_int":
{
var ov *otlpmetrics.Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
}
case "spanId", "span_id":
UnmarshalJSONSpanID(&orig.SpanId, iter)
case "traceId", "trace_id":
UnmarshalJSONTraceID(&orig.TraceId, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExemplar(orig *otlpmetrics.Exemplar) int {
var n int
var l int
_ = l
for i := range orig.FilteredAttributes {
l = SizeProtoKeyValue(&orig.FilteredAttributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.TimeUnixNano != 0 {
n += 9
}
switch orig := orig.Value.(type) {
case nil:
// No value set; contributes nothing to the size.
case *otlpmetrics.Exemplar_AsDouble:
n += 9
case *otlpmetrics.Exemplar_AsInt:
n += 9
}
l = SizeProtoSpanID(&orig.SpanId)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoTraceID(&orig.TraceId)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExemplar(orig *otlpmetrics.Exemplar, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.FilteredAttributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x11
}
switch orig := orig.Value.(type) {
case *otlpmetrics.Exemplar_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x19
case *otlpmetrics.Exemplar_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
buf[pos] = 0x31
}
l = MarshalProtoSpanID(&orig.SpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
l = MarshalProtoTraceID(&orig.TraceId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
return len(buf) - pos
}
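// Field numbers below follow the OTLP metrics proto, where Exemplar's field 1
// is reserved (it previously held the deprecated filtered_labels), so
// filtered_attributes decodes from field 7.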
func UnmarshalProtoExemplar(orig *otlpmetrics.Exemplar, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.FilteredAttributes = append(orig.FilteredAttributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.FilteredAttributes[len(orig.FilteredAttributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
case 6:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.SpanId, buf[startPos:pos])
if err != nil {
return err
}
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoTraceID(&orig.TraceId, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyExemplarSlice(dest, src []otlpmetrics.Exemplar) []otlpmetrics.Exemplar {
var newDest []otlpmetrics.Exemplar
if cap(dest) < len(src) {
newDest = make([]otlpmetrics.Exemplar, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the remaining elements so the GC can free their memory.
// This happens when len(src) < len(dest) <= cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExemplar(&dest[i], false)
}
}
for i := range src {
CopyExemplar(&newDest[i], &src[i])
}
return newDest
}
func GenTestExemplarSlice() []otlpmetrics.Exemplar {
orig := make([]otlpmetrics.Exemplar, 5)
orig[1] = *GenTestExemplar()
orig[3] = *GenTestExemplar()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExponentialHistogram = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogram{}
},
}
)
func NewExponentialHistogram() *otlpmetrics.ExponentialHistogram {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.ExponentialHistogram{}
}
return protoPoolExponentialHistogram.Get().(*otlpmetrics.ExponentialHistogram)
}
func DeleteExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolExponentialHistogram.Put(orig)
}
}
func CopyExponentialHistogram(dest, src *otlpmetrics.ExponentialHistogram) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.DataPoints = CopyExponentialHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
}
func GenTestExponentialHistogram() *otlpmetrics.ExponentialHistogram {
orig := NewExponentialHistogram()
orig.DataPoints = GenTestExponentialHistogramDataPointSlice()
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
return orig
}
// MarshalJSONExponentialHistogram marshals all properties from the current struct to the destination stream.
func MarshalJSONExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
MarshalJSONExponentialHistogramDataPoint(orig.DataPoints[0], dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
MarshalJSONExponentialHistogramDataPoint(orig.DataPoints[i], dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExponentialHistogram unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
UnmarshalJSONExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
default:
iter.Skip()
}
}
}
func SizeProtoExponentialHistogram(orig *otlpmetrics.ExponentialHistogram) int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = SizeProtoExponentialHistogramDataPoint(orig.DataPoints[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
return n
}
func MarshalProtoExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = MarshalProtoExponentialHistogramDataPoint(orig.DataPoints[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
return len(buf) - pos
}
func UnmarshalProtoExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
err = UnmarshalProtoExponentialHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExponentialHistogramDataPoint = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogramDataPoint{}
},
}
ProtoPoolExponentialHistogramDataPoint_Sum = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
},
}
ProtoPoolExponentialHistogramDataPoint_Min = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogramDataPoint_Min{}
},
}
ProtoPoolExponentialHistogramDataPoint_Max = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogramDataPoint_Max{}
},
}
)
func NewExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.ExponentialHistogramDataPoint{}
}
return protoPoolExponentialHistogramDataPoint.Get().(*otlpmetrics.ExponentialHistogramDataPoint)
}
func DeleteExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
case *otlpmetrics.ExponentialHistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolExponentialHistogramDataPoint_Sum.Put(ov)
}
}
DeleteExponentialHistogramDataPoint_Buckets(&orig.Positive, false)
DeleteExponentialHistogramDataPoint_Buckets(&orig.Negative, false)
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
case *otlpmetrics.ExponentialHistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolExponentialHistogramDataPoint_Min.Put(ov)
}
}
switch ov := orig.Max_.(type) {
case *otlpmetrics.ExponentialHistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolExponentialHistogramDataPoint_Max.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolExponentialHistogramDataPoint.Put(orig)
}
}
func CopyExponentialHistogramDataPoint(dest, src *otlpmetrics.ExponentialHistogramDataPoint) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
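// Sum, Min and Max are optional scalars modeled as single-case oneofs; the
// copy reuses an existing destination wrapper when present and clears the
// field when the source carries no value.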
if srcSum, ok := src.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
destSum, ok := dest.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
if !ok {
destSum = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
dest.Sum_ = destSum
}
destSum.Sum = srcSum.Sum
} else {
dest.Sum_ = nil
}
dest.Scale = src.Scale
dest.ZeroCount = src.ZeroCount
CopyExponentialHistogramDataPoint_Buckets(&dest.Positive, &src.Positive)
CopyExponentialHistogramDataPoint_Buckets(&dest.Negative, &src.Negative)
dest.Flags = src.Flags
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
if srcMin, ok := src.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
destMin, ok := dest.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min)
if !ok {
destMin = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
dest.Min_ = destMin
}
destMin.Min = srcMin.Min
} else {
dest.Min_ = nil
}
if srcMax, ok := src.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
destMax, ok := dest.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max)
if !ok {
destMax = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
dest.Max_ = destMax
}
destMax.Max = srcMax.Max
} else {
dest.Max_ = nil
}
dest.ZeroThreshold = src.ZeroThreshold
}
func GenTestExponentialHistogramDataPoint() *otlpmetrics.ExponentialHistogramDataPoint {
orig := NewExponentialHistogramDataPoint()
orig.Attributes = GenTestKeyValueSlice()
orig.StartTimeUnixNano = 1234567890
orig.TimeUnixNano = 1234567890
orig.Count = uint64(13)
orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)}
orig.Scale = int32(13)
orig.ZeroCount = uint64(13)
orig.Positive = *GenTestExponentialHistogramDataPoint_Buckets()
orig.Negative = *GenTestExponentialHistogramDataPoint_Buckets()
orig.Flags = 1
orig.Exemplars = GenTestExemplarSlice()
orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)}
orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)}
orig.ZeroThreshold = float64(3.1415926)
return orig
}
// MarshalJSONExponentialHistogramDataPoint marshals all properties from the current struct to the destination stream.
func MarshalJSONExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if orig.Scale != int32(0) {
dest.WriteObjectField("scale")
dest.WriteInt32(orig.Scale)
}
if orig.ZeroCount != uint64(0) {
dest.WriteObjectField("zeroCount")
dest.WriteUint64(orig.ZeroCount)
}
dest.WriteObjectField("positive")
MarshalJSONExponentialHistogramDataPoint_Buckets(&orig.Positive, dest)
dest.WriteObjectField("negative")
MarshalJSONExponentialHistogramDataPoint_Buckets(&orig.Negative, dest)
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
MarshalJSONExemplar(&orig.Exemplars[0], dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
MarshalJSONExemplar(&orig.Exemplars[i], dest)
}
dest.WriteArrayEnd()
}
if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
if orig.ZeroThreshold != float64(0) {
dest.WriteObjectField("zeroThreshold")
dest.WriteFloat64(orig.ZeroThreshold)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExponentialHistogramDataPoint unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
{
var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
}
case "scale":
orig.Scale = iter.ReadInt32()
case "zeroCount", "zero_count":
orig.ZeroCount = iter.ReadUint64()
case "positive":
UnmarshalJSONExponentialHistogramDataPoint_Buckets(&orig.Positive, iter)
case "negative":
UnmarshalJSONExponentialHistogramDataPoint_Buckets(&orig.Negative, iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
UnmarshalJSONExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
}
case "min":
{
var ov *otlpmetrics.ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
}
case "max":
{
var ov *otlpmetrics.ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
}
case "zeroThreshold", "zero_threshold":
orig.ZeroThreshold = iter.ReadFloat64()
default:
iter.Skip()
}
}
}
func SizeProtoExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint) int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
if orig.Scale != 0 {
n += 1 + proto.Soz(uint64(orig.Scale))
}
if orig.ZeroCount != 0 {
n += 9
}
l = SizeProtoExponentialHistogramDataPoint_Buckets(&orig.Positive)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoExponentialHistogramDataPoint_Buckets(&orig.Negative)
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
for i := range orig.Exemplars {
l = SizeProtoExemplar(&orig.Exemplars[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
_ = orig
n += 9
}
if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
_ = orig
n += 9
}
if orig.ZeroThreshold != 0 {
n += 9
}
return n
}
func MarshalProtoExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig, ok := orig.Sum_.(*otlpmetrics.ExponentialHistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
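// Scale is a sint32, so it is zigzag-encoded before the varint:
// zigzag(n) = (n << 1) ^ (n >> 31), e.g. 13 -> 26 and -13 -> 25.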
if orig.Scale != 0 {
pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Scale)<<1)^uint32(orig.Scale>>31)))
pos--
buf[pos] = 0x30
}
if orig.ZeroCount != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ZeroCount))
pos--
buf[pos] = 0x39
}
l = MarshalProtoExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
l = MarshalProtoExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x50
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = MarshalProtoExemplar(&orig.Exemplars[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
if orig, ok := orig.Min_.(*otlpmetrics.ExponentialHistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x61
}
if orig, ok := orig.Max_.(*otlpmetrics.ExponentialHistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
buf[pos] = 0x69
}
if orig.ZeroThreshold != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ZeroThreshold))
pos--
buf[pos] = 0x71
}
return len(buf) - pos
}
func UnmarshalProtoExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Sum{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
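// Zigzag-decode the sint32 Scale: 26 -> 13, 25 -> -13.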
orig.Scale = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
case 7:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ZeroCount = uint64(num)
case 8:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExponentialHistogramDataPoint_Buckets(&orig.Positive, buf[startPos:pos])
if err != nil {
return err
}
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExponentialHistogramDataPoint_Buckets(&orig.Negative, buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
err = UnmarshalProtoExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Min{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
case 13:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.ExponentialHistogramDataPoint_Max{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
case 14:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ZeroThreshold = math.Float64frombits(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExponentialHistogramDataPoint_Buckets = sync.Pool{
New: func() any {
return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{}
},
}
)
func NewExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.ExponentialHistogramDataPoint_Buckets{}
}
return protoPoolExponentialHistogramDataPoint_Buckets.Get().(*otlpmetrics.ExponentialHistogramDataPoint_Buckets)
}
func DeleteExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExponentialHistogramDataPoint_Buckets.Put(orig)
}
}
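// Illustrative sketch (not part of the generated code): with the
// UseProtoPooling gate enabled, New* draws from the sync.Pool and
// Delete* resets the object and, when it owns the allocation
// (nullable == true), returns it for reuse:
//
//	bk := NewExponentialHistogramDataPoint_Buckets()
//	bk.Offset = 2
//	// ... use bk ...
//	DeleteExponentialHistogramDataPoint_Buckets(bk, true) // recycle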
func CopyExponentialHistogramDataPoint_Buckets(dest, src *otlpmetrics.ExponentialHistogramDataPoint_Buckets) {
// If copying to same object, just return.
if src == dest {
return
}
dest.Offset = src.Offset
dest.BucketCounts = CopyUint64Slice(dest.BucketCounts, src.BucketCounts)
}
func GenTestExponentialHistogramDataPoint_Buckets() *otlpmetrics.ExponentialHistogramDataPoint_Buckets {
orig := NewExponentialHistogramDataPoint_Buckets()
orig.Offset = int32(13)
orig.BucketCounts = GenTestUint64Slice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Offset != int32(0) {
dest.WriteObjectField("offset")
dest.WriteInt32(orig.Offset)
}
if len(orig.BucketCounts) > 0 {
dest.WriteObjectField("bucketCounts")
dest.WriteArrayStart()
dest.WriteUint64(orig.BucketCounts[0])
for i := 1; i < len(orig.BucketCounts); i++ {
dest.WriteMore()
dest.WriteUint64(orig.BucketCounts[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExponentialHistogramDataPoint_Buckets unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "offset":
orig.Offset = iter.ReadInt32()
case "bucketCounts", "bucket_counts":
for iter.ReadArray() {
orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
}
default:
iter.Skip()
}
}
}
func SizeProtoExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets) int {
var n int
var l int
_ = l
if orig.Offset != 0 {
n += 1 + proto.Soz(uint64(orig.Offset))
}
if len(orig.BucketCounts) > 0 {
l = 0
for _, e := range orig.BucketCounts {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.Offset != 0 {
pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31)))
pos--
buf[pos] = 0x8
}
l = len(orig.BucketCounts)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
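// Illustrative sketch (not part of the generated code): MarshalProto*
// fills the buffer from the end backwards and returns the number of
// bytes written, so a caller is assumed to size the buffer with
// SizeProto* first and take the tail:
//
//	size := SizeProtoExponentialHistogramDataPoint_Buckets(orig)
//	buf := make([]byte, size)
//	n := MarshalProtoExponentialHistogramDataPoint_Buckets(orig, buf)
//	wire := buf[len(buf)-n:] // n == size for an exactly-sized buffer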
func UnmarshalProtoExponentialHistogramDataPoint_Buckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
case 2:
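// proto3 repeated scalars may arrive packed (one length-delimited
// blob of varints) or unpacked (one varint per element); both wire
// forms are accepted here.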
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
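// CopyExponentialHistogramDataPointSlice reuses dest's backing array and
// element pointers where capacity allows: it allocates new elements only
// past len(dest), and deletes (and possibly pools) surplus elements so
// the GC can reclaim them.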
func CopyExponentialHistogramDataPointSlice(dest, src []*otlpmetrics.ExponentialHistogramDataPoint) []*otlpmetrics.ExponentialHistogramDataPoint {
var newDest []*otlpmetrics.ExponentialHistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.ExponentialHistogramDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for the missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPoint()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogramDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPoint()
}
}
for i := range src {
CopyExponentialHistogramDataPoint(newDest[i], src[i])
}
return newDest
}
func GenTestExponentialHistogramDataPointSlice() []*otlpmetrics.ExponentialHistogramDataPoint {
orig := make([]*otlpmetrics.ExponentialHistogramDataPoint, 5)
orig[0] = NewExponentialHistogramDataPoint()
orig[1] = GenTestExponentialHistogramDataPoint()
orig[2] = NewExponentialHistogramDataPoint()
orig[3] = GenTestExponentialHistogramDataPoint()
orig[4] = NewExponentialHistogramDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportLogsPartialSuccess = sync.Pool{
New: func() any {
return &otlpcollectorlogs.ExportLogsPartialSuccess{}
},
}
)
func NewExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorlogs.ExportLogsPartialSuccess{}
}
return protoPoolExportLogsPartialSuccess.Get().(*otlpcollectorlogs.ExportLogsPartialSuccess)
}
func DeleteExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportLogsPartialSuccess.Put(orig)
}
}
func CopyExportLogsPartialSuccess(dest, src *otlpcollectorlogs.ExportLogsPartialSuccess) {
// If copying to same object, just return.
if src == dest {
return
}
dest.RejectedLogRecords = src.RejectedLogRecords
dest.ErrorMessage = src.ErrorMessage
}
func GenTestExportLogsPartialSuccess() *otlpcollectorlogs.ExportLogsPartialSuccess {
orig := NewExportLogsPartialSuccess()
orig.RejectedLogRecords = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedLogRecords != int64(0) {
dest.WriteObjectField("rejectedLogRecords")
dest.WriteInt64(orig.RejectedLogRecords)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportLogsPartialSuccess unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedLogRecords", "rejected_log_records":
orig.RejectedLogRecords = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
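// Note: field keys are accepted in both the canonical OTLP/JSON camelCase
// form and the proto snake_case form, e.g. {"rejectedLogRecords":1} and
// {"rejected_log_records":1} decode identically; unknown keys are skipped.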
func SizeProtoExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess) int {
var n int
var l int
_ = l
if orig.RejectedLogRecords != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedLogRecords))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedLogRecords != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func UnmarshalProtoExportLogsPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedLogRecords = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type LogsWrapper struct {
orig *otlpcollectorlogs.ExportLogsServiceRequest
state *State
}
func GetLogsOrig(ms LogsWrapper) *otlpcollectorlogs.ExportLogsServiceRequest {
return ms.orig
}
func GetLogsState(ms LogsWrapper) *State {
return ms.state
}
func NewLogsWrapper(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *State) LogsWrapper {
return LogsWrapper{orig: orig, state: state}
}
func GenTestLogsWrapper() LogsWrapper {
orig := GenTestExportLogsServiceRequest()
return NewLogsWrapper(orig, NewState())
}
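// Illustrative note (an assumption based on pdata conventions, not stated
// here): LogsWrapper pairs the raw ExportLogsServiceRequest with a State
// pointer so the public pdata API can carry mutability/ownership
// information alongside the protogen struct without exposing either
// directly.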
var (
protoPoolExportLogsServiceRequest = sync.Pool{
New: func() any {
return &otlpcollectorlogs.ExportLogsServiceRequest{}
},
}
)
func NewExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorlogs.ExportLogsServiceRequest{}
}
return protoPoolExportLogsServiceRequest.Get().(*otlpcollectorlogs.ExportLogsServiceRequest)
}
func DeleteExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceLogs {
DeleteResourceLogs(orig.ResourceLogs[i], true)
}
orig.Reset()
if nullable {
protoPoolExportLogsServiceRequest.Put(orig)
}
}
func CopyExportLogsServiceRequest(dest, src *otlpcollectorlogs.ExportLogsServiceRequest) {
// If copying to same object, just return.
if src == dest {
return
}
dest.ResourceLogs = CopyResourceLogsSlice(dest.ResourceLogs, src.ResourceLogs)
}
func GenTestExportLogsServiceRequest() *otlpcollectorlogs.ExportLogsServiceRequest {
orig := NewExportLogsServiceRequest()
orig.ResourceLogs = GenTestResourceLogsSlice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceLogs) > 0 {
dest.WriteObjectField("resourceLogs")
dest.WriteArrayStart()
MarshalJSONResourceLogs(orig.ResourceLogs[0], dest)
for i := 1; i < len(orig.ResourceLogs); i++ {
dest.WriteMore()
MarshalJSONResourceLogs(orig.ResourceLogs[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportLogsServiceRequest unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceLogs", "resource_logs":
for iter.ReadArray() {
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
UnmarshalJSONResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest) int {
var n int
var l int
_ = l
for i := range orig.ResourceLogs {
l = SizeProtoResourceLogs(orig.ResourceLogs[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
l = MarshalProtoResourceLogs(orig.ResourceLogs[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoExportLogsServiceRequest(orig *otlpcollectorlogs.ExportLogsServiceRequest, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
err = UnmarshalProtoResourceLogs(orig.ResourceLogs[len(orig.ResourceLogs)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportLogsServiceResponse = sync.Pool{
New: func() any {
return &otlpcollectorlogs.ExportLogsServiceResponse{}
},
}
)
func NewExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorlogs.ExportLogsServiceResponse{}
}
return protoPoolExportLogsServiceResponse.Get().(*otlpcollectorlogs.ExportLogsServiceResponse)
}
func DeleteExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportLogsPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportLogsServiceResponse.Put(orig)
}
}
func CopyExportLogsServiceResponse(dest, src *otlpcollectorlogs.ExportLogsServiceResponse) {
// If copying to same object, just return.
if src == dest {
return
}
CopyExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
}
func GenTestExportLogsServiceResponse() *otlpcollectorlogs.ExportLogsServiceResponse {
orig := NewExportLogsServiceResponse()
orig.PartialSuccess = *GenTestExportLogsPartialSuccess()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
MarshalJSONExportLogsPartialSuccess(&orig.PartialSuccess, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONExportLogsServiceResponse unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
UnmarshalJSONExportLogsPartialSuccess(&orig.PartialSuccess, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse) int {
var n int
var l int
_ = l
l = SizeProtoExportLogsPartialSuccess(&orig.PartialSuccess)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoExportLogsPartialSuccess(&orig.PartialSuccess, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func UnmarshalProtoExportLogsServiceResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExportLogsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportMetricsPartialSuccess = sync.Pool{
New: func() any {
return &otlpcollectormetrics.ExportMetricsPartialSuccess{}
},
}
)
func NewExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectormetrics.ExportMetricsPartialSuccess{}
}
return protoPoolExportMetricsPartialSuccess.Get().(*otlpcollectormetrics.ExportMetricsPartialSuccess)
}
func DeleteExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportMetricsPartialSuccess.Put(orig)
}
}
func CopyExportMetricsPartialSuccess(dest, src *otlpcollectormetrics.ExportMetricsPartialSuccess) {
// If copying to same object, just return.
if src == dest {
return
}
dest.RejectedDataPoints = src.RejectedDataPoints
dest.ErrorMessage = src.ErrorMessage
}
func GenTestExportMetricsPartialSuccess() *otlpcollectormetrics.ExportMetricsPartialSuccess {
orig := NewExportMetricsPartialSuccess()
orig.RejectedDataPoints = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedDataPoints != int64(0) {
dest.WriteObjectField("rejectedDataPoints")
dest.WriteInt64(orig.RejectedDataPoints)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportMetricsPartialSuccess unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedDataPoints", "rejected_data_points":
orig.RejectedDataPoints = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess) int {
var n int
var l int
_ = l
if orig.RejectedDataPoints != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedDataPoints))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedDataPoints != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func UnmarshalProtoExportMetricsPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedDataPoints = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type MetricsWrapper struct {
orig *otlpcollectormetrics.ExportMetricsServiceRequest
state *State
}
func GetMetricsOrig(ms MetricsWrapper) *otlpcollectormetrics.ExportMetricsServiceRequest {
return ms.orig
}
func GetMetricsState(ms MetricsWrapper) *State {
return ms.state
}
func NewMetricsWrapper(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *State) MetricsWrapper {
return MetricsWrapper{orig: orig, state: state}
}
func GenTestMetricsWrapper() MetricsWrapper {
orig := GenTestExportMetricsServiceRequest()
return NewMetricsWrapper(orig, NewState())
}
var (
protoPoolExportMetricsServiceRequest = sync.Pool{
New: func() any {
return &otlpcollectormetrics.ExportMetricsServiceRequest{}
},
}
)
func NewExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectormetrics.ExportMetricsServiceRequest{}
}
return protoPoolExportMetricsServiceRequest.Get().(*otlpcollectormetrics.ExportMetricsServiceRequest)
}
func DeleteExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceMetrics {
DeleteResourceMetrics(orig.ResourceMetrics[i], true)
}
orig.Reset()
if nullable {
protoPoolExportMetricsServiceRequest.Put(orig)
}
}
func CopyExportMetricsServiceRequest(dest, src *otlpcollectormetrics.ExportMetricsServiceRequest) {
// If copying to same object, just return.
if src == dest {
return
}
dest.ResourceMetrics = CopyResourceMetricsSlice(dest.ResourceMetrics, src.ResourceMetrics)
}
func GenTestExportMetricsServiceRequest() *otlpcollectormetrics.ExportMetricsServiceRequest {
orig := NewExportMetricsServiceRequest()
orig.ResourceMetrics = GenTestResourceMetricsSlice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceMetrics) > 0 {
dest.WriteObjectField("resourceMetrics")
dest.WriteArrayStart()
MarshalJSONResourceMetrics(orig.ResourceMetrics[0], dest)
for i := 1; i < len(orig.ResourceMetrics); i++ {
dest.WriteMore()
MarshalJSONResourceMetrics(orig.ResourceMetrics[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportMetricsServiceRequest unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceMetrics", "resource_metrics":
for iter.ReadArray() {
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
UnmarshalJSONResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest) int {
var n int
var l int
_ = l
for i := range orig.ResourceMetrics {
l = SizeProtoResourceMetrics(orig.ResourceMetrics[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
l = MarshalProtoResourceMetrics(orig.ResourceMetrics[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoExportMetricsServiceRequest(orig *otlpcollectormetrics.ExportMetricsServiceRequest, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
err = UnmarshalProtoResourceMetrics(orig.ResourceMetrics[len(orig.ResourceMetrics)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportMetricsServiceResponse = sync.Pool{
New: func() any {
return &otlpcollectormetrics.ExportMetricsServiceResponse{}
},
}
)
func NewExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectormetrics.ExportMetricsServiceResponse{}
}
return protoPoolExportMetricsServiceResponse.Get().(*otlpcollectormetrics.ExportMetricsServiceResponse)
}
func DeleteExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportMetricsPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportMetricsServiceResponse.Put(orig)
}
}
func CopyExportMetricsServiceResponse(dest, src *otlpcollectormetrics.ExportMetricsServiceResponse) {
// If copying to same object, just return.
if src == dest {
return
}
CopyExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
}
func GenTestExportMetricsServiceResponse() *otlpcollectormetrics.ExportMetricsServiceResponse {
orig := NewExportMetricsServiceResponse()
orig.PartialSuccess = *GenTestExportMetricsPartialSuccess()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
MarshalJSONExportMetricsPartialSuccess(&orig.PartialSuccess, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONExportMetricsServiceResponse unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
UnmarshalJSONExportMetricsPartialSuccess(&orig.PartialSuccess, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse) int {
var n int
var l int
_ = l
l = SizeProtoExportMetricsPartialSuccess(&orig.PartialSuccess)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoExportMetricsPartialSuccess(&orig.PartialSuccess, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func UnmarshalProtoExportMetricsServiceResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExportMetricsPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportProfilesPartialSuccess = sync.Pool{
New: func() any {
return &otlpcollectorprofiles.ExportProfilesPartialSuccess{}
},
}
)
func NewExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorprofiles.ExportProfilesPartialSuccess{}
}
return protoPoolExportProfilesPartialSuccess.Get().(*otlpcollectorprofiles.ExportProfilesPartialSuccess)
}
func DeleteExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportProfilesPartialSuccess.Put(orig)
}
}
func CopyExportProfilesPartialSuccess(dest, src *otlpcollectorprofiles.ExportProfilesPartialSuccess) {
// If copying to same object, just return.
if src == dest {
return
}
dest.RejectedProfiles = src.RejectedProfiles
dest.ErrorMessage = src.ErrorMessage
}
func GenTestExportProfilesPartialSuccess() *otlpcollectorprofiles.ExportProfilesPartialSuccess {
orig := NewExportProfilesPartialSuccess()
orig.RejectedProfiles = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedProfiles != int64(0) {
dest.WriteObjectField("rejectedProfiles")
dest.WriteInt64(orig.RejectedProfiles)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportProfilesPartialSuccess unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedProfiles", "rejected_profiles":
orig.RejectedProfiles = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess) int {
var n int
var l int
_ = l
if orig.RejectedProfiles != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedProfiles))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedProfiles != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func UnmarshalProtoExportProfilesPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedProfiles = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type ProfilesWrapper struct {
orig *otlpcollectorprofiles.ExportProfilesServiceRequest
state *State
}
func GetProfilesOrig(ms ProfilesWrapper) *otlpcollectorprofiles.ExportProfilesServiceRequest {
return ms.orig
}
func GetProfilesState(ms ProfilesWrapper) *State {
return ms.state
}
func NewProfilesWrapper(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state *State) ProfilesWrapper {
return ProfilesWrapper{orig: orig, state: state}
}
func GenTestProfilesWrapper() ProfilesWrapper {
orig := GenTestExportProfilesServiceRequest()
return NewProfilesWrapper(orig, NewState())
}
var (
protoPoolExportProfilesServiceRequest = sync.Pool{
New: func() any {
return &otlpcollectorprofiles.ExportProfilesServiceRequest{}
},
}
)
func NewExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorprofiles.ExportProfilesServiceRequest{}
}
return protoPoolExportProfilesServiceRequest.Get().(*otlpcollectorprofiles.ExportProfilesServiceRequest)
}
func DeleteExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceProfiles {
DeleteResourceProfiles(orig.ResourceProfiles[i], true)
}
DeleteProfilesDictionary(&orig.Dictionary, false)
orig.Reset()
if nullable {
protoPoolExportProfilesServiceRequest.Put(orig)
}
}
func CopyExportProfilesServiceRequest(dest, src *otlpcollectorprofiles.ExportProfilesServiceRequest) {
// If copying to same object, just return.
if src == dest {
return
}
dest.ResourceProfiles = CopyResourceProfilesSlice(dest.ResourceProfiles, src.ResourceProfiles)
CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
}
func GenTestExportProfilesServiceRequest() *otlpcollectorprofiles.ExportProfilesServiceRequest {
orig := NewExportProfilesServiceRequest()
orig.ResourceProfiles = GenTestResourceProfilesSlice()
orig.Dictionary = *GenTestProfilesDictionary()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceProfiles) > 0 {
dest.WriteObjectField("resourceProfiles")
dest.WriteArrayStart()
MarshalJSONResourceProfiles(orig.ResourceProfiles[0], dest)
for i := 1; i < len(orig.ResourceProfiles); i++ {
dest.WriteMore()
MarshalJSONResourceProfiles(orig.ResourceProfiles[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectField("dictionary")
MarshalJSONProfilesDictionary(&orig.Dictionary, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONExportProfilesServiceRequest unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceProfiles", "resource_profiles":
for iter.ReadArray() {
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
UnmarshalJSONResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], iter)
}
case "dictionary":
UnmarshalJSONProfilesDictionary(&orig.Dictionary, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest) int {
var n int
var l int
_ = l
for i := range orig.ResourceProfiles {
l = SizeProtoResourceProfiles(orig.ResourceProfiles[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = SizeProtoProfilesDictionary(&orig.Dictionary)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
l = MarshalProtoResourceProfiles(orig.ResourceProfiles[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = MarshalProtoProfilesDictionary(&orig.Dictionary, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
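// Unlike the repeated ResourceProfiles field, Dictionary is a non-nullable
// embedded message (field 2, tag 0x12), so it is sized and written
// unconditionally even when empty.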
func UnmarshalProtoExportProfilesServiceRequest(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
err = UnmarshalProtoResourceProfiles(orig.ResourceProfiles[len(orig.ResourceProfiles)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoProfilesDictionary(&orig.Dictionary, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportProfilesServiceResponse = sync.Pool{
New: func() any {
return &otlpcollectorprofiles.ExportProfilesServiceResponse{}
},
}
)
func NewExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectorprofiles.ExportProfilesServiceResponse{}
}
return protoPoolExportProfilesServiceResponse.Get().(*otlpcollectorprofiles.ExportProfilesServiceResponse)
}
func DeleteExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportProfilesPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportProfilesServiceResponse.Put(orig)
}
}
func CopyExportProfilesServiceResponse(dest, src *otlpcollectorprofiles.ExportProfilesServiceResponse) {
// If copying to same object, just return.
if src == dest {
return
}
CopyExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
}
func GenTestExportProfilesServiceResponse() *otlpcollectorprofiles.ExportProfilesServiceResponse {
orig := NewExportProfilesServiceResponse()
orig.PartialSuccess = *GenTestExportProfilesPartialSuccess()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
MarshalJSONExportProfilesPartialSuccess(&orig.PartialSuccess, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONExportProfilesServiceResponse unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
UnmarshalJSONExportProfilesPartialSuccess(&orig.PartialSuccess, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse) int {
var n int
var l int
_ = l
l = SizeProtoExportProfilesPartialSuccess(&orig.PartialSuccess)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoExportProfilesPartialSuccess(&orig.PartialSuccess, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func UnmarshalProtoExportProfilesServiceResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExportProfilesPartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportTracePartialSuccess = sync.Pool{
New: func() any {
return &otlpcollectortrace.ExportTracePartialSuccess{}
},
}
)
func NewExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectortrace.ExportTracePartialSuccess{}
}
return protoPoolExportTracePartialSuccess.Get().(*otlpcollectortrace.ExportTracePartialSuccess)
}
func DeleteExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportTracePartialSuccess.Put(orig)
}
}
func CopyExportTracePartialSuccess(dest, src *otlpcollectortrace.ExportTracePartialSuccess) {
// If copying to same object, just return.
if src == dest {
return
}
dest.RejectedSpans = src.RejectedSpans
dest.ErrorMessage = src.ErrorMessage
}
func GenTestExportTracePartialSuccess() *otlpcollectortrace.ExportTracePartialSuccess {
orig := NewExportTracePartialSuccess()
orig.RejectedSpans = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedSpans != int64(0) {
dest.WriteObjectField("rejectedSpans")
dest.WriteInt64(orig.RejectedSpans)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportTracePartialSuccess unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedSpans", "rejected_spans":
orig.RejectedSpans = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess) int {
var n int
var l int
_ = l
if orig.RejectedSpans != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedSpans))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedSpans != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func UnmarshalProtoExportTracePartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedSpans = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type TracesWrapper struct {
orig *otlpcollectortrace.ExportTraceServiceRequest
state *State
}
func GetTracesOrig(ms TracesWrapper) *otlpcollectortrace.ExportTraceServiceRequest {
return ms.orig
}
func GetTracesState(ms TracesWrapper) *State {
return ms.state
}
func NewTracesWrapper(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) TracesWrapper {
return TracesWrapper{orig: orig, state: state}
}
func GenTestTracesWrapper() TracesWrapper {
orig := GenTestExportTraceServiceRequest()
return NewTracesWrapper(orig, NewState())
}
var (
protoPoolExportTraceServiceRequest = sync.Pool{
New: func() any {
return &otlpcollectortrace.ExportTraceServiceRequest{}
},
}
)
func NewExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectortrace.ExportTraceServiceRequest{}
}
return protoPoolExportTraceServiceRequest.Get().(*otlpcollectortrace.ExportTraceServiceRequest)
}
func DeleteExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceSpans {
DeleteResourceSpans(orig.ResourceSpans[i], true)
}
orig.Reset()
if nullable {
protoPoolExportTraceServiceRequest.Put(orig)
}
}
func CopyExportTraceServiceRequest(dest, src *otlpcollectortrace.ExportTraceServiceRequest) {
// If copying to same object, just return.
if src == dest {
return
}
dest.ResourceSpans = CopyResourceSpansSlice(dest.ResourceSpans, src.ResourceSpans)
}
func GenTestExportTraceServiceRequest() *otlpcollectortrace.ExportTraceServiceRequest {
orig := NewExportTraceServiceRequest()
orig.ResourceSpans = GenTestResourceSpansSlice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceSpans) > 0 {
dest.WriteObjectField("resourceSpans")
dest.WriteArrayStart()
MarshalJSONResourceSpans(orig.ResourceSpans[0], dest)
for i := 1; i < len(orig.ResourceSpans); i++ {
dest.WriteMore()
MarshalJSONResourceSpans(orig.ResourceSpans[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONExportTraceServiceRequest unmarshals all properties from the source iterator into the current struct.
func UnmarshalJSONExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceSpans", "resource_spans":
for iter.ReadArray() {
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
UnmarshalJSONResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest) int {
var n int
var l int
_ = l
for i := range orig.ResourceSpans {
l = SizeProtoResourceSpans(orig.ResourceSpans[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
l = MarshalProtoResourceSpans(orig.ResourceSpans[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoExportTraceServiceRequest(orig *otlpcollectortrace.ExportTraceServiceRequest, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
err = UnmarshalProtoResourceSpans(orig.ResourceSpans[len(orig.ResourceSpans)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolExportTraceServiceResponse = sync.Pool{
New: func() any {
return &otlpcollectortrace.ExportTraceServiceResponse{}
},
}
)
func NewExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &otlpcollectortrace.ExportTraceServiceResponse{}
}
return protoPoolExportTraceServiceResponse.Get().(*otlpcollectortrace.ExportTraceServiceResponse)
}
func DeleteExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportTracePartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportTraceServiceResponse.Put(orig)
}
}
func CopyExportTraceServiceResponse(dest, src *otlpcollectortrace.ExportTraceServiceResponse) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
}
func GenTestExportTraceServiceResponse() *otlpcollectortrace.ExportTraceServiceResponse {
orig := NewExportTraceServiceResponse()
orig.PartialSuccess = *GenTestExportTracePartialSuccess()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
MarshalJSONExportTracePartialSuccess(&orig.PartialSuccess, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONExportTraceServiceResponse unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
UnmarshalJSONExportTracePartialSuccess(&orig.PartialSuccess, iter)
default:
iter.Skip()
}
}
}
func SizeProtoExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse) int {
var n int
var l int
_ = l
l = SizeProtoExportTracePartialSuccess(&orig.PartialSuccess)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoExportTracePartialSuccess(&orig.PartialSuccess, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func UnmarshalProtoExportTraceServiceResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoExportTracePartialSuccess(&orig.PartialSuccess, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Float64SliceWrapper struct {
orig *[]float64
state *State
}
func GetFloat64SliceOrig(ms Float64SliceWrapper) *[]float64 {
return ms.orig
}
func GetFloat64SliceState(ms Float64SliceWrapper) *State {
return ms.state
}
func NewFloat64SliceWrapper(orig *[]float64, state *State) Float64SliceWrapper {
return Float64SliceWrapper{orig: orig, state: state}
}
func GenTestFloat64SliceWrapper() Float64SliceWrapper {
orig := GenTestFloat64Slice()
return NewFloat64SliceWrapper(&orig, NewState())
}
func CopyFloat64Slice(dst, src []float64) []float64 {
return append(dst[:0], src...)
}
func GenTestFloat64Slice() []float64 {
return []float64{1.1, 2.2, 3.3}
}
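// Illustrative sketch (not generated): CopyFloat64Slice truncates dst to zero
// length and appends, so it reuses dst's backing array whenever the capacity
// suffices and only allocates on growth.
func exampleCopyFloat64Slice() {
dst := make([]float64, 0, 4)
dst = CopyFloat64Slice(dst, []float64{1.1, 2.2}) // reuses the same backing array
dst = CopyFloat64Slice(dst, GenTestFloat64Slice()) // len 3, still within cap
_ = dst
}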
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolFunction = sync.Pool{
New: func() any {
return &otlpprofiles.Function{}
},
}
)
func NewFunction() *otlpprofiles.Function {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Function{}
}
return protoPoolFunction.Get().(*otlpprofiles.Function)
}
func DeleteFunction(orig *otlpprofiles.Function, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolFunction.Put(orig)
}
}
func CopyFunction(dest, src *otlpprofiles.Function) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.NameStrindex = src.NameStrindex
dest.SystemNameStrindex = src.SystemNameStrindex
dest.FilenameStrindex = src.FilenameStrindex
dest.StartLine = src.StartLine
}
func GenTestFunction() *otlpprofiles.Function {
orig := NewFunction()
orig.NameStrindex = int32(13)
orig.SystemNameStrindex = int32(13)
orig.FilenameStrindex = int32(13)
orig.StartLine = int64(13)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONFunction(orig *otlpprofiles.Function, dest *json.Stream) {
dest.WriteObjectStart()
if orig.NameStrindex != int32(0) {
dest.WriteObjectField("nameStrindex")
dest.WriteInt32(orig.NameStrindex)
}
if orig.SystemNameStrindex != int32(0) {
dest.WriteObjectField("systemNameStrindex")
dest.WriteInt32(orig.SystemNameStrindex)
}
if orig.FilenameStrindex != int32(0) {
dest.WriteObjectField("filenameStrindex")
dest.WriteInt32(orig.FilenameStrindex)
}
if orig.StartLine != int64(0) {
dest.WriteObjectField("startLine")
dest.WriteInt64(orig.StartLine)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONFunction unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONFunction(orig *otlpprofiles.Function, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "nameStrindex", "name_strindex":
orig.NameStrindex = iter.ReadInt32()
case "systemNameStrindex", "system_name_strindex":
orig.SystemNameStrindex = iter.ReadInt32()
case "filenameStrindex", "filename_strindex":
orig.FilenameStrindex = iter.ReadInt32()
case "startLine", "start_line":
orig.StartLine = iter.ReadInt64()
default:
iter.Skip()
}
}
}
func SizeProtoFunction(orig *otlpprofiles.Function) int {
var n int
var l int
_ = l
if orig.NameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.NameStrindex))
}
if orig.SystemNameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.SystemNameStrindex))
}
if orig.FilenameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
}
if orig.StartLine != 0 {
n += 1 + proto.Sov(uint64(orig.StartLine))
}
return n
}
func MarshalProtoFunction(orig *otlpprofiles.Function, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.NameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.NameStrindex))
pos--
buf[pos] = 0x8
}
if orig.SystemNameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.SystemNameStrindex))
pos--
buf[pos] = 0x10
}
if orig.FilenameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
pos--
buf[pos] = 0x18
}
if orig.StartLine != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.StartLine))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
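// Illustrative note (not generated): the literal tag bytes above are
// precomputed proto field keys, (field_number << 3) | wire_type. For example
// 0x8 is field 1 with wire type 0 (varint) for NameStrindex, and 0x20 is
// field 4/varint for StartLine. A hypothetical helper, valid for field
// numbers up to 15 (single-byte tags):
func exampleProtoTagByte(fieldNum, wireType byte) byte {
return fieldNum<<3 | wireType
}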
func UnmarshalProtoFunction(orig *otlpprofiles.Function, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.NameStrindex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.SystemNameStrindex = int32(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FilenameStrindex = int32(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.StartLine = int64(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyFunctionSlice(dest, src []*otlpprofiles.Function) []*otlpprofiles.Function {
var newDest []*otlpprofiles.Function
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Function, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewFunction()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteFunction(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewFunction()
}
}
for i := range src {
CopyFunction(newDest[i], src[i])
}
return newDest
}
func GenTestFunctionSlice() []*otlpprofiles.Function {
orig := make([]*otlpprofiles.Function, 5)
orig[0] = NewFunction()
orig[1] = GenTestFunction()
orig[2] = NewFunction()
orig[3] = GenTestFunction()
orig[4] = NewFunction()
return orig
}
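// Illustrative sketch (not generated): CopyFunctionSlice grows or shrinks
// dest in place where capacity allows, recycling surplus pointer elements via
// DeleteFunction so they can return to the pool.
func exampleCopyFunctionSlice() {
src := GenTestFunctionSlice() // 5 elements
dest := CopyFunctionSlice(nil, src) // allocates and deep-copies 5
dest = CopyFunctionSlice(dest, src[:2]) // shrinks in place, recycles 3
_ = dest
}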
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolGauge = sync.Pool{
New: func() any {
return &otlpmetrics.Gauge{}
},
}
)
func NewGauge() *otlpmetrics.Gauge {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Gauge{}
}
return protoPoolGauge.Get().(*otlpmetrics.Gauge)
}
func DeleteGauge(orig *otlpmetrics.Gauge, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteNumberDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolGauge.Put(orig)
}
}
func CopyGauge(dest, src *otlpmetrics.Gauge) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.DataPoints = CopyNumberDataPointSlice(dest.DataPoints, src.DataPoints)
}
func GenTestGauge() *otlpmetrics.Gauge {
orig := NewGauge()
orig.DataPoints = GenTestNumberDataPointSlice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONGauge(orig *otlpmetrics.Gauge, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
MarshalJSONNumberDataPoint(orig.DataPoints[0], dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
MarshalJSONNumberDataPoint(orig.DataPoints[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONGauge unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONGauge(orig *otlpmetrics.Gauge, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
UnmarshalJSONNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
}
default:
iter.Skip()
}
}
}
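// Illustrative note (not generated): as in the other generated decoders,
// both the lowerCamelCase and snake_case spellings are accepted on input —
// {"dataPoints":[...]} and {"data_points":[...]} decode identically — while
// MarshalJSONGauge always emits lowerCamelCase.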
func SizeProtoGauge(orig *otlpmetrics.Gauge) int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = SizeProtoNumberDataPoint(orig.DataPoints[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoGauge(orig *otlpmetrics.Gauge, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = MarshalProtoNumberDataPoint(orig.DataPoints[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoGauge(orig *otlpmetrics.Gauge, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
err = UnmarshalProtoNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolHistogram = sync.Pool{
New: func() any {
return &otlpmetrics.Histogram{}
},
}
)
func NewHistogram() *otlpmetrics.Histogram {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Histogram{}
}
return protoPoolHistogram.Get().(*otlpmetrics.Histogram)
}
func DeleteHistogram(orig *otlpmetrics.Histogram, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteHistogramDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolHistogram.Put(orig)
}
}
func CopyHistogram(dest, src *otlpmetrics.Histogram) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.DataPoints = CopyHistogramDataPointSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
}
func GenTestHistogram() *otlpmetrics.Histogram {
orig := NewHistogram()
orig.DataPoints = GenTestHistogramDataPointSlice()
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONHistogram(orig *otlpmetrics.Histogram, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
MarshalJSONHistogramDataPoint(orig.DataPoints[0], dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
MarshalJSONHistogramDataPoint(orig.DataPoints[i], dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
dest.WriteObjectEnd()
}
// UnmarshalJSONHistogram unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONHistogram(orig *otlpmetrics.Histogram, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
UnmarshalJSONHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
default:
iter.Skip()
}
}
}
func SizeProtoHistogram(orig *otlpmetrics.Histogram) int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = SizeProtoHistogramDataPoint(orig.DataPoints[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
return n
}
func MarshalProtoHistogram(orig *otlpmetrics.Histogram, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = MarshalProtoHistogramDataPoint(orig.DataPoints[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
return len(buf) - pos
}
func UnmarshalProtoHistogram(orig *otlpmetrics.Histogram, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
err = UnmarshalProtoHistogramDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolHistogramDataPoint = sync.Pool{
New: func() any {
return &otlpmetrics.HistogramDataPoint{}
},
}
ProtoPoolHistogramDataPoint_Sum = sync.Pool{
New: func() any {
return &otlpmetrics.HistogramDataPoint_Sum{}
},
}
ProtoPoolHistogramDataPoint_Min = sync.Pool{
New: func() any {
return &otlpmetrics.HistogramDataPoint_Min{}
},
}
ProtoPoolHistogramDataPoint_Max = sync.Pool{
New: func() any {
return &otlpmetrics.HistogramDataPoint_Max{}
},
}
)
func NewHistogramDataPoint() *otlpmetrics.HistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.HistogramDataPoint{}
}
return protoPoolHistogramDataPoint.Get().(*otlpmetrics.HistogramDataPoint)
}
func DeleteHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
case *otlpmetrics.HistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolHistogramDataPoint_Sum.Put(ov)
}
}
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
case *otlpmetrics.HistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolHistogramDataPoint_Min.Put(ov)
}
}
switch ov := orig.Max_.(type) {
case *otlpmetrics.HistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolHistogramDataPoint_Max.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolHistogramDataPoint.Put(orig)
}
}
func CopyHistogramDataPoint(dest, src *otlpmetrics.HistogramDataPoint) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
if srcSum, ok := src.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
destSum, ok := dest.Sum_.(*otlpmetrics.HistogramDataPoint_Sum)
if !ok {
destSum = &otlpmetrics.HistogramDataPoint_Sum{}
dest.Sum_ = destSum
}
destSum.Sum = srcSum.Sum
} else {
dest.Sum_ = nil
}
dest.BucketCounts = CopyUint64Slice(dest.BucketCounts, src.BucketCounts)
dest.ExplicitBounds = CopyFloat64Slice(dest.ExplicitBounds, src.ExplicitBounds)
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
dest.Flags = src.Flags
if srcMin, ok := src.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
destMin, ok := dest.Min_.(*otlpmetrics.HistogramDataPoint_Min)
if !ok {
destMin = &otlpmetrics.HistogramDataPoint_Min{}
dest.Min_ = destMin
}
destMin.Min = srcMin.Min
} else {
dest.Min_ = nil
}
if srcMax, ok := src.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
destMax, ok := dest.Max_.(*otlpmetrics.HistogramDataPoint_Max)
if !ok {
destMax = &otlpmetrics.HistogramDataPoint_Max{}
dest.Max_ = destMax
}
destMax.Max = srcMax.Max
} else {
dest.Max_ = nil
}
}
func GenTestHistogramDataPoint() *otlpmetrics.HistogramDataPoint {
orig := NewHistogramDataPoint()
orig.Attributes = GenTestKeyValueSlice()
orig.StartTimeUnixNano = 1234567890
orig.TimeUnixNano = 1234567890
orig.Count = uint64(13)
orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: float64(3.1415926)}
orig.BucketCounts = GenTestUint64Slice()
orig.ExplicitBounds = GenTestFloat64Slice()
orig.Exemplars = GenTestExemplarSlice()
orig.Flags = 1
orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: float64(3.1415926)}
orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: float64(3.1415926)}
return orig
}
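// Illustrative sketch (not generated): Sum, Min and Max are proto3 optional
// scalars modeled as single-member oneof wrappers, so an unset value stays
// distinguishable from an explicit zero.
func exampleHistogramSum(dp *otlpmetrics.HistogramDataPoint) (float64, bool) {
if sum, ok := dp.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
return sum.Sum, true // set, possibly to 0
}
return 0, false // unset
}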
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if len(orig.BucketCounts) > 0 {
dest.WriteObjectField("bucketCounts")
dest.WriteArrayStart()
dest.WriteUint64(orig.BucketCounts[0])
for i := 1; i < len(orig.BucketCounts); i++ {
dest.WriteMore()
dest.WriteUint64(orig.BucketCounts[i])
}
dest.WriteArrayEnd()
}
if len(orig.ExplicitBounds) > 0 {
dest.WriteObjectField("explicitBounds")
dest.WriteArrayStart()
dest.WriteFloat64(orig.ExplicitBounds[0])
for i := 1; i < len(orig.ExplicitBounds); i++ {
dest.WriteMore()
dest.WriteFloat64(orig.ExplicitBounds[i])
}
dest.WriteArrayEnd()
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
MarshalJSONExemplar(&orig.Exemplars[0], dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
MarshalJSONExemplar(&orig.Exemplars[i], dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONHistogramDataPoint unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
{
var ov *otlpmetrics.HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Sum{}
} else {
ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
}
case "bucketCounts", "bucket_counts":
for iter.ReadArray() {
orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
}
case "explicitBounds", "explicit_bounds":
for iter.ReadArray() {
orig.ExplicitBounds = append(orig.ExplicitBounds, iter.ReadFloat64())
}
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
UnmarshalJSONExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
case "min":
{
var ov *otlpmetrics.HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Min{}
} else {
ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
}
case "max":
{
var ov *otlpmetrics.HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Max{}
} else {
ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
}
default:
iter.Skip()
}
}
}
func SizeProtoHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint) int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
l = len(orig.BucketCounts)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.ExplicitBounds)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Exemplars {
l = SizeProtoExemplar(&orig.Exemplars[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
_ = orig
n += 9
}
if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
_ = orig
n += 9
}
return n
}
func MarshalProtoHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig, ok := orig.Sum_.(*otlpmetrics.HistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
l = len(orig.BucketCounts)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.BucketCounts[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x32
}
l = len(orig.ExplicitBounds)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ExplicitBounds[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x3a
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = MarshalProtoExemplar(&orig.Exemplars[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x50
}
if orig, ok := orig.Min_.(*otlpmetrics.HistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x59
}
if orig, ok := orig.Max_.(*otlpmetrics.HistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
buf[pos] = 0x61
}
return len(buf) - pos
}
func UnmarshalProtoHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Sum{}
} else {
ov = ProtoPoolHistogramDataPoint_Sum.Get().(*otlpmetrics.HistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
case 6:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.BucketCounts = make([]uint64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.BucketCounts[i] = uint64(num)
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
case 7:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.ExplicitBounds = make([]float64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.ExplicitBounds[i] = math.Float64frombits(num)
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field ExplicitBounds", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ExplicitBounds = append(orig.ExplicitBounds, math.Float64frombits(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
}
case 8:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
err = UnmarshalProtoExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 11:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Min{}
} else {
ov = ProtoPoolHistogramDataPoint_Min.Get().(*otlpmetrics.HistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
case 12:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.HistogramDataPoint_Max{}
} else {
ov = ProtoPoolHistogramDataPoint_Max.Get().(*otlpmetrics.HistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
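// Illustrative sketch (not generated): repeated fixed64/double fields such as
// BucketCounts accept both wire forms — the packed LEN encoding (tag 0x32,
// which MarshalProtoHistogramDataPoint above writes) and the unpacked
// per-value I64 encoding (tag 0x31) — as proto3 parsers must. A
// hand-assembled packed payload:
func examplePackedBucketCounts() error {
buf := []byte{
0x32, 16, // field 6, wire type LEN, 16 payload bytes
1, 0, 0, 0, 0, 0, 0, 0, // fixed64(1), little-endian
2, 0, 0, 0, 0, 0, 0, 0, // fixed64(2)
}
dp := NewHistogramDataPoint()
return UnmarshalProtoHistogramDataPoint(dp, buf) // dp.BucketCounts == []uint64{1, 2}
}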
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyHistogramDataPointSlice(dest, src []*otlpmetrics.HistogramDataPoint) []*otlpmetrics.HistogramDataPoint {
var newDest []*otlpmetrics.HistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.HistogramDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogramDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteHistogramDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogramDataPoint()
}
}
for i := range src {
CopyHistogramDataPoint(newDest[i], src[i])
}
return newDest
}
func GenTestHistogramDataPointSlice() []*otlpmetrics.HistogramDataPoint {
orig := make([]*otlpmetrics.HistogramDataPoint, 5)
orig[0] = NewHistogramDataPoint()
orig[1] = GenTestHistogramDataPoint()
orig[2] = NewHistogramDataPoint()
orig[3] = GenTestHistogramDataPoint()
orig[4] = NewHistogramDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type InstrumentationScopeWrapper struct {
orig *otlpcommon.InstrumentationScope
state *State
}
func GetInstrumentationScopeOrig(ms InstrumentationScopeWrapper) *otlpcommon.InstrumentationScope {
return ms.orig
}
func GetInstrumentationScopeState(ms InstrumentationScopeWrapper) *State {
return ms.state
}
func NewInstrumentationScopeWrapper(orig *otlpcommon.InstrumentationScope, state *State) InstrumentationScopeWrapper {
return InstrumentationScopeWrapper{orig: orig, state: state}
}
func GenTestInstrumentationScopeWrapper() InstrumentationScopeWrapper {
orig := GenTestInstrumentationScope()
return NewInstrumentationScopeWrapper(orig, NewState())
}
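// Illustrative note (not generated): the Wrapper types pair the underlying
// protogen struct with a *State; the exported pdata API appears to use the
// State to track mutability (for example, rejecting writes to shared,
// read-only data) without copying the payload. A minimal accessor sketch:
func exampleScopeName(ms InstrumentationScopeWrapper) string {
return GetInstrumentationScopeOrig(ms).Name
}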
var (
protoPoolInstrumentationScope = sync.Pool{
New: func() any {
return &otlpcommon.InstrumentationScope{}
},
}
)
func NewInstrumentationScope() *otlpcommon.InstrumentationScope {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.InstrumentationScope{}
}
return protoPoolInstrumentationScope.Get().(*otlpcommon.InstrumentationScope)
}
func DeleteInstrumentationScope(orig *otlpcommon.InstrumentationScope, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolInstrumentationScope.Put(orig)
}
}
func CopyInstrumentationScope(dest, src *otlpcommon.InstrumentationScope) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Name = src.Name
dest.Version = src.Version
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
}
func GenTestInstrumentationScope() *otlpcommon.InstrumentationScope {
orig := NewInstrumentationScope()
orig.Name = "test_name"
orig.Version = "test_version"
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONInstrumentationScope(orig *otlpcommon.InstrumentationScope, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if orig.Version != "" {
dest.WriteObjectField("version")
dest.WriteString(orig.Version)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONInstrumentationScope unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONInstrumentationScope(orig *otlpcommon.InstrumentationScope, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "name":
orig.Name = iter.ReadString()
case "version":
orig.Version = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func SizeProtoInstrumentationScope(orig *otlpcommon.InstrumentationScope) int {
var n int
var l int
_ = l
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Version)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
return n
}
func MarshalProtoInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Version)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Version)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
func UnmarshalProtoInstrumentationScope(orig *otlpcommon.InstrumentationScope, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Version = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Int32SliceWrapper struct {
orig *[]int32
state *State
}
func GetInt32SliceOrig(ms Int32SliceWrapper) *[]int32 {
return ms.orig
}
func GetInt32SliceState(ms Int32SliceWrapper) *State {
return ms.state
}
func NewInt32SliceWrapper(orig *[]int32, state *State) Int32SliceWrapper {
return Int32SliceWrapper{orig: orig, state: state}
}
func GenTestInt32SliceWrapper() Int32SliceWrapper {
orig := GenTestInt32Slice()
return NewInt32SliceWrapper(&orig, NewState())
}
func CopyInt32Slice(dst, src []int32) []int32 {
return append(dst[:0], src...)
}
func GenTestInt32Slice() []int32 {
return []int32{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Int64SliceWrapper struct {
orig *[]int64
state *State
}
func GetInt64SliceOrig(ms Int64SliceWrapper) *[]int64 {
return ms.orig
}
func GetInt64SliceState(ms Int64SliceWrapper) *State {
return ms.state
}
func NewInt64SliceWrapper(orig *[]int64, state *State) Int64SliceWrapper {
return Int64SliceWrapper{orig: orig, state: state}
}
func GenTestInt64SliceWrapper() Int64SliceWrapper {
orig := GenTestInt64Slice()
return NewInt64SliceWrapper(&orig, NewState())
}
func CopyInt64Slice(dst, src []int64) []int64 {
return append(dst[:0], src...)
}
func GenTestInt64Slice() []int64 {
return []int64{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolKeyValue = sync.Pool{
New: func() any {
return &otlpcommon.KeyValue{}
},
}
)
func NewKeyValue() *otlpcommon.KeyValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.KeyValue{}
}
return protoPoolKeyValue.Get().(*otlpcommon.KeyValue)
}
func DeleteKeyValue(orig *otlpcommon.KeyValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Value, false)
orig.Reset()
if nullable {
protoPoolKeyValue.Put(orig)
}
}
func CopyKeyValue(dest, src *otlpcommon.KeyValue) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Key = src.Key
CopyAnyValue(&dest.Value, &src.Value)
}
func GenTestKeyValue() *otlpcommon.KeyValue {
orig := NewKeyValue()
orig.Key = "test_key"
orig.Value = *GenTestAnyValue()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONKeyValue(orig *otlpcommon.KeyValue, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Key != "" {
dest.WriteObjectField("key")
dest.WriteString(orig.Key)
}
dest.WriteObjectField("value")
MarshalJSONAnyValue(&orig.Value, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONKeyValue unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONKeyValue(orig *otlpcommon.KeyValue, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "key":
orig.Key = iter.ReadString()
case "value":
UnmarshalJSONAnyValue(&orig.Value, iter)
default:
iter.Skip()
}
}
}
func SizeProtoKeyValue(orig *otlpcommon.KeyValue) int {
var n int
var l int
_ = l
l = len(orig.Key)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = SizeProtoAnyValue(&orig.Value)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoKeyValue(orig *otlpcommon.KeyValue, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Key)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Key)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = MarshalProtoAnyValue(&orig.Value, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
func UnmarshalProtoKeyValue(orig *otlpcommon.KeyValue, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Key = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoAnyValue(&orig.Value, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
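// Illustrative sketch (not generated): a minimal proto round-trip built from
// the helpers above; buf is sized exactly, so the marshaled bytes fill it.
func exampleRoundTripKeyValue(orig *otlpcommon.KeyValue) (*otlpcommon.KeyValue, error) {
buf := make([]byte, SizeProtoKeyValue(orig))
MarshalProtoKeyValue(orig, buf)
dest := NewKeyValue()
if err := UnmarshalProtoKeyValue(dest, buf); err != nil {
return nil, err
}
return dest, nil
}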
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolKeyValueAndUnit = sync.Pool{
New: func() any {
return &otlpprofiles.KeyValueAndUnit{}
},
}
)
func NewKeyValueAndUnit() *otlpprofiles.KeyValueAndUnit {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.KeyValueAndUnit{}
}
return protoPoolKeyValueAndUnit.Get().(*otlpprofiles.KeyValueAndUnit)
}
func DeleteKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Value, false)
orig.Reset()
if nullable {
protoPoolKeyValueAndUnit.Put(orig)
}
}
func CopyKeyValueAndUnit(dest, src *otlpprofiles.KeyValueAndUnit) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.KeyStrindex = src.KeyStrindex
CopyAnyValue(&dest.Value, &src.Value)
dest.UnitStrindex = src.UnitStrindex
}
func GenTestKeyValueAndUnit() *otlpprofiles.KeyValueAndUnit {
orig := NewKeyValueAndUnit()
orig.KeyStrindex = int32(13)
orig.Value = *GenTestAnyValue()
orig.UnitStrindex = int32(13)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, dest *json.Stream) {
dest.WriteObjectStart()
if orig.KeyStrindex != int32(0) {
dest.WriteObjectField("keyStrindex")
dest.WriteInt32(orig.KeyStrindex)
}
dest.WriteObjectField("value")
MarshalJSONAnyValue(&orig.Value, dest)
if orig.UnitStrindex != int32(0) {
dest.WriteObjectField("unitStrindex")
dest.WriteInt32(orig.UnitStrindex)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONKeyValueAndUnit unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "keyStrindex", "key_strindex":
orig.KeyStrindex = iter.ReadInt32()
case "value":
UnmarshalJSONAnyValue(&orig.Value, iter)
case "unitStrindex", "unit_strindex":
orig.UnitStrindex = iter.ReadInt32()
default:
iter.Skip()
}
}
}
func SizeProtoKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit) int {
var n int
var l int
_ = l
if orig.KeyStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.KeyStrindex))
}
l = SizeProtoAnyValue(&orig.Value)
n += 1 + proto.Sov(uint64(l)) + l
if orig.UnitStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.UnitStrindex))
}
return n
}
func MarshalProtoKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.KeyStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex))
pos--
buf[pos] = 0x8
}
l = MarshalProtoAnyValue(&orig.Value, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
if orig.UnitStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func UnmarshalProtoKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.KeyStrindex = int32(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoAnyValue(&orig.Value, buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.UnitStrindex = int32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyKeyValueAndUnitSlice(dest, src []*otlpprofiles.KeyValueAndUnit) []*otlpprofiles.KeyValueAndUnit {
var newDest []*otlpprofiles.KeyValueAndUnit
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.KeyValueAndUnit, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueAndUnit()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValueAndUnit(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueAndUnit()
}
}
for i := range src {
CopyKeyValueAndUnit(newDest[i], src[i])
}
return newDest
}
func GenTestKeyValueAndUnitSlice() []*otlpprofiles.KeyValueAndUnit {
orig := make([]*otlpprofiles.KeyValueAndUnit, 5)
orig[0] = NewKeyValueAndUnit()
orig[1] = GenTestKeyValueAndUnit()
orig[2] = NewKeyValueAndUnit()
orig[3] = GenTestKeyValueAndUnit()
orig[4] = NewKeyValueAndUnit()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolKeyValueList = sync.Pool{
New: func() any {
return &otlpcommon.KeyValueList{}
},
}
)
func NewKeyValueList() *otlpcommon.KeyValueList {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.KeyValueList{}
}
return protoPoolKeyValueList.Get().(*otlpcommon.KeyValueList)
}
func DeleteKeyValueList(orig *otlpcommon.KeyValueList, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Values {
DeleteKeyValue(&orig.Values[i], false)
}
orig.Reset()
if nullable {
protoPoolKeyValueList.Put(orig)
}
}
func CopyKeyValueList(dest, src *otlpcommon.KeyValueList) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Values = CopyKeyValueSlice(dest.Values, src.Values)
}
func GenTestKeyValueList() *otlpcommon.KeyValueList {
orig := NewKeyValueList()
orig.Values = GenTestKeyValueSlice()
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONKeyValueList(orig *otlpcommon.KeyValueList, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Values[0], dest)
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Values[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONKeyValueList unmarshals all properties of orig from the source iterator.
func UnmarshalJSONKeyValueList(orig *otlpcommon.KeyValueList, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Values[len(orig.Values)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoKeyValueList(orig *otlpcommon.KeyValueList) int {
var n int
var l int
_ = l
for i := range orig.Values {
l = SizeProtoKeyValue(&orig.Values[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) int {
pos := len(buf)
var l int
_ = l
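// Fields are marshaled back-to-front: iterating Values in reverse keeps them
// in their original order within the backwards-filled buffer, and each element
// is prefixed with its varint length and the field-1 LEN tag (0xa).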
for i := len(orig.Values) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Values[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoKeyValueList(orig *otlpcommon.KeyValueList, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Values = append(orig.Values, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Values[len(orig.Values)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
func CopyKeyValueSlice(dest, src []otlpcommon.KeyValue) []otlpcommon.KeyValue {
var newDest []otlpcommon.KeyValue
if cap(dest) < len(src) {
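// Value-typed elements: unlike the pointer-slice variants, growth simply
// reallocates the backing array; there are no old pointers to carry over.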
newDest = make([]otlpcommon.KeyValue, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValue(&dest[i], false)
}
}
for i := range src {
CopyKeyValue(&newDest[i], &src[i])
}
return newDest
}
func GenTestKeyValueSlice() []otlpcommon.KeyValue {
orig := make([]otlpcommon.KeyValue, 5)
orig[1] = *GenTestKeyValue()
orig[3] = *GenTestKeyValue()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolLine = sync.Pool{
New: func() any {
return &otlpprofiles.Line{}
},
}
)
func NewLine() *otlpprofiles.Line {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Line{}
}
return protoPoolLine.Get().(*otlpprofiles.Line)
}
func DeleteLine(orig *otlpprofiles.Line, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolLine.Put(orig)
}
}
func CopyLine(dest, src *otlpprofiles.Line) {
// If copying to same object, just return.
if src == dest {
return
}
dest.FunctionIndex = src.FunctionIndex
dest.Line = src.Line
dest.Column = src.Column
}
func GenTestLine() *otlpprofiles.Line {
orig := NewLine()
orig.FunctionIndex = int32(13)
orig.Line = int64(13)
orig.Column = int64(13)
return orig
}
// MarshalJSONLine marshals all properties of orig to the destination stream.
func MarshalJSONLine(orig *otlpprofiles.Line, dest *json.Stream) {
dest.WriteObjectStart()
if orig.FunctionIndex != int32(0) {
dest.WriteObjectField("functionIndex")
dest.WriteInt32(orig.FunctionIndex)
}
if orig.Line != int64(0) {
dest.WriteObjectField("line")
dest.WriteInt64(orig.Line)
}
if orig.Column != int64(0) {
dest.WriteObjectField("column")
dest.WriteInt64(orig.Column)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONLine unmarshals all properties of orig from the source iterator.
func UnmarshalJSONLine(orig *otlpprofiles.Line, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "functionIndex", "function_index":
orig.FunctionIndex = iter.ReadInt32()
case "line":
orig.Line = iter.ReadInt64()
case "column":
orig.Column = iter.ReadInt64()
default:
iter.Skip()
}
}
}
func SizeProtoLine(orig *otlpprofiles.Line) int {
var n int
var l int
_ = l
if orig.FunctionIndex != 0 {
n += 1 + proto.Sov(uint64(orig.FunctionIndex))
}
if orig.Line != 0 {
n += 1 + proto.Sov(uint64(orig.Line))
}
if orig.Column != 0 {
n += 1 + proto.Sov(uint64(orig.Column))
}
return n
}
func MarshalProtoLine(orig *otlpprofiles.Line, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.FunctionIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FunctionIndex))
pos--
buf[pos] = 0x8
}
if orig.Line != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Line))
pos--
buf[pos] = 0x10
}
if orig.Column != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Column))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func UnmarshalProtoLine(orig *otlpprofiles.Line, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FunctionIndex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Line = int64(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Column = int64(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
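// exampleProtoRoundTripLine is an illustrative sketch, not generated code:
// it composes the three proto helpers above in their intended order — size
// first, marshal backwards into the tail of the buffer, then parse the
// written bytes forwards. The function name is an assumption for
// demonstration only.
func exampleProtoRoundTripLine() error {
orig := GenTestLine()
buf := make([]byte, SizeProtoLine(orig))
// MarshalProtoLine fills buf from the end and returns the byte count written.
n := MarshalProtoLine(orig, buf)
dest := NewLine()
return UnmarshalProtoLine(dest, buf[len(buf)-n:])
}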
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyLineSlice(dest, src []*otlpprofiles.Line) []*otlpprofiles.Line {
var newDest []*otlpprofiles.Line
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Line, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLine()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLine(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLine()
}
}
for i := range src {
CopyLine(newDest[i], src[i])
}
return newDest
}
func GenTestLineSlice() []*otlpprofiles.Line {
orig := make([]*otlpprofiles.Line, 5)
orig[0] = NewLine()
orig[1] = GenTestLine()
orig[2] = NewLine()
orig[3] = GenTestLine()
orig[4] = NewLine()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolLink = sync.Pool{
New: func() any {
return &otlpprofiles.Link{}
},
}
)
func NewLink() *otlpprofiles.Link {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Link{}
}
return protoPoolLink.Get().(*otlpprofiles.Link)
}
func DeleteLink(orig *otlpprofiles.Link, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
protoPoolLink.Put(orig)
}
}
func CopyLink(dest, src *otlpprofiles.Link) {
// If copying to same object, just return.
if src == dest {
return
}
dest.TraceId = src.TraceId
dest.SpanId = src.SpanId
}
func GenTestLink() *otlpprofiles.Link {
orig := NewLink()
orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
return orig
}
// MarshalJSONLink marshals all properties of orig to the destination stream.
func MarshalJSONLink(orig *otlpprofiles.Link, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TraceId != data.TraceID([16]byte{}) {
dest.WriteObjectField("traceId")
MarshalJSONTraceID(&orig.TraceId, dest)
}
if orig.SpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("spanId")
MarshalJSONSpanID(&orig.SpanId, dest)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONLink unmarshals all properties of orig from the source iterator.
func UnmarshalJSONLink(orig *otlpprofiles.Link, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
UnmarshalJSONTraceID(&orig.TraceId, iter)
case "spanId", "span_id":
UnmarshalJSONSpanID(&orig.SpanId, iter)
default:
iter.Skip()
}
}
}
func SizeProtoLink(orig *otlpprofiles.Link) int {
var n int
var l int
_ = l
l = SizeProtoTraceID(&orig.TraceId)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoSpanID(&orig.SpanId)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoLink(orig *otlpprofiles.Link, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoTraceID(&orig.TraceId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = MarshalProtoSpanID(&orig.SpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
func UnmarshalProtoLink(orig *otlpprofiles.Link, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoTraceID(&orig.TraceId, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.SpanId, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyLinkSlice(dest, src []*otlpprofiles.Link) []*otlpprofiles.Link {
var newDest []*otlpprofiles.Link
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Link, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLink()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLink(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLink()
}
}
for i := range src {
CopyLink(newDest[i], src[i])
}
return newDest
}
func GenTestLinkSlice() []*otlpprofiles.Link {
orig := make([]*otlpprofiles.Link, 5)
orig[0] = NewLink()
orig[1] = GenTestLink()
orig[2] = NewLink()
orig[3] = GenTestLink()
orig[4] = NewLink()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolLocation = sync.Pool{
New: func() any {
return &otlpprofiles.Location{}
},
}
)
func NewLocation() *otlpprofiles.Location {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Location{}
}
return protoPoolLocation.Get().(*otlpprofiles.Location)
}
func DeleteLocation(orig *otlpprofiles.Location, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Line {
DeleteLine(orig.Line[i], true)
}
orig.Reset()
if nullable {
protoPoolLocation.Put(orig)
}
}
func CopyLocation(dest, src *otlpprofiles.Location) {
// If copying to same object, just return.
if src == dest {
return
}
dest.MappingIndex = src.MappingIndex
dest.Address = src.Address
dest.Line = CopyLineSlice(dest.Line, src.Line)
dest.AttributeIndices = CopyInt32Slice(dest.AttributeIndices, src.AttributeIndices)
}
func GenTestLocation() *otlpprofiles.Location {
orig := NewLocation()
orig.MappingIndex = int32(13)
orig.Address = uint64(13)
orig.Line = GenTestLineSlice()
orig.AttributeIndices = GenTestInt32Slice()
return orig
}
// MarshalJSONLocation marshals all properties of orig to the destination stream.
func MarshalJSONLocation(orig *otlpprofiles.Location, dest *json.Stream) {
dest.WriteObjectStart()
if orig.MappingIndex != int32(0) {
dest.WriteObjectField("mappingIndex")
dest.WriteInt32(orig.MappingIndex)
}
if orig.Address != uint64(0) {
dest.WriteObjectField("address")
dest.WriteUint64(orig.Address)
}
if len(orig.Line) > 0 {
dest.WriteObjectField("line")
dest.WriteArrayStart()
MarshalJSONLine(orig.Line[0], dest)
for i := 1; i < len(orig.Line); i++ {
dest.WriteMore()
MarshalJSONLine(orig.Line[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONLocation unmarshals all properties of orig from the source iterator.
func UnmarshalJSONLocation(orig *otlpprofiles.Location, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "mappingIndex", "mapping_index":
orig.MappingIndex = iter.ReadInt32()
case "address":
orig.Address = iter.ReadUint64()
case "line":
for iter.ReadArray() {
orig.Line = append(orig.Line, NewLine())
UnmarshalJSONLine(orig.Line[len(orig.Line)-1], iter)
}
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func SizeProtoLocation(orig *otlpprofiles.Location) int {
var n int
var l int
_ = l
if orig.MappingIndex != 0 {
n += 1 + proto.Sov(uint64(orig.MappingIndex))
}
if orig.Address != 0 {
n += 1 + proto.Sov(uint64(orig.Address))
}
for i := range orig.Line {
l = SizeProtoLine(orig.Line[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoLocation(orig *otlpprofiles.Location, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.MappingIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex))
pos--
buf[pos] = 0x8
}
if orig.Address != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Address))
pos--
buf[pos] = 0x10
}
for i := len(orig.Line) - 1; i >= 0; i-- {
l = MarshalProtoLine(orig.Line[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x22
}
return len(buf) - pos
}
func UnmarshalProtoLocation(orig *otlpprofiles.Location, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MappingIndex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Address = uint64(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Line = append(orig.Line, NewLine())
err = UnmarshalProtoLine(orig.Line[len(orig.Line)-1], buf[startPos:pos])
if err != nil {
return err
}
case 4:
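// AttributeIndices is a repeated varint field: proto3 encodes it packed
// (WireTypeLen) by default, but a conforming decoder must also accept the
// unpacked form, one WireTypeVarint element per tag.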
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyLocationSlice(dest, src []*otlpprofiles.Location) []*otlpprofiles.Location {
var newDest []*otlpprofiles.Location
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Location, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLocation()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLocation(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLocation()
}
}
for i := range src {
CopyLocation(newDest[i], src[i])
}
return newDest
}
func GenTestLocationSlice() []*otlpprofiles.Location {
orig := make([]*otlpprofiles.Location, 5)
orig[0] = NewLocation()
orig[1] = GenTestLocation()
orig[2] = NewLocation()
orig[3] = GenTestLocation()
orig[4] = NewLocation()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolLogRecord = sync.Pool{
New: func() any {
return &otlplogs.LogRecord{}
},
}
)
func NewLogRecord() *otlplogs.LogRecord {
if !UseProtoPooling.IsEnabled() {
return &otlplogs.LogRecord{}
}
return protoPoolLogRecord.Get().(*otlplogs.LogRecord)
}
func DeleteLogRecord(orig *otlplogs.LogRecord, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Body, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
protoPoolLogRecord.Put(orig)
}
}
func CopyLogRecord(dest, src *otlplogs.LogRecord) {
// If copying to same object, just return.
if src == dest {
return
}
dest.TimeUnixNano = src.TimeUnixNano
dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano
dest.SeverityNumber = src.SeverityNumber
dest.SeverityText = src.SeverityText
CopyAnyValue(&dest.Body, &src.Body)
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Flags = src.Flags
dest.TraceId = src.TraceId
dest.SpanId = src.SpanId
dest.EventName = src.EventName
}
func GenTestLogRecord() *otlplogs.LogRecord {
orig := NewLogRecord()
orig.TimeUnixNano = 1234567890
orig.ObservedTimeUnixNano = 1234567890
orig.SeverityNumber = otlplogs.SeverityNumber(5)
orig.SeverityText = "test_severitytext"
orig.Body = *GenTestAnyValue()
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
orig.Flags = 1
orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
orig.EventName = "test_eventname"
return orig
}
// MarshalJSONLogRecord marshals all properties of orig to the destination stream.
func MarshalJSONLogRecord(orig *otlplogs.LogRecord, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.ObservedTimeUnixNano != uint64(0) {
dest.WriteObjectField("observedTimeUnixNano")
dest.WriteUint64(orig.ObservedTimeUnixNano)
}
if int32(orig.SeverityNumber) != 0 {
dest.WriteObjectField("severityNumber")
dest.WriteInt32(int32(orig.SeverityNumber))
}
if orig.SeverityText != "" {
dest.WriteObjectField("severityText")
dest.WriteString(orig.SeverityText)
}
dest.WriteObjectField("body")
MarshalJSONAnyValue(&orig.Body, dest)
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if orig.TraceId != data.TraceID([16]byte{}) {
dest.WriteObjectField("traceId")
MarshalJSONTraceID(&orig.TraceId, dest)
}
if orig.SpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("spanId")
MarshalJSONSpanID(&orig.SpanId, dest)
}
if orig.EventName != "" {
dest.WriteObjectField("eventName")
dest.WriteString(orig.EventName)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONLogRecord unmarshals all properties of orig from the source iterator.
func UnmarshalJSONLogRecord(orig *otlplogs.LogRecord, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "observedTimeUnixNano", "observed_time_unix_nano":
orig.ObservedTimeUnixNano = iter.ReadUint64()
case "severityNumber", "severity_number":
orig.SeverityNumber = otlplogs.SeverityNumber(iter.ReadEnumValue(otlplogs.SeverityNumber_value))
case "severityText", "severity_text":
orig.SeverityText = iter.ReadString()
case "body":
UnmarshalJSONAnyValue(&orig.Body, iter)
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "flags":
orig.Flags = iter.ReadUint32()
case "traceId", "trace_id":
UnmarshalJSONTraceID(&orig.TraceId, iter)
case "spanId", "span_id":
UnmarshalJSONSpanID(&orig.SpanId, iter)
case "eventName", "event_name":
orig.EventName = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoLogRecord(orig *otlplogs.LogRecord) int {
var n int
var l int
_ = l
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.ObservedTimeUnixNano != 0 {
n += 9
}
if orig.SeverityNumber != 0 {
n += 1 + proto.Sov(uint64(orig.SeverityNumber))
}
l = len(orig.SeverityText)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = SizeProtoAnyValue(&orig.Body)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
if orig.Flags != 0 {
n += 5
}
l = SizeProtoTraceID(&orig.TraceId)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoSpanID(&orig.SpanId)
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.EventName)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoLogRecord(orig *otlplogs.LogRecord, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x9
}
if orig.ObservedTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ObservedTimeUnixNano))
pos--
buf[pos] = 0x59
}
if orig.SeverityNumber != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.SeverityNumber))
pos--
buf[pos] = 0x10
}
l = len(orig.SeverityText)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SeverityText)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = MarshalProtoAnyValue(&orig.Body, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x38
}
if orig.Flags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x45
}
l = MarshalProtoTraceID(&orig.TraceId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
l = MarshalProtoSpanID(&orig.SpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
l = len(orig.EventName)
if l > 0 {
pos -= l
copy(buf[pos:], orig.EventName)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x62
}
return len(buf) - pos
}
func UnmarshalProtoLogRecord(orig *otlplogs.LogRecord, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
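// Cases follow the struct's field declaration order, so ObservedTimeUnixNano
// (field number 11) is handled here rather than in numeric sequence.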
case 11:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ObservedTimeUnixNano = uint64(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.SeverityNumber = otlplogs.SeverityNumber(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SeverityText = string(buf[startPos:pos])
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoAnyValue(&orig.Body, buf[startPos:pos])
if err != nil {
return err
}
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 7:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 8:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoTraceID(&orig.TraceId, buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.SpanId, buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.EventName = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
func CopyLogRecordSlice(dest, src []*otlplogs.LogRecord) []*otlplogs.LogRecord {
var newDest []*otlplogs.LogRecord
if cap(dest) < len(src) {
newDest = make([]*otlplogs.LogRecord, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogRecord()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogRecord(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogRecord()
}
}
for i := range src {
CopyLogRecord(newDest[i], src[i])
}
return newDest
}
func GenTestLogRecordSlice() []*otlplogs.LogRecord {
orig := make([]*otlplogs.LogRecord, 5)
orig[0] = NewLogRecord()
orig[1] = GenTestLogRecord()
orig[2] = NewLogRecord()
orig[3] = GenTestLogRecord()
orig[4] = NewLogRecord()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolMapping = sync.Pool{
New: func() any {
return &otlpprofiles.Mapping{}
},
}
)
func NewMapping() *otlpprofiles.Mapping {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Mapping{}
}
return protoPoolMapping.Get().(*otlpprofiles.Mapping)
}
func DeleteMapping(orig *otlpprofiles.Mapping, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolMapping.Put(orig)
}
}
func CopyMapping(dest, src *otlpprofiles.Mapping) {
// If copying to same object, just return.
if src == dest {
return
}
dest.MemoryStart = src.MemoryStart
dest.MemoryLimit = src.MemoryLimit
dest.FileOffset = src.FileOffset
dest.FilenameStrindex = src.FilenameStrindex
dest.AttributeIndices = CopyInt32Slice(dest.AttributeIndices, src.AttributeIndices)
}
func GenTestMapping() *otlpprofiles.Mapping {
orig := NewMapping()
orig.MemoryStart = uint64(13)
orig.MemoryLimit = uint64(13)
orig.FileOffset = uint64(13)
orig.FilenameStrindex = int32(13)
orig.AttributeIndices = GenTestInt32Slice()
return orig
}
// MarshalJSONMapping marshals all properties of orig to the destination stream.
func MarshalJSONMapping(orig *otlpprofiles.Mapping, dest *json.Stream) {
dest.WriteObjectStart()
if orig.MemoryStart != uint64(0) {
dest.WriteObjectField("memoryStart")
dest.WriteUint64(orig.MemoryStart)
}
if orig.MemoryLimit != uint64(0) {
dest.WriteObjectField("memoryLimit")
dest.WriteUint64(orig.MemoryLimit)
}
if orig.FileOffset != uint64(0) {
dest.WriteObjectField("fileOffset")
dest.WriteUint64(orig.FileOffset)
}
if orig.FilenameStrindex != int32(0) {
dest.WriteObjectField("filenameStrindex")
dest.WriteInt32(orig.FilenameStrindex)
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONMapping unmarshals all properties of orig from the source iterator.
func UnmarshalJSONMapping(orig *otlpprofiles.Mapping, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "memoryStart", "memory_start":
orig.MemoryStart = iter.ReadUint64()
case "memoryLimit", "memory_limit":
orig.MemoryLimit = iter.ReadUint64()
case "fileOffset", "file_offset":
orig.FileOffset = iter.ReadUint64()
case "filenameStrindex", "filename_strindex":
orig.FilenameStrindex = iter.ReadInt32()
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func SizeProtoMapping(orig *otlpprofiles.Mapping) int {
var n int
var l int
_ = l
if orig.MemoryStart != 0 {
n += 1 + proto.Sov(uint64(orig.MemoryStart))
}
if orig.MemoryLimit != 0 {
n += 1 + proto.Sov(uint64(orig.MemoryLimit))
}
if orig.FileOffset != 0 {
n += 1 + proto.Sov(uint64(orig.FileOffset))
}
if orig.FilenameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoMapping(orig *otlpprofiles.Mapping, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.MemoryStart != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryStart))
pos--
buf[pos] = 0x8
}
if orig.MemoryLimit != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryLimit))
pos--
buf[pos] = 0x10
}
if orig.FileOffset != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FileOffset))
pos--
buf[pos] = 0x18
}
if orig.FilenameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
pos--
buf[pos] = 0x20
}
l = len(orig.AttributeIndices)
if l > 0 {
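// Packed encoding, written back-to-front: emit every element as a varint,
// then prefix the payload length (endPos-pos) and the field-5 LEN tag (0x2a).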
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x2a
}
return len(buf) - pos
}
func UnmarshalProtoMapping(orig *otlpprofiles.Mapping, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MemoryStart = uint64(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MemoryLimit = uint64(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FileOffset = uint64(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FilenameStrindex = int32(num)
case 5:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyMappingSlice(dest, src []*otlpprofiles.Mapping) []*otlpprofiles.Mapping {
var newDest []*otlpprofiles.Mapping
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Mapping, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMapping()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMapping(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMapping()
}
}
for i := range src {
CopyMapping(newDest[i], src[i])
}
return newDest
}
func GenTestMappingSlice() []*otlpprofiles.Mapping {
orig := make([]*otlpprofiles.Mapping, 5)
orig[0] = NewMapping()
orig[1] = GenTestMapping()
orig[2] = NewMapping()
orig[3] = GenTestMapping()
orig[4] = NewMapping()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolMetric = sync.Pool{
New: func() any {
return &otlpmetrics.Metric{}
},
}
ProtoPoolMetric_Gauge = sync.Pool{
New: func() any {
return &otlpmetrics.Metric_Gauge{}
},
}
ProtoPoolMetric_Sum = sync.Pool{
New: func() any {
return &otlpmetrics.Metric_Sum{}
},
}
ProtoPoolMetric_Histogram = sync.Pool{
New: func() any {
return &otlpmetrics.Metric_Histogram{}
},
}
ProtoPoolMetric_ExponentialHistogram = sync.Pool{
New: func() any {
return &otlpmetrics.Metric_ExponentialHistogram{}
},
}
ProtoPoolMetric_Summary = sync.Pool{
New: func() any {
return &otlpmetrics.Metric_Summary{}
},
}
)
func NewMetric() *otlpmetrics.Metric {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Metric{}
}
return protoPoolMetric.Get().(*otlpmetrics.Metric)
}
func DeleteMetric(orig *otlpmetrics.Metric, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
switch ov := orig.Data.(type) {
case *otlpmetrics.Metric_Gauge:
DeleteGauge(ov.Gauge, true)
ov.Gauge = nil
ProtoPoolMetric_Gauge.Put(ov)
case *otlpmetrics.Metric_Sum:
DeleteSum(ov.Sum, true)
ov.Sum = nil
ProtoPoolMetric_Sum.Put(ov)
case *otlpmetrics.Metric_Histogram:
DeleteHistogram(ov.Histogram, true)
ov.Histogram = nil
ProtoPoolMetric_Histogram.Put(ov)
case *otlpmetrics.Metric_ExponentialHistogram:
DeleteExponentialHistogram(ov.ExponentialHistogram, true)
ov.ExponentialHistogram = nil
ProtoPoolMetric_ExponentialHistogram.Put(ov)
case *otlpmetrics.Metric_Summary:
DeleteSummary(ov.Summary, true)
ov.Summary = nil
ProtoPoolMetric_Summary.Put(ov)
}
for i := range orig.Metadata {
DeleteKeyValue(&orig.Metadata[i], false)
}
orig.Reset()
if nullable {
protoPoolMetric.Put(orig)
}
}
func CopyMetric(dest, src *otlpmetrics.Metric) {
// If copying to same object, just return.
if src == dest {
return
}
dest.Name = src.Name
dest.Description = src.Description
dest.Unit = src.Unit
switch t := src.Data.(type) {
case *otlpmetrics.Metric_Gauge:
var ov *otlpmetrics.Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
}
ov.Gauge = NewGauge()
CopyGauge(ov.Gauge, t.Gauge)
dest.Data = ov
case *otlpmetrics.Metric_Sum:
var ov *otlpmetrics.Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
}
ov.Sum = NewSum()
CopySum(ov.Sum, t.Sum)
dest.Data = ov
case *otlpmetrics.Metric_Histogram:
var ov *otlpmetrics.Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
}
ov.Histogram = NewHistogram()
CopyHistogram(ov.Histogram, t.Histogram)
dest.Data = ov
case *otlpmetrics.Metric_ExponentialHistogram:
var ov *otlpmetrics.Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
CopyExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram)
dest.Data = ov
case *otlpmetrics.Metric_Summary:
var ov *otlpmetrics.Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
}
ov.Summary = NewSummary()
CopySummary(ov.Summary, t.Summary)
dest.Data = ov
}
dest.Metadata = CopyKeyValueSlice(dest.Metadata, src.Metadata)
}
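// exampleCopyMetricOneof is an illustrative sketch, not generated code: it
// shows that CopyMetric above deep-copies the Data oneof by allocating a
// fresh wrapper (pooled when enabled) for the active variant, so dest never
// aliases src's nested message. The function name is an assumption for
// demonstration only.
func exampleCopyMetricOneof() {
src := GenTestMetric() // carries a *Metric_Sum variant by construction
dest := NewMetric()
CopyMetric(dest, src)
// dest.Data is now a new *otlpmetrics.Metric_Sum wrapping a copied Sum.
_ = dest
}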
func GenTestMetric() *otlpmetrics.Metric {
orig := NewMetric()
orig.Name = "test_name"
orig.Description = "test_description"
orig.Unit = "test_unit"
orig.Data = &otlpmetrics.Metric_Sum{Sum: GenTestSum()}
orig.Metadata = GenTestKeyValueSlice()
return orig
}
// MarshalJSONMetric marshals all properties of orig to the destination stream.
func MarshalJSONMetric(orig *otlpmetrics.Metric, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if orig.Description != "" {
dest.WriteObjectField("description")
dest.WriteString(orig.Description)
}
if orig.Unit != "" {
dest.WriteObjectField("unit")
dest.WriteString(orig.Unit)
}
switch orig := orig.Data.(type) {
case *otlpmetrics.Metric_Gauge:
if orig.Gauge != nil {
dest.WriteObjectField("gauge")
MarshalJSONGauge(orig.Gauge, dest)
}
case *otlpmetrics.Metric_Sum:
if orig.Sum != nil {
dest.WriteObjectField("sum")
MarshalJSONSum(orig.Sum, dest)
}
case *otlpmetrics.Metric_Histogram:
if orig.Histogram != nil {
dest.WriteObjectField("histogram")
MarshalJSONHistogram(orig.Histogram, dest)
}
case *otlpmetrics.Metric_ExponentialHistogram:
if orig.ExponentialHistogram != nil {
dest.WriteObjectField("exponentialHistogram")
MarshalJSONExponentialHistogram(orig.ExponentialHistogram, dest)
}
case *otlpmetrics.Metric_Summary:
if orig.Summary != nil {
dest.WriteObjectField("summary")
MarshalJSONSummary(orig.Summary, dest)
}
}
if len(orig.Metadata) > 0 {
dest.WriteObjectField("metadata")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Metadata[0], dest)
for i := 1; i < len(orig.Metadata); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Metadata[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONMetric unmarshals all properties of orig from the source iterator.
func UnmarshalJSONMetric(orig *otlpmetrics.Metric, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "name":
orig.Name = iter.ReadString()
case "description":
orig.Description = iter.ReadString()
case "unit":
orig.Unit = iter.ReadString()
case "gauge":
{
var ov *otlpmetrics.Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
}
ov.Gauge = NewGauge()
UnmarshalJSONGauge(ov.Gauge, iter)
orig.Data = ov
}
case "sum":
{
var ov *otlpmetrics.Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
}
ov.Sum = NewSum()
UnmarshalJSONSum(ov.Sum, iter)
orig.Data = ov
}
case "histogram":
{
var ov *otlpmetrics.Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
}
ov.Histogram = NewHistogram()
UnmarshalJSONHistogram(ov.Histogram, iter)
orig.Data = ov
}
case "exponentialHistogram", "exponential_histogram":
{
var ov *otlpmetrics.Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
UnmarshalJSONExponentialHistogram(ov.ExponentialHistogram, iter)
orig.Data = ov
}
case "summary":
{
var ov *otlpmetrics.Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
}
ov.Summary = NewSummary()
UnmarshalJSONSummary(ov.Summary, iter)
orig.Data = ov
}
case "metadata":
for iter.ReadArray() {
orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Metadata[len(orig.Metadata)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoMetric(orig *otlpmetrics.Metric) int {
var n int
var l int
_ = l
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Description)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Unit)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
switch orig := orig.Data.(type) {
case nil:
_ = orig
break
case *otlpmetrics.Metric_Gauge:
l = SizeProtoGauge(orig.Gauge)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpmetrics.Metric_Sum:
l = SizeProtoSum(orig.Sum)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpmetrics.Metric_Histogram:
l = SizeProtoHistogram(orig.Histogram)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpmetrics.Metric_ExponentialHistogram:
l = SizeProtoExponentialHistogram(orig.ExponentialHistogram)
n += 1 + proto.Sov(uint64(l)) + l
case *otlpmetrics.Metric_Summary:
l = SizeProtoSummary(orig.Summary)
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Metadata {
l = SizeProtoKeyValue(&orig.Metadata[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoMetric(orig *otlpmetrics.Metric, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Description)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Description)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.Unit)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Unit)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
switch orig := orig.Data.(type) {
case *otlpmetrics.Metric_Gauge:
l = MarshalProtoGauge(orig.Gauge, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
case *otlpmetrics.Metric_Sum:
l = MarshalProtoSum(orig.Sum, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
case *otlpmetrics.Metric_Histogram:
l = MarshalProtoHistogram(orig.Histogram, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
case *otlpmetrics.Metric_ExponentialHistogram:
l = MarshalProtoExponentialHistogram(orig.ExponentialHistogram, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
case *otlpmetrics.Metric_Summary:
l = MarshalProtoSummary(orig.Summary, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
for i := len(orig.Metadata) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Metadata[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x62
}
return len(buf) - pos
}
func UnmarshalProtoMetric(orig *otlpmetrics.Metric, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing within a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Description = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Unit = string(buf[startPos:pos])
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpmetrics.Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
}
ov.Gauge = NewGauge()
err = UnmarshalProtoGauge(ov.Gauge, buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpmetrics.Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
}
ov.Sum = NewSum()
err = UnmarshalProtoSum(ov.Sum, buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpmetrics.Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
}
ov.Histogram = NewHistogram()
err = UnmarshalProtoHistogram(ov.Histogram, buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpmetrics.Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
err = UnmarshalProtoExponentialHistogram(ov.ExponentialHistogram, buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *otlpmetrics.Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
}
ov.Summary = NewSummary()
err = UnmarshalProtoSummary(ov.Summary, buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 12:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Metadata = append(orig.Metadata, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Metadata[len(orig.Metadata)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyMetricSlice(dest, src []*otlpmetrics.Metric) []*otlpmetrics.Metric {
var newDest []*otlpmetrics.Metric
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.Metric, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetric()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetric(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetric()
}
}
for i := range src {
CopyMetric(newDest[i], src[i])
}
return newDest
}
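// exampleCopyMetricSlice is an illustrative sketch (hand-written, not part of
// the generated API): it shows that CopyMetricSlice reuses destination
// pointers when capacity allows, so shrinking and regrowing a slice does not
// reallocate every element.
func exampleCopyMetricSlice() []*otlpmetrics.Metric {
src := GenTestMetricSlice()
var dest []*otlpmetrics.Metric
// First copy: cap(dest) < len(src), so len(src) pointers are allocated.
dest = CopyMetricSlice(dest, src)
// Shorter source: the backing array is reused and the trailing elements
// are deleted (returned to the pool when pooling is enabled).
dest = CopyMetricSlice(dest, src[:2])
return dest
}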
func GenTestMetricSlice() []*otlpmetrics.Metric {
orig := make([]*otlpmetrics.Metric, 5)
orig[0] = NewMetric()
orig[1] = GenTestMetric()
orig[2] = NewMetric()
orig[3] = GenTestMetric()
orig[4] = NewMetric()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolNumberDataPoint = sync.Pool{
New: func() any {
return &otlpmetrics.NumberDataPoint{}
},
}
ProtoPoolNumberDataPoint_AsDouble = sync.Pool{
New: func() any {
return &otlpmetrics.NumberDataPoint_AsDouble{}
},
}
ProtoPoolNumberDataPoint_AsInt = sync.Pool{
New: func() any {
return &otlpmetrics.NumberDataPoint_AsInt{}
},
}
)
func NewNumberDataPoint() *otlpmetrics.NumberDataPoint {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.NumberDataPoint{}
}
return protoPoolNumberDataPoint.Get().(*otlpmetrics.NumberDataPoint)
}
func DeleteNumberDataPoint(orig *otlpmetrics.NumberDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Value.(type) {
case *otlpmetrics.NumberDataPoint_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolNumberDataPoint_AsDouble.Put(ov)
}
case *otlpmetrics.NumberDataPoint_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolNumberDataPoint_AsInt.Put(ov)
}
}
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
orig.Reset()
if nullable {
protoPoolNumberDataPoint.Put(orig)
}
}
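// exampleNumberDataPointLifecycle is an illustrative sketch (hand-written,
// not generated): NewNumberDataPoint and DeleteNumberDataPoint pair up so
// that, when the UseProtoPooling gate is enabled, the struct and its oneof
// wrappers go back to their sync.Pools instead of being garbage collected.
func exampleNumberDataPointLifecycle() {
dp := NewNumberDataPoint()
dp.TimeUnixNano = 1
// nullable=true: dp is a standalone pointer, so it may return to the pool.
DeleteNumberDataPoint(dp, true)
}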
func CopyNumberDataPoint(dest, src *otlpmetrics.NumberDataPoint) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
switch t := src.Value.(type) {
case *otlpmetrics.NumberDataPoint_AsDouble:
var ov *otlpmetrics.NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
case *otlpmetrics.NumberDataPoint_AsInt:
var ov *otlpmetrics.NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
}
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
dest.Flags = src.Flags
}
func GenTestNumberDataPoint() *otlpmetrics.NumberDataPoint {
orig := NewNumberDataPoint()
orig.Attributes = GenTestKeyValueSlice()
orig.StartTimeUnixNano = 1234567890
orig.TimeUnixNano = 1234567890
orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)}
orig.Exemplars = GenTestExemplarSlice()
orig.Flags = 1
return orig
}
// MarshalJSONNumberDataPoint marshals all properties from the current struct to the destination stream.
func MarshalJSONNumberDataPoint(orig *otlpmetrics.NumberDataPoint, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
case *otlpmetrics.NumberDataPoint_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
case *otlpmetrics.NumberDataPoint_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
MarshalJSONExemplar(&orig.Exemplars[0], dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
MarshalJSONExemplar(&orig.Exemplars[i], dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONNumberDataPoint unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONNumberDataPoint(orig *otlpmetrics.NumberDataPoint, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "asDouble", "as_double":
{
var ov *otlpmetrics.NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
}
case "asInt", "as_int":
{
var ov *otlpmetrics.NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
}
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
UnmarshalJSONExemplar(&orig.Exemplars[len(orig.Exemplars)-1], iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func SizeProtoNumberDataPoint(orig *otlpmetrics.NumberDataPoint) int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
switch orig := orig.Value.(type) {
case nil:
_ = orig
case *otlpmetrics.NumberDataPoint_AsDouble:
n += 9
case *otlpmetrics.NumberDataPoint_AsInt:
n += 9
}
for i := range orig.Exemplars {
l = SizeProtoExemplar(&orig.Exemplars[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
return n
}
func MarshalProtoNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
switch orig := orig.Value.(type) {
case *otlpmetrics.NumberDataPoint_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x21
case *otlpmetrics.NumberDataPoint_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
buf[pos] = 0x31
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = MarshalProtoExemplar(&orig.Exemplars[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x40
}
return len(buf) - pos
}
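// exampleMarshalProtoNumberDataPoint is an illustrative sketch (hand-written,
// not generated): MarshalProto* functions write fields backwards from the end
// of the buffer, so the encoded bytes occupy the buffer's tail and the return
// value is the encoded length.
func exampleMarshalProtoNumberDataPoint(orig *otlpmetrics.NumberDataPoint) []byte {
buf := make([]byte, SizeProtoNumberDataPoint(orig))
n := MarshalProtoNumberDataPoint(orig, buf)
return buf[len(buf)-n:] // for an exactly sized buffer, n == len(buf)
}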
func UnmarshalProtoNumberDataPoint(orig *otlpmetrics.NumberDataPoint, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
case 6:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *otlpmetrics.NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, otlpmetrics.Exemplar{})
err = UnmarshalProtoExemplar(&orig.Exemplars[len(orig.Exemplars)-1], buf[startPos:pos])
if err != nil {
return err
}
case 8:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
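// exampleRoundTripNumberDataPoint is an illustrative sketch (hand-written,
// not generated) of the full size/marshal/unmarshal cycle for a
// NumberDataPoint.
func exampleRoundTripNumberDataPoint() (*otlpmetrics.NumberDataPoint, error) {
src := GenTestNumberDataPoint()
buf := make([]byte, SizeProtoNumberDataPoint(src))
MarshalProtoNumberDataPoint(src, buf)
dest := NewNumberDataPoint()
if err := UnmarshalProtoNumberDataPoint(dest, buf); err != nil {
return nil, err
}
return dest, nil
}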
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyNumberDataPointSlice(dest, src []*otlpmetrics.NumberDataPoint) []*otlpmetrics.NumberDataPoint {
var newDest []*otlpmetrics.NumberDataPoint
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.NumberDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewNumberDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteNumberDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewNumberDataPoint()
}
}
for i := range src {
CopyNumberDataPoint(newDest[i], src[i])
}
return newDest
}
func GenTestNumberDataPointSlice() []*otlpmetrics.NumberDataPoint {
orig := make([]*otlpmetrics.NumberDataPoint, 5)
orig[0] = NewNumberDataPoint()
orig[1] = GenTestNumberDataPoint()
orig[2] = NewNumberDataPoint()
orig[3] = GenTestNumberDataPoint()
orig[4] = NewNumberDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolProfile = sync.Pool{
New: func() any {
return &otlpprofiles.Profile{}
},
}
)
func NewProfile() *otlpprofiles.Profile {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Profile{}
}
return protoPoolProfile.Get().(*otlpprofiles.Profile)
}
func DeleteProfile(orig *otlpprofiles.Profile, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteValueType(&orig.SampleType, false)
for i := range orig.Sample {
DeleteSample(orig.Sample[i], true)
}
DeleteValueType(&orig.PeriodType, false)
DeleteProfileID(&orig.ProfileId, false)
orig.Reset()
if nullable {
protoPoolProfile.Put(orig)
}
}
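// exampleDeleteProfile is an illustrative sketch (hand-written, not
// generated): the nullable flag reports whether orig is a standalone pointer
// that may be pooled. Embedded fields such as SampleType are deleted with
// nullable=false above, because their memory belongs to the enclosing struct.
func exampleDeleteProfile() {
p := GenTestProfile()
DeleteProfile(p, true) // standalone pointer: may be returned to the pool
}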
func CopyProfile(dest, src *otlpprofiles.Profile) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyValueType(&dest.SampleType, &src.SampleType)
dest.Sample = CopySampleSlice(dest.Sample, src.Sample)
dest.TimeUnixNano = src.TimeUnixNano
dest.DurationNano = src.DurationNano
CopyValueType(&dest.PeriodType, &src.PeriodType)
dest.Period = src.Period
dest.CommentStrindices = CopyInt32Slice(dest.CommentStrindices, src.CommentStrindices)
dest.ProfileId = src.ProfileId
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.OriginalPayloadFormat = src.OriginalPayloadFormat
dest.OriginalPayload = CopyByteSlice(dest.OriginalPayload, src.OriginalPayload)
dest.AttributeIndices = CopyInt32Slice(dest.AttributeIndices, src.AttributeIndices)
}
func GenTestProfile() *otlpprofiles.Profile {
orig := NewProfile()
orig.SampleType = *GenTestValueType()
orig.Sample = GenTestSampleSlice()
orig.TimeUnixNano = 1234567890
orig.DurationNano = 1234567890
orig.PeriodType = *GenTestValueType()
orig.Period = int64(13)
orig.CommentStrindices = GenTestInt32Slice()
orig.ProfileId = data.ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
orig.DroppedAttributesCount = uint32(13)
orig.OriginalPayloadFormat = "test_originalpayloadformat"
orig.OriginalPayload = GenTestByteSlice()
orig.AttributeIndices = GenTestInt32Slice()
return orig
}
// MarshalJSONProfile marshals all properties from the current struct to the destination stream.
func MarshalJSONProfile(orig *otlpprofiles.Profile, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("sampleType")
MarshalJSONValueType(&orig.SampleType, dest)
if len(orig.Sample) > 0 {
dest.WriteObjectField("sample")
dest.WriteArrayStart()
MarshalJSONSample(orig.Sample[0], dest)
for i := 1; i < len(orig.Sample); i++ {
dest.WriteMore()
MarshalJSONSample(orig.Sample[i], dest)
}
dest.WriteArrayEnd()
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.DurationNano != uint64(0) {
dest.WriteObjectField("durationNano")
dest.WriteUint64(orig.DurationNano)
}
dest.WriteObjectField("periodType")
MarshalJSONValueType(&orig.PeriodType, dest)
if orig.Period != int64(0) {
dest.WriteObjectField("period")
dest.WriteInt64(orig.Period)
}
if len(orig.CommentStrindices) > 0 {
dest.WriteObjectField("commentStrindices")
dest.WriteArrayStart()
dest.WriteInt32(orig.CommentStrindices[0])
for i := 1; i < len(orig.CommentStrindices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.CommentStrindices[i])
}
dest.WriteArrayEnd()
}
if orig.ProfileId != data.ProfileID([16]byte{}) {
dest.WriteObjectField("profileId")
MarshalJSONProfileID(&orig.ProfileId, dest)
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.OriginalPayloadFormat != "" {
dest.WriteObjectField("originalPayloadFormat")
dest.WriteString(orig.OriginalPayloadFormat)
}
if len(orig.OriginalPayload) > 0 {
dest.WriteObjectField("originalPayload")
dest.WriteBytes(orig.OriginalPayload)
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONProfile unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONProfile(orig *otlpprofiles.Profile, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "sampleType", "sample_type":
UnmarshalJSONValueType(&orig.SampleType, iter)
case "sample":
for iter.ReadArray() {
orig.Sample = append(orig.Sample, NewSample())
UnmarshalJSONSample(orig.Sample[len(orig.Sample)-1], iter)
}
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "durationNano", "duration_nano":
orig.DurationNano = iter.ReadUint64()
case "periodType", "period_type":
UnmarshalJSONValueType(&orig.PeriodType, iter)
case "period":
orig.Period = iter.ReadInt64()
case "commentStrindices", "comment_strindices":
for iter.ReadArray() {
orig.CommentStrindices = append(orig.CommentStrindices, iter.ReadInt32())
}
case "profileId", "profile_id":
UnmarshalJSONProfileID(&orig.ProfileId, iter)
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "originalPayloadFormat", "original_payload_format":
orig.OriginalPayloadFormat = iter.ReadString()
case "originalPayload", "original_payload":
orig.OriginalPayload = iter.ReadBytes()
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func SizeProtoProfile(orig *otlpprofiles.Profile) int {
var n int
var l int
_ = l
l = SizeProtoValueType(&orig.SampleType)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Sample {
l = SizeProtoSample(orig.Sample[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.DurationNano != 0 {
n += 1 + proto.Sov(uint64(orig.DurationNano))
}
l = SizeProtoValueType(&orig.PeriodType)
n += 1 + proto.Sov(uint64(l)) + l
if orig.Period != 0 {
n += 1 + proto.Sov(uint64(orig.Period))
}
if len(orig.CommentStrindices) > 0 {
l = 0
for _, e := range orig.CommentStrindices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
l = SizeProtoProfileID(&orig.ProfileId)
n += 1 + proto.Sov(uint64(l)) + l
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
l = len(orig.OriginalPayloadFormat)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.OriginalPayload)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoProfile(orig *otlpprofiles.Profile, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoValueType(&orig.SampleType, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Sample) - 1; i >= 0; i-- {
l = MarshalProtoSample(orig.Sample[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.DurationNano != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNano))
pos--
buf[pos] = 0x20
}
l = MarshalProtoValueType(&orig.PeriodType, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
if orig.Period != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Period))
pos--
buf[pos] = 0x30
}
l = len(orig.CommentStrindices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.CommentStrindices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x3a
}
l = MarshalProtoProfileID(&orig.ProfileId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x48
}
l = len(orig.OriginalPayloadFormat)
if l > 0 {
pos -= l
copy(buf[pos:], orig.OriginalPayloadFormat)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
}
l = len(orig.OriginalPayload)
if l > 0 {
pos -= l
copy(buf[pos:], orig.OriginalPayload)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x62
}
return len(buf) - pos
}
func UnmarshalProtoProfile(orig *otlpprofiles.Profile, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoValueType(&orig.SampleType, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Sample = append(orig.Sample, NewSample())
err = UnmarshalProtoSample(orig.Sample[len(orig.Sample)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DurationNano = uint64(num)
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoValueType(&orig.PeriodType, buf[startPos:pos])
if err != nil {
return err
}
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Period = int64(num)
case 7:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.CommentStrindices = append(orig.CommentStrindices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field CommentStrindices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.CommentStrindices = append(orig.CommentStrindices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType)
}
case 8:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoProfileID(&orig.ProfileId, buf[startPos:pos])
if err != nil {
return err
}
case 9:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.OriginalPayloadFormat = string(buf[startPos:pos])
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
if length != 0 {
orig.OriginalPayload = make([]byte, length)
copy(orig.OriginalPayload, buf[startPos:pos])
}
case 12:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
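// examplePackedInt32Decode is an illustrative sketch (hand-written, not
// generated) of the packed-varint decoding used above for CommentStrindices
// and AttributeIndices: one LEN payload holds a run of varints, consumed
// until the payload is exhausted. buf is assumed to start at the length
// prefix, i.e. with the field tag already consumed.
func examplePackedInt32Decode(buf []byte) ([]int32, error) {
length, pos, err := proto.ConsumeLen(buf, 0)
if err != nil {
return nil, err
}
startPos := pos - length
var out []int32
for startPos < pos {
var num uint64
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return nil, err
}
out = append(out, int32(num))
}
return out, nil
}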
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolProfilesDictionary = sync.Pool{
New: func() any {
return &otlpprofiles.ProfilesDictionary{}
},
}
)
func NewProfilesDictionary() *otlpprofiles.ProfilesDictionary {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.ProfilesDictionary{}
}
return protoPoolProfilesDictionary.Get().(*otlpprofiles.ProfilesDictionary)
}
func DeleteProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.MappingTable {
DeleteMapping(orig.MappingTable[i], true)
}
for i := range orig.LocationTable {
DeleteLocation(orig.LocationTable[i], true)
}
for i := range orig.FunctionTable {
DeleteFunction(orig.FunctionTable[i], true)
}
for i := range orig.LinkTable {
DeleteLink(orig.LinkTable[i], true)
}
for i := range orig.AttributeTable {
DeleteKeyValueAndUnit(orig.AttributeTable[i], true)
}
for i := range orig.StackTable {
DeleteStack(orig.StackTable[i], true)
}
orig.Reset()
if nullable {
protoPoolProfilesDictionary.Put(orig)
}
}
func CopyProfilesDictionary(dest, src *otlpprofiles.ProfilesDictionary) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.MappingTable = CopyMappingSlice(dest.MappingTable, src.MappingTable)
dest.LocationTable = CopyLocationSlice(dest.LocationTable, src.LocationTable)
dest.FunctionTable = CopyFunctionSlice(dest.FunctionTable, src.FunctionTable)
dest.LinkTable = CopyLinkSlice(dest.LinkTable, src.LinkTable)
dest.StringTable = CopyStringSlice(dest.StringTable, src.StringTable)
dest.AttributeTable = CopyKeyValueAndUnitSlice(dest.AttributeTable, src.AttributeTable)
dest.StackTable = CopyStackSlice(dest.StackTable, src.StackTable)
}
func GenTestProfilesDictionary() *otlpprofiles.ProfilesDictionary {
orig := NewProfilesDictionary()
orig.MappingTable = GenTestMappingSlice()
orig.LocationTable = GenTestLocationSlice()
orig.FunctionTable = GenTestFunctionSlice()
orig.LinkTable = GenTestLinkSlice()
orig.StringTable = GenTestStringSlice()
orig.AttributeTable = GenTestKeyValueAndUnitSlice()
orig.StackTable = GenTestStackSlice()
return orig
}
// MarshalJSONProfilesDictionary marshals all properties from the current struct to the destination stream.
func MarshalJSONProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.MappingTable) > 0 {
dest.WriteObjectField("mappingTable")
dest.WriteArrayStart()
MarshalJSONMapping(orig.MappingTable[0], dest)
for i := 1; i < len(orig.MappingTable); i++ {
dest.WriteMore()
MarshalJSONMapping(orig.MappingTable[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.LocationTable) > 0 {
dest.WriteObjectField("locationTable")
dest.WriteArrayStart()
MarshalJSONLocation(orig.LocationTable[0], dest)
for i := 1; i < len(orig.LocationTable); i++ {
dest.WriteMore()
MarshalJSONLocation(orig.LocationTable[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.FunctionTable) > 0 {
dest.WriteObjectField("functionTable")
dest.WriteArrayStart()
MarshalJSONFunction(orig.FunctionTable[0], dest)
for i := 1; i < len(orig.FunctionTable); i++ {
dest.WriteMore()
MarshalJSONFunction(orig.FunctionTable[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.LinkTable) > 0 {
dest.WriteObjectField("linkTable")
dest.WriteArrayStart()
MarshalJSONLink(orig.LinkTable[0], dest)
for i := 1; i < len(orig.LinkTable); i++ {
dest.WriteMore()
MarshalJSONLink(orig.LinkTable[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.StringTable) > 0 {
dest.WriteObjectField("stringTable")
dest.WriteArrayStart()
dest.WriteString(orig.StringTable[0])
for i := 1; i < len(orig.StringTable); i++ {
dest.WriteMore()
dest.WriteString(orig.StringTable[i])
}
dest.WriteArrayEnd()
}
if len(orig.AttributeTable) > 0 {
dest.WriteObjectField("attributeTable")
dest.WriteArrayStart()
MarshalJSONKeyValueAndUnit(orig.AttributeTable[0], dest)
for i := 1; i < len(orig.AttributeTable); i++ {
dest.WriteMore()
MarshalJSONKeyValueAndUnit(orig.AttributeTable[i], dest)
}
dest.WriteArrayEnd()
}
if len(orig.StackTable) > 0 {
dest.WriteObjectField("stackTable")
dest.WriteArrayStart()
MarshalJSONStack(orig.StackTable[0], dest)
for i := 1; i < len(orig.StackTable); i++ {
dest.WriteMore()
MarshalJSONStack(orig.StackTable[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONProfilesDictionary unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "mappingTable", "mapping_table":
for iter.ReadArray() {
orig.MappingTable = append(orig.MappingTable, NewMapping())
UnmarshalJSONMapping(orig.MappingTable[len(orig.MappingTable)-1], iter)
}
case "locationTable", "location_table":
for iter.ReadArray() {
orig.LocationTable = append(orig.LocationTable, NewLocation())
UnmarshalJSONLocation(orig.LocationTable[len(orig.LocationTable)-1], iter)
}
case "functionTable", "function_table":
for iter.ReadArray() {
orig.FunctionTable = append(orig.FunctionTable, NewFunction())
UnmarshalJSONFunction(orig.FunctionTable[len(orig.FunctionTable)-1], iter)
}
case "linkTable", "link_table":
for iter.ReadArray() {
orig.LinkTable = append(orig.LinkTable, NewLink())
UnmarshalJSONLink(orig.LinkTable[len(orig.LinkTable)-1], iter)
}
case "stringTable", "string_table":
for iter.ReadArray() {
orig.StringTable = append(orig.StringTable, iter.ReadString())
}
case "attributeTable", "attribute_table":
for iter.ReadArray() {
orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
UnmarshalJSONKeyValueAndUnit(orig.AttributeTable[len(orig.AttributeTable)-1], iter)
}
case "stackTable", "stack_table":
for iter.ReadArray() {
orig.StackTable = append(orig.StackTable, NewStack())
UnmarshalJSONStack(orig.StackTable[len(orig.StackTable)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoProfilesDictionary(orig *otlpprofiles.ProfilesDictionary) int {
var n int
var l int
_ = l
for i := range orig.MappingTable {
l = SizeProtoMapping(orig.MappingTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.LocationTable {
l = SizeProtoLocation(orig.LocationTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.FunctionTable {
l = SizeProtoFunction(orig.FunctionTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.LinkTable {
l = SizeProtoLink(orig.LinkTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.StringTable {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.AttributeTable {
l = SizeProtoKeyValueAndUnit(orig.AttributeTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.StackTable {
l = SizeProtoStack(orig.StackTable[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.MappingTable) - 1; i >= 0; i-- {
l = MarshalProtoMapping(orig.MappingTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
for i := len(orig.LocationTable) - 1; i >= 0; i-- {
l = MarshalProtoLocation(orig.LocationTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.FunctionTable) - 1; i >= 0; i-- {
l = MarshalProtoFunction(orig.FunctionTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.LinkTable) - 1; i >= 0; i-- {
l = MarshalProtoLink(orig.LinkTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
for i := len(orig.StringTable) - 1; i >= 0; i-- {
l = len(orig.StringTable[i])
pos -= l
copy(buf[pos:], orig.StringTable[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
for i := len(orig.AttributeTable) - 1; i >= 0; i-- {
l = MarshalProtoKeyValueAndUnit(orig.AttributeTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
for i := len(orig.StackTable) - 1; i >= 0; i-- {
l = MarshalProtoStack(orig.StackTable[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
return len(buf) - pos
}
func UnmarshalProtoProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.MappingTable = append(orig.MappingTable, NewMapping())
err = UnmarshalProtoMapping(orig.MappingTable[len(orig.MappingTable)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LocationTable = append(orig.LocationTable, NewLocation())
err = UnmarshalProtoLocation(orig.LocationTable[len(orig.LocationTable)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.FunctionTable = append(orig.FunctionTable, NewFunction())
err = UnmarshalProtoFunction(orig.FunctionTable[len(orig.FunctionTable)-1], buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LinkTable = append(orig.LinkTable, NewLink())
err = UnmarshalProtoLink(orig.LinkTable[len(orig.LinkTable)-1], buf[startPos:pos])
if err != nil {
return err
}
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.StringTable = append(orig.StringTable, string(buf[startPos:pos]))
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
err = UnmarshalProtoKeyValueAndUnit(orig.AttributeTable[len(orig.AttributeTable)-1], buf[startPos:pos])
if err != nil {
return err
}
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.StackTable = append(orig.StackTable, NewStack())
err = UnmarshalProtoStack(orig.StackTable[len(orig.StackTable)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
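// exampleRoundTripProfilesDictionary is an illustrative sketch (hand-written,
// not generated): every table in the dictionary survives a proto round trip
// through an exactly sized buffer.
func exampleRoundTripProfilesDictionary() (*otlpprofiles.ProfilesDictionary, error) {
src := GenTestProfilesDictionary()
buf := make([]byte, SizeProtoProfilesDictionary(src))
MarshalProtoProfilesDictionary(src, buf)
dest := NewProfilesDictionary()
if err := UnmarshalProtoProfilesDictionary(dest, buf); err != nil {
return nil, err
}
return dest, nil
}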
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyProfileSlice(dest, src []*otlpprofiles.Profile) []*otlpprofiles.Profile {
var newDest []*otlpprofiles.Profile
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Profile, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfile()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfile(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfile()
}
}
for i := range src {
CopyProfile(newDest[i], src[i])
}
return newDest
}
func GenTestProfileSlice() []*otlpprofiles.Profile {
orig := make([]*otlpprofiles.Profile, 5)
orig[0] = NewProfile()
orig[1] = GenTestProfile()
orig[2] = NewProfile()
orig[3] = GenTestProfile()
orig[4] = NewProfile()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type ResourceWrapper struct {
orig *otlpresource.Resource
state *State
}
func GetResourceOrig(ms ResourceWrapper) *otlpresource.Resource {
return ms.orig
}
func GetResourceState(ms ResourceWrapper) *State {
return ms.state
}
func NewResourceWrapper(orig *otlpresource.Resource, state *State) ResourceWrapper {
return ResourceWrapper{orig: orig, state: state}
}
func GenTestResourceWrapper() ResourceWrapper {
orig := GenTestResource()
return NewResourceWrapper(orig, NewState())
}
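// exampleResourceWrapper is an illustrative sketch (hand-written, not
// generated): ResourceWrapper pairs the raw protobuf struct with the shared
// State that the public pdata API consults before mutating.
func exampleResourceWrapper() (*otlpresource.Resource, *State) {
wrapper := GenTestResourceWrapper()
return GetResourceOrig(wrapper), GetResourceState(wrapper)
}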
var (
protoPoolResource = sync.Pool{
New: func() any {
return &otlpresource.Resource{}
},
}
)
func NewResource() *otlpresource.Resource {
if !UseProtoPooling.IsEnabled() {
return &otlpresource.Resource{}
}
return protoPoolResource.Get().(*otlpresource.Resource)
}
func DeleteResource(orig *otlpresource.Resource, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.EntityRefs {
DeleteEntityRef(orig.EntityRefs[i], true)
}
orig.Reset()
if nullable {
protoPoolResource.Put(orig)
}
}
func CopyResource(dest, src *otlpresource.Resource) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.EntityRefs = CopyEntityRefSlice(dest.EntityRefs, src.EntityRefs)
}
func GenTestResource() *otlpresource.Resource {
orig := NewResource()
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
orig.EntityRefs = GenTestEntityRefSlice()
return orig
}
// MarshalJSONResource marshals all properties from the current struct to the destination stream.
func MarshalJSONResource(orig *otlpresource.Resource, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if len(orig.EntityRefs) > 0 {
dest.WriteObjectField("entityRefs")
dest.WriteArrayStart()
MarshalJSONEntityRef(orig.EntityRefs[0], dest)
for i := 1; i < len(orig.EntityRefs); i++ {
dest.WriteMore()
MarshalJSONEntityRef(orig.EntityRefs[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONResource unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONResource(orig *otlpresource.Resource, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "entityRefs", "entity_refs":
for iter.ReadArray() {
orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
UnmarshalJSONEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoResource(orig *otlpresource.Resource) int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
for i := range orig.EntityRefs {
l = SizeProtoEntityRef(orig.EntityRefs[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoResource(orig *otlpresource.Resource, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x10
}
for i := len(orig.EntityRefs) - 1; i >= 0; i-- {
l = MarshalProtoEntityRef(orig.EntityRefs[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoResource(orig *otlpresource.Resource, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
err = UnmarshalProtoEntityRef(orig.EntityRefs[len(orig.EntityRefs)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
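// exampleUnknownFieldResource is an illustrative sketch (hand-written, not
// generated): unrecognized field numbers fall through to
// proto.ConsumeUnknown, so a Resource carrying a field from a newer schema
// still decodes cleanly. The tag for field 99 with varint wire type is
// (99<<3)|0 = 792, varint-encoded as 0x98 0x06.
func exampleUnknownFieldResource() error {
buf := []byte{0x98, 0x06, 0x01} // field 99, varint, value 1
return UnmarshalProtoResource(NewResource(), buf)
}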
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolResourceLogs = sync.Pool{
New: func() any {
return &otlplogs.ResourceLogs{}
},
}
)
func NewResourceLogs() *otlplogs.ResourceLogs {
if !UseProtoPooling.IsEnabled() {
return &otlplogs.ResourceLogs{}
}
return protoPoolResourceLogs.Get().(*otlplogs.ResourceLogs)
}
func DeleteResourceLogs(orig *otlplogs.ResourceLogs, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeLogs {
DeleteScopeLogs(orig.ScopeLogs[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceLogs.Put(orig)
}
}
func CopyResourceLogs(dest, src *otlplogs.ResourceLogs) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeLogs = CopyScopeLogsSlice(dest.ScopeLogs, src.ScopeLogs)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestResourceLogs() *otlplogs.ResourceLogs {
orig := NewResourceLogs()
orig.Resource = *GenTestResource()
orig.ScopeLogs = GenTestScopeLogsSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONResourceLogs marshals all properties from the current struct to the destination stream.
func MarshalJSONResourceLogs(orig *otlplogs.ResourceLogs, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
MarshalJSONResource(&orig.Resource, dest)
if len(orig.ScopeLogs) > 0 {
dest.WriteObjectField("scopeLogs")
dest.WriteArrayStart()
MarshalJSONScopeLogs(orig.ScopeLogs[0], dest)
for i := 1; i < len(orig.ScopeLogs); i++ {
dest.WriteMore()
MarshalJSONScopeLogs(orig.ScopeLogs[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONResourceLogs unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONResourceLogs(orig *otlplogs.ResourceLogs, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
UnmarshalJSONResource(&orig.Resource, iter)
case "scopeLogs", "scope_logs":
for iter.ReadArray() {
orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
UnmarshalJSONScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoResourceLogs(orig *otlplogs.ResourceLogs) int {
var n int
var l int
_ = l
l = SizeProtoResource(&orig.Resource)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeLogs {
l = SizeProtoScopeLogs(orig.ScopeLogs[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoResource(&orig.Resource, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeLogs) - 1; i >= 0; i-- {
l = MarshalProtoScopeLogs(orig.ScopeLogs[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoResourceLogs(orig *otlplogs.ResourceLogs, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoResource(&orig.Resource, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
err = UnmarshalProtoScopeLogs(orig.ScopeLogs[len(orig.ScopeLogs)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
func CopyResourceLogsSlice(dest, src []*otlplogs.ResourceLogs) []*otlplogs.ResourceLogs {
var newDest []*otlplogs.ResourceLogs
if cap(dest) < len(src) {
newDest = make([]*otlplogs.ResourceLogs, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceLogs()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceLogs(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceLogs()
}
}
for i := range src {
CopyResourceLogs(newDest[i], src[i])
}
return newDest
}
func GenTestResourceLogsSlice() []*otlplogs.ResourceLogs {
orig := make([]*otlplogs.ResourceLogs, 5)
orig[0] = NewResourceLogs()
orig[1] = GenTestResourceLogs()
orig[2] = NewResourceLogs()
orig[3] = GenTestResourceLogs()
orig[4] = NewResourceLogs()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolResourceMetrics = sync.Pool{
New: func() any {
return &otlpmetrics.ResourceMetrics{}
},
}
)
func NewResourceMetrics() *otlpmetrics.ResourceMetrics {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.ResourceMetrics{}
}
return protoPoolResourceMetrics.Get().(*otlpmetrics.ResourceMetrics)
}
func DeleteResourceMetrics(orig *otlpmetrics.ResourceMetrics, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeMetrics {
DeleteScopeMetrics(orig.ScopeMetrics[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceMetrics.Put(orig)
}
}
func CopyResourceMetrics(dest, src *otlpmetrics.ResourceMetrics) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeMetrics = CopyScopeMetricsSlice(dest.ScopeMetrics, src.ScopeMetrics)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestResourceMetrics() *otlpmetrics.ResourceMetrics {
orig := NewResourceMetrics()
orig.Resource = *GenTestResource()
orig.ScopeMetrics = GenTestScopeMetricsSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONResourceMetrics marshals all properties from the current struct to the destination stream.
func MarshalJSONResourceMetrics(orig *otlpmetrics.ResourceMetrics, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
MarshalJSONResource(&orig.Resource, dest)
if len(orig.ScopeMetrics) > 0 {
dest.WriteObjectField("scopeMetrics")
dest.WriteArrayStart()
MarshalJSONScopeMetrics(orig.ScopeMetrics[0], dest)
for i := 1; i < len(orig.ScopeMetrics); i++ {
dest.WriteMore()
MarshalJSONScopeMetrics(orig.ScopeMetrics[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONResourceMetrics unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONResourceMetrics(orig *otlpmetrics.ResourceMetrics, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
UnmarshalJSONResource(&orig.Resource, iter)
case "scopeMetrics", "scope_metrics":
for iter.ReadArray() {
orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
UnmarshalJSONScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
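// The field switch accepts both the proto3 JSON names (camelCase, e.g.
// "scopeMetrics") and the original proto field names (snake_case, e.g.
// "scope_metrics"); proto3 JSON parsers are required to accept either form.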
func SizeProtoResourceMetrics(orig *otlpmetrics.ResourceMetrics) int {
var n int
var l int
_ = l
l = SizeProtoResource(&orig.Resource)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeMetrics {
l = SizeProtoScopeMetrics(orig.ScopeMetrics[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
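// Each field of ResourceMetrics is length-delimited (wire type LEN), so it
// costs one tag byte, plus a varint carrying the payload length, plus the
// payload itself: 1 + proto.Sov(uint64(l)) + l. For instance, a 5-byte nested
// Resource contributes 1 + 1 + 5 = 7 bytes, because varints under 128 fit in
// a single byte.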
func MarshalProtoResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoResource(&orig.Resource, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- {
l = MarshalProtoScopeMetrics(orig.ScopeMetrics[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
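// MarshalProtoResourceMetrics fills buf from the end toward the front: each
// field's payload is written first, then its length varint, then one tag
// byte. A tag is (field_number << 3) | wire_type, and with wire type LEN (2)
// that yields 0x0a = (1<<3)|2 for Resource, 0x12 = (2<<3)|2 for ScopeMetrics,
// and 0x1a = (3<<3)|2 for SchemaUrl. Writing backwards means every length is
// already known when its prefix is emitted, so no per-field sizing pass is
// needed.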
func UnmarshalProtoResourceMetrics(orig *otlpmetrics.ResourceMetrics, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoResource(&orig.Resource, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
err = UnmarshalProtoScopeMetrics(orig.ScopeMetrics[len(orig.ScopeMetrics)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
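// A minimal round-trip sketch (hypothetical, not generated code) showing how
// the three proto helpers above compose; the name roundTripResourceMetrics is
// illustrative only:
func roundTripResourceMetrics(src *otlpmetrics.ResourceMetrics) (*otlpmetrics.ResourceMetrics, error) {
// Size first so the buffer can be allocated exactly once.
buf := make([]byte, SizeProtoResourceMetrics(src))
// MarshalProto fills buf from the back and returns the byte count, so the
// encoded message occupies the trailing n bytes of buf.
n := MarshalProtoResourceMetrics(src, buf)
dst := NewResourceMetrics()
if err := UnmarshalProtoResourceMetrics(dst, buf[len(buf)-n:]); err != nil {
return nil, err
}
return dst, nil
}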
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyResourceMetricsSlice(dest, src []*otlpmetrics.ResourceMetrics) []*otlpmetrics.ResourceMetrics {
var newDest []*otlpmetrics.ResourceMetrics
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.ResourceMetrics, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceMetrics()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceMetrics(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceMetrics()
}
}
for i := range src {
CopyResourceMetrics(newDest[i], src[i])
}
return newDest
}
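// CopyResourceMetricsSlice reuses dest wherever possible: when dest has
// enough capacity it is resliced to len(src), surplus elements are deleted
// and nilled so the pool or GC can reclaim them, and only the missing tail
// elements are freshly allocated before every element is deep-copied from
// src.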
func GenTestResourceMetricsSlice() []*otlpmetrics.ResourceMetrics {
orig := make([]*otlpmetrics.ResourceMetrics, 5)
orig[0] = NewResourceMetrics()
orig[1] = GenTestResourceMetrics()
orig[2] = NewResourceMetrics()
orig[3] = GenTestResourceMetrics()
orig[4] = NewResourceMetrics()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolResourceProfiles = sync.Pool{
New: func() any {
return &otlpprofiles.ResourceProfiles{}
},
}
)
func NewResourceProfiles() *otlpprofiles.ResourceProfiles {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.ResourceProfiles{}
}
return protoPoolResourceProfiles.Get().(*otlpprofiles.ResourceProfiles)
}
func DeleteResourceProfiles(orig *otlpprofiles.ResourceProfiles, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeProfiles {
DeleteScopeProfiles(orig.ScopeProfiles[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceProfiles.Put(orig)
}
}
func CopyResourceProfiles(dest, src *otlpprofiles.ResourceProfiles) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeProfiles = CopyScopeProfilesSlice(dest.ScopeProfiles, src.ScopeProfiles)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestResourceProfiles() *otlpprofiles.ResourceProfiles {
orig := NewResourceProfiles()
orig.Resource = *GenTestResource()
orig.ScopeProfiles = GenTestScopeProfilesSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONResourceProfiles marshals all properties from the current struct to the destination stream.
func MarshalJSONResourceProfiles(orig *otlpprofiles.ResourceProfiles, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
MarshalJSONResource(&orig.Resource, dest)
if len(orig.ScopeProfiles) > 0 {
dest.WriteObjectField("scopeProfiles")
dest.WriteArrayStart()
MarshalJSONScopeProfiles(orig.ScopeProfiles[0], dest)
for i := 1; i < len(orig.ScopeProfiles); i++ {
dest.WriteMore()
MarshalJSONScopeProfiles(orig.ScopeProfiles[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONResourceProfiles unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONResourceProfiles(orig *otlpprofiles.ResourceProfiles, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
UnmarshalJSONResource(&orig.Resource, iter)
case "scopeProfiles", "scope_profiles":
for iter.ReadArray() {
orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
UnmarshalJSONScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoResourceProfiles(orig *otlpprofiles.ResourceProfiles) int {
var n int
var l int
_ = l
l = SizeProtoResource(&orig.Resource)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeProfiles {
l = SizeProtoScopeProfiles(orig.ScopeProfiles[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoResource(&orig.Resource, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- {
l = MarshalProtoScopeProfiles(orig.ScopeProfiles[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoResourceProfiles(orig *otlpprofiles.ResourceProfiles, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoResource(&orig.Resource, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
err = UnmarshalProtoScopeProfiles(orig.ScopeProfiles[len(orig.ScopeProfiles)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyResourceProfilesSlice(dest, src []*otlpprofiles.ResourceProfiles) []*otlpprofiles.ResourceProfiles {
var newDest []*otlpprofiles.ResourceProfiles
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.ResourceProfiles, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceProfiles()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceProfiles(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceProfiles()
}
}
for i := range src {
CopyResourceProfiles(newDest[i], src[i])
}
return newDest
}
func GenTestResourceProfilesSlice() []*otlpprofiles.ResourceProfiles {
orig := make([]*otlpprofiles.ResourceProfiles, 5)
orig[0] = NewResourceProfiles()
orig[1] = GenTestResourceProfiles()
orig[2] = NewResourceProfiles()
orig[3] = GenTestResourceProfiles()
orig[4] = NewResourceProfiles()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolResourceSpans = sync.Pool{
New: func() any {
return &otlptrace.ResourceSpans{}
},
}
)
func NewResourceSpans() *otlptrace.ResourceSpans {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.ResourceSpans{}
}
return protoPoolResourceSpans.Get().(*otlptrace.ResourceSpans)
}
func DeleteResourceSpans(orig *otlptrace.ResourceSpans, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeSpans {
DeleteScopeSpans(orig.ScopeSpans[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceSpans.Put(orig)
}
}
func CopyResourceSpans(dest, src *otlptrace.ResourceSpans) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeSpans = CopyScopeSpansSlice(dest.ScopeSpans, src.ScopeSpans)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestResourceSpans() *otlptrace.ResourceSpans {
orig := NewResourceSpans()
orig.Resource = *GenTestResource()
orig.ScopeSpans = GenTestScopeSpansSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONResourceSpans marshals all properties from the current struct to the destination stream.
func MarshalJSONResourceSpans(orig *otlptrace.ResourceSpans, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
MarshalJSONResource(&orig.Resource, dest)
if len(orig.ScopeSpans) > 0 {
dest.WriteObjectField("scopeSpans")
dest.WriteArrayStart()
MarshalJSONScopeSpans(orig.ScopeSpans[0], dest)
for i := 1; i < len(orig.ScopeSpans); i++ {
dest.WriteMore()
MarshalJSONScopeSpans(orig.ScopeSpans[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONResourceSpans unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONResourceSpans(orig *otlptrace.ResourceSpans, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
UnmarshalJSONResource(&orig.Resource, iter)
case "scopeSpans", "scope_spans":
for iter.ReadArray() {
orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
UnmarshalJSONScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoResourceSpans(orig *otlptrace.ResourceSpans) int {
var n int
var l int
_ = l
l = SizeProtoResource(&orig.Resource)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeSpans {
l = SizeProtoScopeSpans(orig.ScopeSpans[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoResource(&orig.Resource, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeSpans) - 1; i >= 0; i-- {
l = MarshalProtoScopeSpans(orig.ScopeSpans[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoResourceSpans(orig *otlptrace.ResourceSpans, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoResource(&orig.Resource, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
err = UnmarshalProtoScopeSpans(orig.ScopeSpans[len(orig.ScopeSpans)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
func CopyResourceSpansSlice(dest, src []*otlptrace.ResourceSpans) []*otlptrace.ResourceSpans {
var newDest []*otlptrace.ResourceSpans
if cap(dest) < len(src) {
newDest = make([]*otlptrace.ResourceSpans, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceSpans()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceSpans(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceSpans()
}
}
for i := range src {
CopyResourceSpans(newDest[i], src[i])
}
return newDest
}
func GenTestResourceSpansSlice() []*otlptrace.ResourceSpans {
orig := make([]*otlptrace.ResourceSpans, 5)
orig[0] = NewResourceSpans()
orig[1] = GenTestResourceSpans()
orig[2] = NewResourceSpans()
orig[3] = GenTestResourceSpans()
orig[4] = NewResourceSpans()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSample = sync.Pool{
New: func() any {
return &otlpprofiles.Sample{}
},
}
)
func NewSample() *otlpprofiles.Sample {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Sample{}
}
return protoPoolSample.Get().(*otlpprofiles.Sample)
}
func DeleteSample(orig *otlpprofiles.Sample, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolSample.Put(orig)
}
}
func CopySample(dest, src *otlpprofiles.Sample) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.StackIndex = src.StackIndex
dest.Values = CopyInt64Slice(dest.Values, src.Values)
dest.AttributeIndices = CopyInt32Slice(dest.AttributeIndices, src.AttributeIndices)
dest.LinkIndex = src.LinkIndex
dest.TimestampsUnixNano = CopyUint64Slice(dest.TimestampsUnixNano, src.TimestampsUnixNano)
}
func GenTestSample() *otlpprofiles.Sample {
orig := NewSample()
orig.StackIndex = int32(13)
orig.Values = GenTestInt64Slice()
orig.AttributeIndices = GenTestInt32Slice()
orig.LinkIndex = int32(13)
orig.TimestampsUnixNano = GenTestUint64Slice()
return orig
}
// MarshalJSONSample marshals all properties from the current struct to the destination stream.
func MarshalJSONSample(orig *otlpprofiles.Sample, dest *json.Stream) {
dest.WriteObjectStart()
if orig.StackIndex != int32(0) {
dest.WriteObjectField("stackIndex")
dest.WriteInt32(orig.StackIndex)
}
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
dest.WriteInt64(orig.Values[0])
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
dest.WriteInt64(orig.Values[i])
}
dest.WriteArrayEnd()
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
if orig.LinkIndex != int32(0) {
dest.WriteObjectField("linkIndex")
dest.WriteInt32(orig.LinkIndex)
}
if len(orig.TimestampsUnixNano) > 0 {
dest.WriteObjectField("timestampsUnixNano")
dest.WriteArrayStart()
dest.WriteUint64(orig.TimestampsUnixNano[0])
for i := 1; i < len(orig.TimestampsUnixNano); i++ {
dest.WriteMore()
dest.WriteUint64(orig.TimestampsUnixNano[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSample unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONSample(orig *otlpprofiles.Sample, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "stackIndex", "stack_index":
orig.StackIndex = iter.ReadInt32()
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, iter.ReadInt64())
}
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
case "linkIndex", "link_index":
orig.LinkIndex = iter.ReadInt32()
case "timestampsUnixNano", "timestamps_unix_nano":
for iter.ReadArray() {
orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, iter.ReadUint64())
}
default:
iter.Skip()
}
}
}
func SizeProtoSample(orig *otlpprofiles.Sample) int {
var n int
var l int
_ = l
if orig.StackIndex != 0 {
n += 1 + proto.Sov(uint64(orig.StackIndex))
}
if len(orig.Values) > 0 {
l = 0
for _, e := range orig.Values {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.LinkIndex != 0 {
n += 1 + proto.Sov(uint64(orig.LinkIndex))
}
l = len(orig.TimestampsUnixNano)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
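// Sizing note: Values and AttributeIndices are packed varint fields, so the
// payload is the sum of the varint widths of their elements, while
// TimestampsUnixNano is a packed fixed64 field whose payload is exactly
// 8 bytes per element (hence l *= 8). Each packed field then adds the usual
// tag byte plus a length varint on top of its payload.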
func MarshalProtoSample(orig *otlpprofiles.Sample, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.StackIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.StackIndex))
pos--
buf[pos] = 0x8
}
l = len(orig.Values)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Values[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x12
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x1a
}
if orig.LinkIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex))
pos--
buf[pos] = 0x20
}
l = len(orig.TimestampsUnixNano)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimestampsUnixNano[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x2a
}
return len(buf) - pos
}
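// The packed fields are written backwards like everything else: the elements
// first, then the byte length of the packed payload, then the tag. With wire
// type LEN (2), Values gets 0x12 = (2<<3)|2 and AttributeIndices gets
// 0x1a = (3<<3)|2, while TimestampsUnixNano gets 0x2a = (5<<3)|2 with each
// element stored as 8 little-endian bytes. The scalar StackIndex and
// LinkIndex use wire type VARINT (0): 0x08 = (1<<3)|0 and 0x20 = (4<<3)|0.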
func UnmarshalProtoSample(orig *otlpprofiles.Sample, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.StackIndex = int32(num)
case 2:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.Values = append(orig.Values, int64(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field Values", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Values = append(orig.Values, int64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
case 3:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.LinkIndex = int32(num)
case 5:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.TimestampsUnixNano = make([]uint64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.TimestampsUnixNano[i] = uint64(num)
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field TimestampsUnixNano", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, uint64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
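// The decoder accepts both encodings proto3 allows for repeated scalars: the
// packed form (one LEN record holding the concatenated elements) and the
// unpacked form (one VARINT or I64 record per element), so it stays
// compatible with either kind of encoder.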
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopySampleSlice(dest, src []*otlpprofiles.Sample) []*otlpprofiles.Sample {
var newDest []*otlpprofiles.Sample
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Sample, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSample()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSample(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSample()
}
}
for i := range src {
CopySample(newDest[i], src[i])
}
return newDest
}
func GenTestSampleSlice() []*otlpprofiles.Sample {
orig := make([]*otlpprofiles.Sample, 5)
orig[0] = NewSample()
orig[1] = GenTestSample()
orig[2] = NewSample()
orig[3] = GenTestSample()
orig[4] = NewSample()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolScopeLogs = sync.Pool{
New: func() any {
return &otlplogs.ScopeLogs{}
},
}
)
func NewScopeLogs() *otlplogs.ScopeLogs {
if !UseProtoPooling.IsEnabled() {
return &otlplogs.ScopeLogs{}
}
return protoPoolScopeLogs.Get().(*otlplogs.ScopeLogs)
}
func DeleteScopeLogs(orig *otlplogs.ScopeLogs, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.LogRecords {
DeleteLogRecord(orig.LogRecords[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeLogs.Put(orig)
}
}
func CopyScopeLogs(dest, src *otlplogs.ScopeLogs) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.LogRecords = CopyLogRecordSlice(dest.LogRecords, src.LogRecords)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestScopeLogs() *otlplogs.ScopeLogs {
orig := NewScopeLogs()
orig.Scope = *GenTestInstrumentationScope()
orig.LogRecords = GenTestLogRecordSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONScopeLogs marshals all properties from the current struct to the destination stream.
func MarshalJSONScopeLogs(orig *otlplogs.ScopeLogs, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
MarshalJSONInstrumentationScope(&orig.Scope, dest)
if len(orig.LogRecords) > 0 {
dest.WriteObjectField("logRecords")
dest.WriteArrayStart()
MarshalJSONLogRecord(orig.LogRecords[0], dest)
for i := 1; i < len(orig.LogRecords); i++ {
dest.WriteMore()
MarshalJSONLogRecord(orig.LogRecords[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONScopeLogs unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONScopeLogs(orig *otlplogs.ScopeLogs, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
UnmarshalJSONInstrumentationScope(&orig.Scope, iter)
case "logRecords", "log_records":
for iter.ReadArray() {
orig.LogRecords = append(orig.LogRecords, NewLogRecord())
UnmarshalJSONLogRecord(orig.LogRecords[len(orig.LogRecords)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoScopeLogs(orig *otlplogs.ScopeLogs) int {
var n int
var l int
_ = l
l = SizeProtoInstrumentationScope(&orig.Scope)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.LogRecords {
l = SizeProtoLogRecord(orig.LogRecords[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoInstrumentationScope(&orig.Scope, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.LogRecords) - 1; i >= 0; i-- {
l = MarshalProtoLogRecord(orig.LogRecords[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoScopeLogs(orig *otlplogs.ScopeLogs, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoInstrumentationScope(&orig.Scope, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LogRecords = append(orig.LogRecords, NewLogRecord())
err = UnmarshalProtoLogRecord(orig.LogRecords[len(orig.LogRecords)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
func CopyScopeLogsSlice(dest, src []*otlplogs.ScopeLogs) []*otlplogs.ScopeLogs {
var newDest []*otlplogs.ScopeLogs
if cap(dest) < len(src) {
newDest = make([]*otlplogs.ScopeLogs, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeLogs()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeLogs(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeLogs()
}
}
for i := range src {
CopyScopeLogs(newDest[i], src[i])
}
return newDest
}
func GenTestScopeLogsSlice() []*otlplogs.ScopeLogs {
orig := make([]*otlplogs.ScopeLogs, 5)
orig[0] = NewScopeLogs()
orig[1] = GenTestScopeLogs()
orig[2] = NewScopeLogs()
orig[3] = GenTestScopeLogs()
orig[4] = NewScopeLogs()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolScopeMetrics = sync.Pool{
New: func() any {
return &otlpmetrics.ScopeMetrics{}
},
}
)
func NewScopeMetrics() *otlpmetrics.ScopeMetrics {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.ScopeMetrics{}
}
return protoPoolScopeMetrics.Get().(*otlpmetrics.ScopeMetrics)
}
func DeleteScopeMetrics(orig *otlpmetrics.ScopeMetrics, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Metrics {
DeleteMetric(orig.Metrics[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeMetrics.Put(orig)
}
}
func CopyScopeMetrics(dest, src *otlpmetrics.ScopeMetrics) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Metrics = CopyMetricSlice(dest.Metrics, src.Metrics)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestScopeMetrics() *otlpmetrics.ScopeMetrics {
orig := NewScopeMetrics()
orig.Scope = *GenTestInstrumentationScope()
orig.Metrics = GenTestMetricSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONScopeMetrics marshals all properties from the current struct to the destination stream.
func MarshalJSONScopeMetrics(orig *otlpmetrics.ScopeMetrics, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
MarshalJSONInstrumentationScope(&orig.Scope, dest)
if len(orig.Metrics) > 0 {
dest.WriteObjectField("metrics")
dest.WriteArrayStart()
MarshalJSONMetric(orig.Metrics[0], dest)
for i := 1; i < len(orig.Metrics); i++ {
dest.WriteMore()
MarshalJSONMetric(orig.Metrics[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONScopeMetrics unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONScopeMetrics(orig *otlpmetrics.ScopeMetrics, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
UnmarshalJSONInstrumentationScope(&orig.Scope, iter)
case "metrics":
for iter.ReadArray() {
orig.Metrics = append(orig.Metrics, NewMetric())
UnmarshalJSONMetric(orig.Metrics[len(orig.Metrics)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoScopeMetrics(orig *otlpmetrics.ScopeMetrics) int {
var n int
var l int
_ = l
l = SizeProtoInstrumentationScope(&orig.Scope)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Metrics {
l = SizeProtoMetric(orig.Metrics[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoInstrumentationScope(&orig.Scope, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Metrics) - 1; i >= 0; i-- {
l = MarshalProtoMetric(orig.Metrics[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoScopeMetrics(orig *otlpmetrics.ScopeMetrics, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoInstrumentationScope(&orig.Scope, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Metrics = append(orig.Metrics, NewMetric())
err = UnmarshalProtoMetric(orig.Metrics[len(orig.Metrics)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopyScopeMetricsSlice(dest, src []*otlpmetrics.ScopeMetrics) []*otlpmetrics.ScopeMetrics {
var newDest []*otlpmetrics.ScopeMetrics
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.ScopeMetrics, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeMetrics()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeMetrics(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeMetrics()
}
}
for i := range src {
CopyScopeMetrics(newDest[i], src[i])
}
return newDest
}
func GenTestScopeMetricsSlice() []*otlpmetrics.ScopeMetrics {
orig := make([]*otlpmetrics.ScopeMetrics, 5)
orig[0] = NewScopeMetrics()
orig[1] = GenTestScopeMetrics()
orig[2] = NewScopeMetrics()
orig[3] = GenTestScopeMetrics()
orig[4] = NewScopeMetrics()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolScopeProfiles = sync.Pool{
New: func() any {
return &otlpprofiles.ScopeProfiles{}
},
}
)
func NewScopeProfiles() *otlpprofiles.ScopeProfiles {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.ScopeProfiles{}
}
return protoPoolScopeProfiles.Get().(*otlpprofiles.ScopeProfiles)
}
func DeleteScopeProfiles(orig *otlpprofiles.ScopeProfiles, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Profiles {
DeleteProfile(orig.Profiles[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeProfiles.Put(orig)
}
}
func CopyScopeProfiles(dest, src *otlpprofiles.ScopeProfiles) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Profiles = CopyProfileSlice(dest.Profiles, src.Profiles)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestScopeProfiles() *otlpprofiles.ScopeProfiles {
orig := NewScopeProfiles()
orig.Scope = *GenTestInstrumentationScope()
orig.Profiles = GenTestProfileSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONScopeProfiles marshals all properties from the current struct to the destination stream.
func MarshalJSONScopeProfiles(orig *otlpprofiles.ScopeProfiles, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
MarshalJSONInstrumentationScope(&orig.Scope, dest)
if len(orig.Profiles) > 0 {
dest.WriteObjectField("profiles")
dest.WriteArrayStart()
MarshalJSONProfile(orig.Profiles[0], dest)
for i := 1; i < len(orig.Profiles); i++ {
dest.WriteMore()
MarshalJSONProfile(orig.Profiles[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONScopeProfiles unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONScopeProfiles(orig *otlpprofiles.ScopeProfiles, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
UnmarshalJSONInstrumentationScope(&orig.Scope, iter)
case "profiles":
for iter.ReadArray() {
orig.Profiles = append(orig.Profiles, NewProfile())
UnmarshalJSONProfile(orig.Profiles[len(orig.Profiles)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoScopeProfiles(orig *otlpprofiles.ScopeProfiles) int {
var n int
var l int
_ = l
l = SizeProtoInstrumentationScope(&orig.Scope)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Profiles {
l = SizeProtoProfile(orig.Profiles[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoInstrumentationScope(&orig.Scope, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Profiles) - 1; i >= 0; i-- {
l = MarshalProtoProfile(orig.Profiles[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoScopeProfiles(orig *otlpprofiles.ScopeProfiles, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoInstrumentationScope(&orig.Scope, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Profiles = append(orig.Profiles, NewProfile())
err = UnmarshalProtoProfile(orig.Profiles[len(orig.Profiles)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyScopeProfilesSlice(dest, src []*otlpprofiles.ScopeProfiles) []*otlpprofiles.ScopeProfiles {
var newDest []*otlpprofiles.ScopeProfiles
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.ScopeProfiles, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeProfiles()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeProfiles(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeProfiles()
}
}
for i := range src {
CopyScopeProfiles(newDest[i], src[i])
}
return newDest
}
func GenTestScopeProfilesSlice() []*otlpprofiles.ScopeProfiles {
orig := make([]*otlpprofiles.ScopeProfiles, 5)
orig[0] = NewScopeProfiles()
orig[1] = GenTestScopeProfiles()
orig[2] = NewScopeProfiles()
orig[3] = GenTestScopeProfiles()
orig[4] = NewScopeProfiles()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolScopeSpans = sync.Pool{
New: func() any {
return &otlptrace.ScopeSpans{}
},
}
)
func NewScopeSpans() *otlptrace.ScopeSpans {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.ScopeSpans{}
}
return protoPoolScopeSpans.Get().(*otlptrace.ScopeSpans)
}
func DeleteScopeSpans(orig *otlptrace.ScopeSpans, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Spans {
DeleteSpan(orig.Spans[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeSpans.Put(orig)
}
}
func CopyScopeSpans(dest, src *otlptrace.ScopeSpans) {
// If copying to the same object, just return.
if src == dest {
return
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Spans = CopySpanSlice(dest.Spans, src.Spans)
dest.SchemaUrl = src.SchemaUrl
}
func GenTestScopeSpans() *otlptrace.ScopeSpans {
orig := NewScopeSpans()
orig.Scope = *GenTestInstrumentationScope()
orig.Spans = GenTestSpanSlice()
orig.SchemaUrl = "test_schemaurl"
return orig
}
// MarshalJSONScopeSpans marshals all properties from the current struct to the destination stream.
func MarshalJSONScopeSpans(orig *otlptrace.ScopeSpans, dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
MarshalJSONInstrumentationScope(&orig.Scope, dest)
if len(orig.Spans) > 0 {
dest.WriteObjectField("spans")
dest.WriteArrayStart()
MarshalJSONSpan(orig.Spans[0], dest)
for i := 1; i < len(orig.Spans); i++ {
dest.WriteMore()
MarshalJSONSpan(orig.Spans[i], dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONScopeSpans unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONScopeSpans(orig *otlptrace.ScopeSpans, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
UnmarshalJSONInstrumentationScope(&orig.Scope, iter)
case "spans":
for iter.ReadArray() {
orig.Spans = append(orig.Spans, NewSpan())
UnmarshalJSONSpan(orig.Spans[len(orig.Spans)-1], iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func SizeProtoScopeSpans(orig *otlptrace.ScopeSpans) int {
var n int
var l int
_ = l
l = SizeProtoInstrumentationScope(&orig.Scope)
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Spans {
l = SizeProtoSpan(orig.Spans[i])
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoInstrumentationScope(&orig.Scope, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Spans) - 1; i >= 0; i-- {
l = MarshalProtoSpan(orig.Spans[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func UnmarshalProtoScopeSpans(orig *otlptrace.ScopeSpans, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoInstrumentationScope(&orig.Scope, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Spans = append(orig.Spans, NewSpan())
err = UnmarshalProtoSpan(orig.Spans[len(orig.Spans)-1], buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
func CopyScopeSpansSlice(dest, src []*otlptrace.ScopeSpans) []*otlptrace.ScopeSpans {
var newDest []*otlptrace.ScopeSpans
if cap(dest) < len(src) {
newDest = make([]*otlptrace.ScopeSpans, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeSpans()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeSpans(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeSpans()
}
}
for i := range src {
CopyScopeSpans(newDest[i], src[i])
}
return newDest
}
func GenTestScopeSpansSlice() []*otlptrace.ScopeSpans {
orig := make([]*otlptrace.ScopeSpans, 5)
orig[0] = NewScopeSpans()
orig[1] = GenTestScopeSpans()
orig[2] = NewScopeSpans()
orig[3] = GenTestScopeSpans()
orig[4] = NewScopeSpans()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSpan = sync.Pool{
New: func() any {
return &otlptrace.Span{}
},
}
)
func NewSpan() *otlptrace.Span {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.Span{}
}
return protoPoolSpan.Get().(*otlptrace.Span)
}
func DeleteSpan(orig *otlptrace.Span, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
DeleteSpanID(&orig.ParentSpanId, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.Events {
DeleteSpan_Event(orig.Events[i], true)
}
for i := range orig.Links {
DeleteSpan_Link(orig.Links[i], true)
}
DeleteStatus(&orig.Status, false)
orig.Reset()
if nullable {
protoPoolSpan.Put(orig)
}
}
func CopySpan(dest, src *otlptrace.Span) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.TraceId = src.TraceId
dest.SpanId = src.SpanId
CopyTraceState(&dest.TraceState, &src.TraceState)
dest.ParentSpanId = src.ParentSpanId
dest.Flags = src.Flags
dest.Name = src.Name
dest.Kind = src.Kind
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.EndTimeUnixNano = src.EndTimeUnixNano
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Events = CopySpan_EventSlice(dest.Events, src.Events)
dest.DroppedEventsCount = src.DroppedEventsCount
dest.Links = CopySpan_LinkSlice(dest.Links, src.Links)
dest.DroppedLinksCount = src.DroppedLinksCount
CopyStatus(&dest.Status, &src.Status)
}
func GenTestSpan() *otlptrace.Span {
orig := NewSpan()
orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
orig.TraceState = *GenTestTraceState()
orig.ParentSpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
orig.Flags = uint32(13)
orig.Name = "test_name"
orig.Kind = otlptrace.Span_SpanKind(3)
orig.StartTimeUnixNano = 1234567890
orig.EndTimeUnixNano = 1234567890
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
orig.Events = GenTestSpan_EventSlice()
orig.DroppedEventsCount = uint32(13)
orig.Links = GenTestSpan_LinkSlice()
orig.DroppedLinksCount = uint32(13)
orig.Status = *GenTestStatus()
return orig
}
// MarshalJSONSpan marshals all properties from the current struct to the destination stream.
func MarshalJSONSpan(orig *otlptrace.Span, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TraceId != data.TraceID([16]byte{}) {
dest.WriteObjectField("traceId")
MarshalJSONTraceID(&orig.TraceId, dest)
}
if orig.SpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("spanId")
MarshalJSONSpanID(&orig.SpanId, dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
if orig.ParentSpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("parentSpanId")
MarshalJSONSpanID(&orig.ParentSpanId, dest)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if int32(orig.Kind) != 0 {
dest.WriteObjectField("kind")
dest.WriteInt32(int32(orig.Kind))
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.EndTimeUnixNano != uint64(0) {
dest.WriteObjectField("endTimeUnixNano")
dest.WriteUint64(orig.EndTimeUnixNano)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if len(orig.Events) > 0 {
dest.WriteObjectField("events")
dest.WriteArrayStart()
MarshalJSONSpan_Event(orig.Events[0], dest)
for i := 1; i < len(orig.Events); i++ {
dest.WriteMore()
MarshalJSONSpan_Event(orig.Events[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedEventsCount != uint32(0) {
dest.WriteObjectField("droppedEventsCount")
dest.WriteUint32(orig.DroppedEventsCount)
}
if len(orig.Links) > 0 {
dest.WriteObjectField("links")
dest.WriteArrayStart()
MarshalJSONSpan_Link(orig.Links[0], dest)
for i := 1; i < len(orig.Links); i++ {
dest.WriteMore()
MarshalJSONSpan_Link(orig.Links[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedLinksCount != uint32(0) {
dest.WriteObjectField("droppedLinksCount")
dest.WriteUint32(orig.DroppedLinksCount)
}
dest.WriteObjectField("status")
MarshalJSONStatus(&orig.Status, dest)
dest.WriteObjectEnd()
}
// UnmarshalJSONSpan unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSpan(orig *otlptrace.Span, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
UnmarshalJSONTraceID(&orig.TraceId, iter)
case "spanId", "span_id":
UnmarshalJSONSpanID(&orig.SpanId, iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "parentSpanId", "parent_span_id":
UnmarshalJSONSpanID(&orig.ParentSpanId, iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "name":
orig.Name = iter.ReadString()
case "kind":
orig.Kind = otlptrace.Span_SpanKind(iter.ReadEnumValue(otlptrace.Span_SpanKind_value))
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "endTimeUnixNano", "end_time_unix_nano":
orig.EndTimeUnixNano = iter.ReadUint64()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "events":
for iter.ReadArray() {
orig.Events = append(orig.Events, NewSpan_Event())
UnmarshalJSONSpan_Event(orig.Events[len(orig.Events)-1], iter)
}
case "droppedEventsCount", "dropped_events_count":
orig.DroppedEventsCount = iter.ReadUint32()
case "links":
for iter.ReadArray() {
orig.Links = append(orig.Links, NewSpan_Link())
UnmarshalJSONSpan_Link(orig.Links[len(orig.Links)-1], iter)
}
case "droppedLinksCount", "dropped_links_count":
orig.DroppedLinksCount = iter.ReadUint32()
case "status":
UnmarshalJSONStatus(&orig.Status, iter)
default:
iter.Skip()
}
}
}
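// Illustrative note (not generated): each field case above accepts both the
// lowerCamelCase name from the OTLP/JSON mapping ("traceId") and the original
// snake_case proto field name ("trace_id"), and unknown fields are skipped,
// so decoding is tolerant of either JSON dialect.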
func SizeProtoSpan(orig *otlptrace.Span) int {
var n int
var l int
_ = l
l = SizeProtoTraceID(&orig.TraceId)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoSpanID(&orig.SpanId)
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = SizeProtoSpanID(&orig.ParentSpanId)
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
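// 2-byte tag for field 16 with wire type I32, plus 4 bytes of fixed32 payload.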
n += 6
}
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Kind != 0 {
n += 1 + proto.Sov(uint64(orig.Kind))
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.EndTimeUnixNano != 0 {
n += 9
}
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
for i := range orig.Events {
l = SizeProtoSpan_Event(orig.Events[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedEventsCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedEventsCount))
}
for i := range orig.Links {
l = SizeProtoSpan_Link(orig.Links[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedLinksCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedLinksCount))
}
l = SizeProtoStatus(&orig.Status)
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func MarshalProtoSpan(orig *otlptrace.Span, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoTraceID(&orig.TraceId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = MarshalProtoSpanID(&orig.SpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
l = len(orig.TraceState)
if l > 0 {
pos -= l
copy(buf[pos:], orig.TraceState)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = MarshalProtoSpanID(&orig.ParentSpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
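// Field 16 with wire type I32 has a two-byte varint tag: (16<<3)|5 = 133,
// which encodes as 0x85 0x01. Since this encoder writes backwards, the 0x01
// byte is written first below.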
if orig.Flags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x1
pos--
buf[pos] = 0x85
}
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
if orig.Kind != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Kind))
pos--
buf[pos] = 0x30
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x39
}
if orig.EndTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.EndTimeUnixNano))
pos--
buf[pos] = 0x41
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x50
}
for i := len(orig.Events) - 1; i >= 0; i-- {
l = MarshalProtoSpan_Event(orig.Events[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
if orig.DroppedEventsCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedEventsCount))
pos--
buf[pos] = 0x60
}
for i := len(orig.Links) - 1; i >= 0; i-- {
l = MarshalProtoSpan_Link(orig.Links[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x6a
}
if orig.DroppedLinksCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedLinksCount))
pos--
buf[pos] = 0x70
}
l = MarshalProtoStatus(&orig.Status, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x7a
return len(buf) - pos
}
func UnmarshalProtoSpan(orig *otlptrace.Span, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoTraceID(&orig.TraceId, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.SpanId, buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.TraceState = string(buf[startPos:pos])
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.ParentSpanId, buf[startPos:pos])
if err != nil {
return err
}
case 16:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Kind = otlptrace.Span_SpanKind(num)
case 7:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 8:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.EndTimeUnixNano = uint64(num)
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Events = append(orig.Events, NewSpan_Event())
err = UnmarshalProtoSpan_Event(orig.Events[len(orig.Events)-1], buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedEventsCount = uint32(num)
case 13:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Links = append(orig.Links, NewSpan_Link())
err = UnmarshalProtoSpan_Link(orig.Links[len(orig.Links)-1], buf[startPos:pos])
if err != nil {
return err
}
case 14:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedLinksCount = uint32(num)
case 15:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoStatus(&orig.Status, buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
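// Illustrative sketch (not part of the generated code): a minimal proto
// round-trip using only helpers defined in this file. MarshalProtoSpan fills
// buf from the end backwards and returns the number of bytes written, so the
// encoded message occupies the tail of the buffer.
func exampleSpanProtoRoundTrip() error {
src := GenTestSpan()
defer DeleteSpan(src, true)
buf := make([]byte, SizeProtoSpan(src))
n := MarshalProtoSpan(src, buf)
dst := NewSpan()
defer DeleteSpan(dst, true)
return UnmarshalProtoSpan(dst, buf[len(buf)-n:])
}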
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSpan_Event = sync.Pool{
New: func() any {
return &otlptrace.Span_Event{}
},
}
)
func NewSpan_Event() *otlptrace.Span_Event {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.Span_Event{}
}
return protoPoolSpan_Event.Get().(*otlptrace.Span_Event)
}
func DeleteSpan_Event(orig *otlptrace.Span_Event, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolSpan_Event.Put(orig)
}
}
func CopySpan_Event(dest, src *otlptrace.Span_Event) {
// If copying to same object, just return.
if src == dest {
return
}
dest.TimeUnixNano = src.TimeUnixNano
dest.Name = src.Name
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
}
func GenTestSpan_Event() *otlptrace.Span_Event {
orig := NewSpan_Event()
orig.TimeUnixNano = 1234567890
orig.Name = "test_name"
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
return orig
}
// MarshalJSONSpan_Event marshals all properties from the current struct to the destination stream.
func MarshalJSONSpan_Event(orig *otlptrace.Span_Event, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSpan_Event unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSpan_Event(orig *otlptrace.Span_Event, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "name":
orig.Name = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func SizeProtoSpan_Event(orig *otlptrace.Span_Event) int {
var n int
var l int
_ = l
if orig.TimeUnixNano != 0 {
n += 9
}
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
return n
}
func MarshalProtoSpan_Event(orig *otlptrace.Span_Event, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x9
}
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
func UnmarshalProtoSpan_Event(orig *otlptrace.Span_Event, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
func CopySpan_EventSlice(dest, src []*otlptrace.Span_Event) []*otlptrace.Span_Event {
var newDest []*otlptrace.Span_Event
if cap(dest) < len(src) {
newDest = make([]*otlptrace.Span_Event, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan_Event()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpan_Event(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan_Event()
}
}
for i := range src {
CopySpan_Event(newDest[i], src[i])
}
return newDest
}
func GenTestSpan_EventSlice() []*otlptrace.Span_Event {
orig := make([]*otlptrace.Span_Event, 5)
orig[0] = NewSpan_Event()
orig[1] = GenTestSpan_Event()
orig[2] = NewSpan_Event()
orig[3] = GenTestSpan_Event()
orig[4] = NewSpan_Event()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSpan_Link = sync.Pool{
New: func() any {
return &otlptrace.Span_Link{}
},
}
)
func NewSpan_Link() *otlptrace.Span_Link {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.Span_Link{}
}
return protoPoolSpan_Link.Get().(*otlptrace.Span_Link)
}
func DeleteSpan_Link(orig *otlptrace.Span_Link, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolSpan_Link.Put(orig)
}
}
func CopySpan_Link(dest, src *otlptrace.Span_Link) {
// If copying to same object, just return.
if src == dest {
return
}
dest.TraceId = src.TraceId
dest.SpanId = src.SpanId
CopyTraceState(&dest.TraceState, &src.TraceState)
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Flags = src.Flags
}
func GenTestSpan_Link() *otlptrace.Span_Link {
orig := NewSpan_Link()
orig.TraceId = data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
orig.SpanId = data.SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
orig.TraceState = *GenTestTraceState()
orig.Attributes = GenTestKeyValueSlice()
orig.DroppedAttributesCount = uint32(13)
orig.Flags = uint32(13)
return orig
}
// MarshalJSONSpan_Link marshals all properties from the current struct to the destination stream.
func MarshalJSONSpan_Link(orig *otlptrace.Span_Link, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TraceId != data.TraceID([16]byte{}) {
dest.WriteObjectField("traceId")
MarshalJSONTraceID(&orig.TraceId, dest)
}
if orig.SpanId != data.SpanID([8]byte{}) {
dest.WriteObjectField("spanId")
MarshalJSONSpanID(&orig.SpanId, dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSpan_Link unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSpan_Link(orig *otlptrace.Span_Link, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
UnmarshalJSONTraceID(&orig.TraceId, iter)
case "spanId", "span_id":
UnmarshalJSONSpanID(&orig.SpanId, iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func SizeProtoSpan_Link(orig *otlptrace.Span_Link) int {
var n int
var l int
_ = l
l = SizeProtoTraceID(&orig.TraceId)
n += 1 + proto.Sov(uint64(l)) + l
l = SizeProtoSpanID(&orig.SpanId)
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
if orig.Flags != 0 {
n += 5
}
return n
}
func MarshalProtoSpan_Link(orig *otlptrace.Span_Link, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = MarshalProtoTraceID(&orig.TraceId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = MarshalProtoSpanID(&orig.SpanId, buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
l = len(orig.TraceState)
if l > 0 {
pos -= l
copy(buf[pos:], orig.TraceState)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x28
}
if orig.Flags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x35
}
return len(buf) - pos
}
func UnmarshalProtoSpan_Link(orig *otlptrace.Span_Link, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoTraceID(&orig.TraceId, buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = UnmarshalProtoSpanID(&orig.SpanId, buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.TraceState = string(buf[startPos:pos])
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 5:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 6:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
func CopySpan_LinkSlice(dest, src []*otlptrace.Span_Link) []*otlptrace.Span_Link {
var newDest []*otlptrace.Span_Link
if cap(dest) < len(src) {
newDest = make([]*otlptrace.Span_Link, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan_Link()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpan_Link(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan_Link()
}
}
for i := range src {
CopySpan_Link(newDest[i], src[i])
}
return newDest
}
func GenTestSpan_LinkSlice() []*otlptrace.Span_Link {
orig := make([]*otlptrace.Span_Link, 5)
orig[0] = NewSpan_Link()
orig[1] = GenTestSpan_Link()
orig[2] = NewSpan_Link()
orig[3] = GenTestSpan_Link()
orig[4] = NewSpan_Link()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
func CopySpanSlice(dest, src []*otlptrace.Span) []*otlptrace.Span {
var newDest []*otlptrace.Span
if cap(dest) < len(src) {
newDest = make([]*otlptrace.Span, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpan(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan()
}
}
for i := range src {
CopySpan(newDest[i], src[i])
}
return newDest
}
func GenTestSpanSlice() []*otlptrace.Span {
orig := make([]*otlptrace.Span, 5)
orig[0] = NewSpan()
orig[1] = GenTestSpan()
orig[2] = NewSpan()
orig[3] = GenTestSpan()
orig[4] = NewSpan()
return orig
}
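// Illustrative sketch (not part of the generated code): CopySpanSlice reuses
// dest's backing array when cap(dest) >= len(src) and allocates a new one
// otherwise, so callers must keep the returned slice rather than the old
// slice header.
func exampleCopySpanSlice() {
src := GenTestSpanSlice() // 5 elements
dest := make([]*otlptrace.Span, 0, 8) // enough capacity, so no reallocation
dest = CopySpanSlice(dest, src) // dest now holds 5 deep copies of src
_ = dest
}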
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolStack = sync.Pool{
New: func() any {
return &otlpprofiles.Stack{}
},
}
)
func NewStack() *otlpprofiles.Stack {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.Stack{}
}
return protoPoolStack.Get().(*otlpprofiles.Stack)
}
func DeleteStack(orig *otlpprofiles.Stack, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolStack.Put(orig)
}
}
func CopyStack(dest, src *otlpprofiles.Stack) {
// If copying to same object, just return.
if src == dest {
return
}
dest.LocationIndices = CopyInt32Slice(dest.LocationIndices, src.LocationIndices)
}
func GenTestStack() *otlpprofiles.Stack {
orig := NewStack()
orig.LocationIndices = GenTestInt32Slice()
return orig
}
// MarshalJSONStack marshals all properties from the current struct to the destination stream.
func MarshalJSONStack(orig *otlpprofiles.Stack, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.LocationIndices) > 0 {
dest.WriteObjectField("locationIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.LocationIndices[0])
for i := 1; i < len(orig.LocationIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.LocationIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONStack unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONStack(orig *otlpprofiles.Stack, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "locationIndices", "location_indices":
for iter.ReadArray() {
orig.LocationIndices = append(orig.LocationIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func SizeProtoStack(orig *otlpprofiles.Stack) int {
var n int
var l int
_ = l
if len(orig.LocationIndices) > 0 {
l = 0
for _, e := range orig.LocationIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoStack(orig *otlpprofiles.Stack, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.LocationIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoStack(orig *otlpprofiles.Stack, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.LocationIndices = append(orig.LocationIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field LocationIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.LocationIndices = append(orig.LocationIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
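// Illustrative note (not generated): LocationIndices is a packed repeated
// varint field, so the normal encoding is a single length-delimited record
// (WireTypeLen) holding back-to-back varints; the WireTypeVarint case above
// additionally accepts the unpacked form, one element per tag, as proto3
// decoders are expected to do.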
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyStackSlice(dest, src []*otlpprofiles.Stack) []*otlpprofiles.Stack {
var newDest []*otlpprofiles.Stack
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.Stack, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStack()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteStack(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStack()
}
}
for i := range src {
CopyStack(newDest[i], src[i])
}
return newDest
}
func GenTestStackSlice() []*otlpprofiles.Stack {
orig := make([]*otlpprofiles.Stack, 5)
orig[0] = NewStack()
orig[1] = GenTestStack()
orig[2] = NewStack()
orig[3] = GenTestStack()
orig[4] = NewStack()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolStatus = sync.Pool{
New: func() any {
return &otlptrace.Status{}
},
}
)
func NewStatus() *otlptrace.Status {
if !UseProtoPooling.IsEnabled() {
return &otlptrace.Status{}
}
return protoPoolStatus.Get().(*otlptrace.Status)
}
func DeleteStatus(orig *otlptrace.Status, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolStatus.Put(orig)
}
}
func CopyStatus(dest, src *otlptrace.Status) {
// If copying to same object, just return.
if src == dest {
return
}
dest.Message = src.Message
dest.Code = src.Code
}
func GenTestStatus() *otlptrace.Status {
orig := NewStatus()
orig.Message = "test_message"
orig.Code = otlptrace.Status_StatusCode(1)
return orig
}
// MarshalJSONStatus marshals all properties from the current struct to the destination stream.
func MarshalJSONStatus(orig *otlptrace.Status, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Message != "" {
dest.WriteObjectField("message")
dest.WriteString(orig.Message)
}
if int32(orig.Code) != 0 {
dest.WriteObjectField("code")
dest.WriteInt32(int32(orig.Code))
}
dest.WriteObjectEnd()
}
// UnmarshalJSONStatus unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONStatus(orig *otlptrace.Status, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "message":
orig.Message = iter.ReadString()
case "code":
orig.Code = otlptrace.Status_StatusCode(iter.ReadEnumValue(otlptrace.Status_StatusCode_value))
default:
iter.Skip()
}
}
}
func SizeProtoStatus(orig *otlptrace.Status) int {
var n int
var l int
_ = l
l = len(orig.Message)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Code != 0 {
n += 1 + proto.Sov(uint64(orig.Code))
}
return n
}
func MarshalProtoStatus(orig *otlptrace.Status, buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Message)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Message)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
if orig.Code != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Code))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func UnmarshalProtoStatus(orig *otlptrace.Status, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Message = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Code = otlptrace.Status_StatusCode(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
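// Illustrative note (not generated): decoding starts at field 2 because field
// number 1 is reserved in the OTLP Status message (it previously carried a
// since-removed deprecated status code), which is also why the marshaled tags
// above are 0x12 (field 2, Message) and 0x18 (field 3, Code).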
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type StringSliceWrapper struct {
orig *[]string
state *State
}
func GetStringSliceOrig(ms StringSliceWrapper) *[]string {
return ms.orig
}
func GetStringSliceState(ms StringSliceWrapper) *State {
return ms.state
}
func NewStringSliceWrapper(orig *[]string, state *State) StringSliceWrapper {
return StringSliceWrapper{orig: orig, state: state}
}
func GenTestStringSliceWrapper() StringSliceWrapper {
orig := GenTestStringSlice()
return NewStringSliceWrapper(&orig, NewState())
}
func CopyStringSlice(dst, src []string) []string {
return append(dst[:0], src...)
}
func GenTestStringSlice() []string {
return []string{"a", "b", "c"}
}
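// Illustrative sketch (not part of the generated code): CopyStringSlice
// truncates dst in place and appends, so it reuses dst's backing array when
// capacity allows; the caller must use the returned slice.
func exampleCopyStringSlice() {
dst := make([]string, 0, 4)
dst = CopyStringSlice(dst, GenTestStringSlice()) // ["a", "b", "c"], reusing dst's array
_ = dst
}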
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSum = sync.Pool{
New: func() any {
return &otlpmetrics.Sum{}
},
}
)
func NewSum() *otlpmetrics.Sum {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Sum{}
}
return protoPoolSum.Get().(*otlpmetrics.Sum)
}
func DeleteSum(orig *otlpmetrics.Sum, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteNumberDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolSum.Put(orig)
}
}
func CopySum(dest, src *otlpmetrics.Sum) {
// If copying to same object, just return.
if src == dest {
return
}
dest.DataPoints = CopyNumberDataPointSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
dest.IsMonotonic = src.IsMonotonic
}
func GenTestSum() *otlpmetrics.Sum {
orig := NewSum()
orig.DataPoints = GenTestNumberDataPointSlice()
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(1)
orig.IsMonotonic = true
return orig
}
// MarshalJSONSum marshals all properties from the current struct to the destination stream.
func MarshalJSONSum(orig *otlpmetrics.Sum, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
MarshalJSONNumberDataPoint(orig.DataPoints[0], dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
MarshalJSONNumberDataPoint(orig.DataPoints[i], dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
if orig.IsMonotonic {
dest.WriteObjectField("isMonotonic")
dest.WriteBool(orig.IsMonotonic)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSum unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSum(orig *otlpmetrics.Sum, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
UnmarshalJSONNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(iter.ReadEnumValue(otlpmetrics.AggregationTemporality_value))
case "isMonotonic", "is_monotonic":
orig.IsMonotonic = iter.ReadBool()
default:
iter.Skip()
}
}
}
func SizeProtoSum(orig *otlpmetrics.Sum) int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = SizeProtoNumberDataPoint(orig.DataPoints[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
if orig.IsMonotonic {
n += 2
}
return n
}
func MarshalProtoSum(orig *otlpmetrics.Sum, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = MarshalProtoNumberDataPoint(orig.DataPoints[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
if orig.IsMonotonic {
pos--
buf[pos] = 1 // proto3 bools encode as varint 1; false values are omitted entirely
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func UnmarshalProtoSum(orig *otlpmetrics.Sum, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
err = UnmarshalProtoNumberDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = otlpmetrics.AggregationTemporality(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.IsMonotonic = num != 0
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSummary = sync.Pool{
New: func() any {
return &otlpmetrics.Summary{}
},
}
)
func NewSummary() *otlpmetrics.Summary {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.Summary{}
}
return protoPoolSummary.Get().(*otlpmetrics.Summary)
}
func DeleteSummary(orig *otlpmetrics.Summary, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteSummaryDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolSummary.Put(orig)
}
}
func CopySummary(dest, src *otlpmetrics.Summary) {
// If copying to same object, just return.
if src == dest {
return
}
dest.DataPoints = CopySummaryDataPointSlice(dest.DataPoints, src.DataPoints)
}
func GenTestSummary() *otlpmetrics.Summary {
orig := NewSummary()
orig.DataPoints = GenTestSummaryDataPointSlice()
return orig
}
// MarshalJSONSummary marshals all properties from the current struct to the destination stream.
func MarshalJSONSummary(orig *otlpmetrics.Summary, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
MarshalJSONSummaryDataPoint(orig.DataPoints[0], dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
MarshalJSONSummaryDataPoint(orig.DataPoints[i], dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSummary unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSummary(orig *otlpmetrics.Summary, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
UnmarshalJSONSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], iter)
}
default:
iter.Skip()
}
}
}
func SizeProtoSummary(orig *otlpmetrics.Summary) int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = SizeProtoSummaryDataPoint(orig.DataPoints[i])
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func MarshalProtoSummary(orig *otlpmetrics.Summary, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = MarshalProtoSummaryDataPoint(orig.DataPoints[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func UnmarshalProtoSummary(orig *otlpmetrics.Summary, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
err = UnmarshalProtoSummaryDataPoint(orig.DataPoints[len(orig.DataPoints)-1], buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSummaryDataPoint = sync.Pool{
New: func() any {
return &otlpmetrics.SummaryDataPoint{}
},
}
)
func NewSummaryDataPoint() *otlpmetrics.SummaryDataPoint {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.SummaryDataPoint{}
}
return protoPoolSummaryDataPoint.Get().(*otlpmetrics.SummaryDataPoint)
}
func DeleteSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.QuantileValues {
DeleteSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], true)
}
orig.Reset()
if nullable {
protoPoolSummaryDataPoint.Put(orig)
}
}
func CopySummaryDataPoint(dest, src *otlpmetrics.SummaryDataPoint) {
// If copying to same object, just return.
if src == dest {
return
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
dest.Sum = src.Sum
dest.QuantileValues = CopySummaryDataPoint_ValueAtQuantileSlice(dest.QuantileValues, src.QuantileValues)
dest.Flags = src.Flags
}
func GenTestSummaryDataPoint() *otlpmetrics.SummaryDataPoint {
orig := NewSummaryDataPoint()
orig.Attributes = GenTestKeyValueSlice()
orig.StartTimeUnixNano = 1234567890
orig.TimeUnixNano = 1234567890
orig.Count = uint64(13)
orig.Sum = float64(3.1415926)
orig.QuantileValues = GenTestSummaryDataPoint_ValueAtQuantileSlice()
orig.Flags = 1
return orig
}
// MarshalJSONSummaryDataPoint marshals all properties from the current struct to the destination stream.
func MarshalJSONSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
MarshalJSONKeyValue(&orig.Attributes[0], dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
MarshalJSONKeyValue(&orig.Attributes[i], dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig.Sum != float64(0) {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if len(orig.QuantileValues) > 0 {
dest.WriteObjectField("quantileValues")
dest.WriteArrayStart()
MarshalJSONSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[0], dest)
for i := 1; i < len(orig.QuantileValues); i++ {
dest.WriteMore()
MarshalJSONSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSummaryDataPoint unmarshals all properties into the current struct from the source iterator.
func UnmarshalJSONSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
UnmarshalJSONKeyValue(&orig.Attributes[len(orig.Attributes)-1], iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
orig.Sum = iter.ReadFloat64()
case "quantileValues", "quantile_values":
for iter.ReadArray() {
orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPoint_ValueAtQuantile())
UnmarshalJSONSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func SizeProtoSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint) int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = SizeProtoKeyValue(&orig.Attributes[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig.Sum != 0 {
n += 9
}
for i := range orig.QuantileValues {
l = SizeProtoSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i])
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
return n
}
func MarshalProtoSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = MarshalProtoKeyValue(&orig.Attributes[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig.Sum != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
for i := len(orig.QuantileValues) - 1; i >= 0; i-- {
l = MarshalProtoSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[i], buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x40
}
return len(buf) - pos
}
func UnmarshalProtoSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next field tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, otlpcommon.KeyValue{})
err = UnmarshalProtoKeyValue(&orig.Attributes[len(orig.Attributes)-1], buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Sum = math.Float64frombits(num)
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPoint_ValueAtQuantile())
err = UnmarshalProtoSummaryDataPoint_ValueAtQuantile(orig.QuantileValues[len(orig.QuantileValues)-1], buf[startPos:pos])
if err != nil {
return err
}
case 8:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
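// The three Proto helpers above are meant to be used together: size the message, marshal
// it backwards into the tail of a buffer, then unmarshal from the produced bytes. A minimal
// sketch (illustrative only, not part of the generated API; assumes this package's context):
//
//	orig := GenTestSummaryDataPoint()
//	buf := make([]byte, SizeProtoSummaryDataPoint(orig))
//	// MarshalProto* fills buf from the end and returns the number of bytes written.
//	n := MarshalProtoSummaryDataPoint(orig, buf)
//	dest := NewSummaryDataPoint()
//	err := UnmarshalProtoSummaryDataPoint(dest, buf[len(buf)-n:])
//	_ = err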
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolSummaryDataPoint_ValueAtQuantile = sync.Pool{
New: func() any {
return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}
},
}
)
func NewSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile {
if !UseProtoPooling.IsEnabled() {
return &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}
}
return protoPoolSummaryDataPoint_ValueAtQuantile.Get().(*otlpmetrics.SummaryDataPoint_ValueAtQuantile)
}
func DeleteSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolSummaryDataPoint_ValueAtQuantile.Put(orig)
}
}
func CopySummaryDataPoint_ValueAtQuantile(dest, src *otlpmetrics.SummaryDataPoint_ValueAtQuantile) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.Quantile = src.Quantile
dest.Value = src.Value
}
func GenTestSummaryDataPoint_ValueAtQuantile() *otlpmetrics.SummaryDataPoint_ValueAtQuantile {
orig := NewSummaryDataPoint_ValueAtQuantile()
orig.Quantile = float64(3.1415926)
orig.Value = float64(3.1415926)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, dest *json.Stream) {
dest.WriteObjectStart()
if orig.Quantile != float64(0) {
dest.WriteObjectField("quantile")
dest.WriteFloat64(orig.Quantile)
}
if orig.Value != float64(0) {
dest.WriteObjectField("value")
dest.WriteFloat64(orig.Value)
}
dest.WriteObjectEnd()
}
// UnmarshalJSONSummaryDataPoint_ValueAtQuantile unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "quantile":
orig.Quantile = iter.ReadFloat64()
case "value":
orig.Value = iter.ReadFloat64()
default:
iter.Skip()
}
}
}
func SizeProtoSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile) int {
var n int
var l int
_ = l
if orig.Quantile != 0 {
n += 9
}
if orig.Value != 0 {
n += 9
}
return n
}
func MarshalProtoSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.Quantile != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile))
pos--
buf[pos] = 0x9
}
if orig.Value != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value))
pos--
buf[pos] = 0x11
}
return len(buf) - pos
}
func UnmarshalProtoSummaryDataPoint_ValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the tag of the next field.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Quantile = math.Float64frombits(num)
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Value = math.Float64frombits(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopySummaryDataPoint_ValueAtQuantileSlice(dest, src []*otlpmetrics.SummaryDataPoint_ValueAtQuantile) []*otlpmetrics.SummaryDataPoint_ValueAtQuantile {
var newDest []*otlpmetrics.SummaryDataPoint_ValueAtQuantile
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint_ValueAtQuantile()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPoint_ValueAtQuantile(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint_ValueAtQuantile()
}
}
for i := range src {
CopySummaryDataPoint_ValueAtQuantile(newDest[i], src[i])
}
return newDest
}
func GenTestSummaryDataPoint_ValueAtQuantileSlice() []*otlpmetrics.SummaryDataPoint_ValueAtQuantile {
orig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, 5)
orig[0] = NewSummaryDataPoint_ValueAtQuantile()
orig[1] = GenTestSummaryDataPoint_ValueAtQuantile()
orig[2] = NewSummaryDataPoint_ValueAtQuantile()
orig[3] = GenTestSummaryDataPoint_ValueAtQuantile()
orig[4] = NewSummaryDataPoint_ValueAtQuantile()
return orig
}
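// A minimal usage sketch of the copy helper above (illustrative only): when the destination
// already has enough capacity, its backing array is reused instead of allocating a new one.
//
//	src := GenTestSummaryDataPoint_ValueAtQuantileSlice() // len 5
//	dest := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, 0, 8)
//	dest = CopySummaryDataPoint_ValueAtQuantileSlice(dest, src)
//	// len(dest) == 5, cap(dest) == 8: the original backing array was reused.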
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
func CopySummaryDataPointSlice(dest, src []*otlpmetrics.SummaryDataPoint) []*otlpmetrics.SummaryDataPoint {
var newDest []*otlpmetrics.SummaryDataPoint
if cap(dest) < len(src) {
newDest = make([]*otlpmetrics.SummaryDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint()
}
}
for i := range src {
CopySummaryDataPoint(newDest[i], src[i])
}
return newDest
}
func GenTestSummaryDataPointSlice() []*otlpmetrics.SummaryDataPoint {
orig := make([]*otlpmetrics.SummaryDataPoint, 5)
orig[0] = NewSummaryDataPoint()
orig[1] = GenTestSummaryDataPoint()
orig[2] = NewSummaryDataPoint()
orig[3] = GenTestSummaryDataPoint()
orig[4] = NewSummaryDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type UInt64SliceWrapper struct {
orig *[]uint64
state *State
}
func GetUInt64SliceOrig(ms UInt64SliceWrapper) *[]uint64 {
return ms.orig
}
func GetUInt64SliceState(ms UInt64SliceWrapper) *State {
return ms.state
}
func NewUInt64SliceWrapper(orig *[]uint64, state *State) UInt64SliceWrapper {
return UInt64SliceWrapper{orig: orig, state: state}
}
func GenTestUInt64SliceWrapper() UInt64SliceWrapper {
orig := GenTestUint64Slice()
return NewUInt64SliceWrapper(&orig, NewState())
}
func CopyUint64Slice(dst, src []uint64) []uint64 {
return append(dst[:0], src...)
}
func GenTestUint64Slice() []uint64 {
return []uint64{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
var (
protoPoolValueType = sync.Pool{
New: func() any {
return &otlpprofiles.ValueType{}
},
}
)
func NewValueType() *otlpprofiles.ValueType {
if !UseProtoPooling.IsEnabled() {
return &otlpprofiles.ValueType{}
}
return protoPoolValueType.Get().(*otlpprofiles.ValueType)
}
func DeleteValueType(orig *otlpprofiles.ValueType, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolValueType.Put(orig)
}
}
func CopyValueType(dest, src *otlpprofiles.ValueType) {
// If copying to the same object, just return.
if src == dest {
return
}
dest.TypeStrindex = src.TypeStrindex
dest.UnitStrindex = src.UnitStrindex
dest.AggregationTemporality = src.AggregationTemporality
}
func GenTestValueType() *otlpprofiles.ValueType {
orig := NewValueType()
orig.TypeStrindex = int32(13)
orig.UnitStrindex = int32(13)
orig.AggregationTemporality = otlpprofiles.AggregationTemporality(1)
return orig
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func MarshalJSONValueType(orig *otlpprofiles.ValueType, dest *json.Stream) {
dest.WriteObjectStart()
if orig.TypeStrindex != int32(0) {
dest.WriteObjectField("typeStrindex")
dest.WriteInt32(orig.TypeStrindex)
}
if orig.UnitStrindex != int32(0) {
dest.WriteObjectField("unitStrindex")
dest.WriteInt32(orig.UnitStrindex)
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
dest.WriteObjectEnd()
}
// UnmarshalJSONValueType unmarshals all properties from the current struct from the source iterator.
func UnmarshalJSONValueType(orig *otlpprofiles.ValueType, iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "typeStrindex", "type_strindex":
orig.TypeStrindex = iter.ReadInt32()
case "unitStrindex", "unit_strindex":
orig.UnitStrindex = iter.ReadInt32()
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = otlpprofiles.AggregationTemporality(iter.ReadEnumValue(otlpprofiles.AggregationTemporality_value))
default:
iter.Skip()
}
}
}
func SizeProtoValueType(orig *otlpprofiles.ValueType) int {
var n int
var l int
_ = l
if orig.TypeStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.TypeStrindex))
}
if orig.UnitStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.UnitStrindex))
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
return n
}
func MarshalProtoValueType(orig *otlpprofiles.ValueType, buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TypeStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.TypeStrindex))
pos--
buf[pos] = 0x8
}
if orig.UnitStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
pos--
buf[pos] = 0x10
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func UnmarshalProtoValueType(orig *otlpprofiles.ValueType, buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the tag of the next field.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.TypeStrindex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.UnitStrindex = int32(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = otlpprofiles.AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
func CopyValueTypeSlice(dest, src []*otlpprofiles.ValueType) []*otlpprofiles.ValueType {
var newDest []*otlpprofiles.ValueType
if cap(dest) < len(src) {
newDest = make([]*otlpprofiles.ValueType, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewValueType()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteValueType(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewValueType()
}
}
for i := range src {
CopyValueType(newDest[i], src[i])
}
return newDest
}
func GenTestValueTypeSlice() []*otlpprofiles.ValueType {
orig := make([]*otlpprofiles.ValueType, 5)
orig[0] = NewValueType()
orig[1] = GenTestValueType()
orig[2] = NewValueType()
orig[3] = GenTestValueType()
orig[4] = NewValueType()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package grpcencoding // import "go.opentelemetry.io/collector/pdata/internal/grpcencoding"
import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/mem"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
otlpcollectortraces "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)
var (
defaultBufferPoolSizes = []int{
256,
4 << 10, // 4KB (go page size)
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
512 << 10, // 512KB
1 << 20, // 1MB
4 << 20, // 4MB
16 << 20, // 16MB
}
otelBufferPool = mem.NewTieredBufferPool(defaultBufferPoolSizes...)
)
// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
// created with mem.NewTieredBufferPool that uses a set of default sizes optimized for
// expected telemetry workflows.
func DefaultBufferPool() mem.BufferPool {
return otelBufferPool
}
// Name is the name registered for the proto compressor.
const Name = "proto"
func init() {
encoding.RegisterCodecV2(&codecV2{delegate: encoding.GetCodecV2(Name)})
}
// codecV2 is a custom proto codec that uses a different tier schema for the TieredBufferPool
// and calls into the custom marshal/unmarshal logic that works with memory pooling.
// If the payload is not an OTLP payload, it falls back to the default grpc/proto encoding.
type codecV2 struct {
delegate encoding.CodecV2
}
func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) {
switch req := v.(type) {
case *otlpcollectorlogs.ExportLogsServiceRequest:
size := internal.SizeProtoExportLogsServiceRequest(req)
buf := otelBufferPool.Get(size)
n := internal.MarshalProtoExportLogsServiceRequest(req, (*buf)[:size])
*buf = (*buf)[:n]
return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
case *otlpcollectormetrics.ExportMetricsServiceRequest:
size := internal.SizeProtoExportMetricsServiceRequest(req)
buf := otelBufferPool.Get(size)
n := internal.MarshalProtoExportMetricsServiceRequest(req, (*buf)[:size])
*buf = (*buf)[:n]
return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
case *otlpcollectortraces.ExportTraceServiceRequest:
size := internal.SizeProtoExportTraceServiceRequest(req)
buf := otelBufferPool.Get(size)
n := internal.MarshalProtoExportTraceServiceRequest(req, (*buf)[:size])
*buf = (*buf)[:n]
return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
case *otlpcollectorprofile.ExportProfilesServiceRequest:
size := internal.SizeProtoExportProfilesServiceRequest(req)
buf := otelBufferPool.Get(size)
n := internal.MarshalProtoExportProfilesServiceRequest(req, (*buf)[:size])
*buf = (*buf)[:n]
return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
}
return c.delegate.Marshal(v)
}
func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
switch req := v.(type) {
case *otlpcollectorlogs.ExportLogsServiceRequest:
// TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
buf := data.MaterializeToBuffer(otelBufferPool)
defer buf.Free()
return internal.UnmarshalProtoExportLogsServiceRequest(req, buf.ReadOnlyData())
case *otlpcollectormetrics.ExportMetricsServiceRequest:
// TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
buf := data.MaterializeToBuffer(otelBufferPool)
defer buf.Free()
return internal.UnmarshalProtoExportMetricsServiceRequest(req, buf.ReadOnlyData())
case *otlpcollectortraces.ExportTraceServiceRequest:
// TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
buf := data.MaterializeToBuffer(otelBufferPool)
defer buf.Free()
return internal.UnmarshalProtoExportTraceServiceRequest(req, buf.ReadOnlyData())
case *otlpcollectorprofile.ExportProfilesServiceRequest:
// TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
buf := data.MaterializeToBuffer(otelBufferPool)
defer buf.Free()
return internal.UnmarshalProtoExportProfilesServiceRequest(req, buf.ReadOnlyData())
}
return c.delegate.Unmarshal(data, v)
}
func (c *codecV2) Name() string {
return Name
}
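// This codec is wired in purely through the init side effect above. A minimal sketch of
// enabling it in a binary (illustrative only; the blank import is the only step needed):
//
//	import (
//		_ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // replaces the default "proto" codec
//	)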
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"go.opentelemetry.io/collector/pdata/internal/data"
"go.opentelemetry.io/collector/pdata/internal/json"
)
func DeleteTraceID(*data.TraceID, bool) {}
func DeleteSpanID(*data.SpanID, bool) {}
func DeleteProfileID(*data.ProfileID, bool) {}
func GenTestTraceID() *data.TraceID {
id := data.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
return &id
}
func GenTestSpanID() *data.SpanID {
id := data.SpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})
return &id
}
func GenTestProfileID() *data.ProfileID {
id := data.ProfileID([16]byte{16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
return &id
}
func MarshalJSONTraceID(id *data.TraceID, dest *json.Stream) {
id.MarshalJSONStream(dest)
}
func MarshalJSONSpanID(id *data.SpanID, dest *json.Stream) {
id.MarshalJSONStream(dest)
}
func MarshalJSONProfileID(id *data.ProfileID, dest *json.Stream) {
id.MarshalJSONStream(dest)
}
func UnmarshalJSONTraceID(id *data.TraceID, iter *json.Iterator) {
id.UnmarshalJSONIter(iter)
}
func UnmarshalJSONSpanID(id *data.SpanID, iter *json.Iterator) {
id.UnmarshalJSONIter(iter)
}
func UnmarshalJSONProfileID(id *data.ProfileID, iter *json.Iterator) {
id.UnmarshalJSONIter(iter)
}
func SizeProtoTraceID(id *data.TraceID) int {
return id.Size()
}
func SizeProtoSpanID(id *data.SpanID) int {
return id.Size()
}
func SizeProtoProfileID(id *data.ProfileID) int {
return id.Size()
}
func MarshalProtoTraceID(id *data.TraceID, buf []byte) int {
size := id.Size()
_, _ = id.MarshalTo(buf[len(buf)-size:])
return size
}
func MarshalProtoSpanID(id *data.SpanID, buf []byte) int {
size := id.Size()
_, _ = id.MarshalTo(buf[len(buf)-size:])
return size
}
func MarshalProtoProfileID(id *data.ProfileID, buf []byte) int {
size := id.Size()
_, _ = id.MarshalTo(buf[len(buf)-size:])
return size
}
func UnmarshalProtoTraceID(id *data.TraceID, buf []byte) error {
return id.Unmarshal(buf)
}
func UnmarshalProtoSpanID(id *data.SpanID, buf []byte) error {
return id.Unmarshal(buf)
}
func UnmarshalProtoProfileID(id *data.ProfileID, buf []byte) error {
return id.Unmarshal(buf)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package json // import "go.opentelemetry.io/collector/pdata/internal/json"
import (
"encoding/base64"
"strconv"
jsoniter "github.com/json-iterator/go"
)
func BorrowIterator(data []byte) *Iterator {
return &Iterator{
delegate: jsoniter.ConfigFastest.BorrowIterator(data),
}
}
func ReturnIterator(s *Iterator) {
jsoniter.ConfigFastest.ReturnIterator(s.delegate)
}
type Iterator struct {
delegate *jsoniter.Iterator
}
// ReadInt32 unmarshals JSON data into an int32. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadInt32() int32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadInt32()
case jsoniter.StringValue:
val, err := strconv.ParseInt(iter.ReadString(), 10, 32)
if err != nil {
iter.ReportError("ReadInt32", err.Error())
return 0
}
return int32(val)
default:
iter.ReportError("ReadInt32", "unsupported value type")
return 0
}
}
// ReadUint32 unmarshals JSON data into a uint32. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadUint32() uint32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadUint32()
case jsoniter.StringValue:
val, err := strconv.ParseUint(iter.ReadString(), 10, 32)
if err != nil {
iter.ReportError("ReadUint32", err.Error())
return 0
}
return uint32(val)
default:
iter.ReportError("ReadUint32", "unsupported value type")
return 0
}
}
// ReadInt64 unmarshals JSON data into an int64. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadInt64() int64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadInt64()
case jsoniter.StringValue:
val, err := strconv.ParseInt(iter.ReadString(), 10, 64)
if err != nil {
iter.ReportError("ReadInt64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadInt64", "unsupported value type")
return 0
}
}
// ReadUint64 unmarshals JSON data into a uint64. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadUint64() uint64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadUint64()
case jsoniter.StringValue:
val, err := strconv.ParseUint(iter.ReadString(), 10, 64)
if err != nil {
iter.ReportError("ReadUint64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadUint64", "unsupported value type")
return 0
}
}
// ReadFloat32 unmarshals JSON data into a float32. Accepts both numbers and strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadFloat32() float32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadFloat32()
case jsoniter.StringValue:
val, err := strconv.ParseFloat(iter.ReadString(), 32)
if err != nil {
iter.ReportError("ReadFloat32", err.Error())
return 0
}
return float32(val)
default:
iter.ReportError("ReadFloat32", "unsupported value type")
return 0
}
}
// ReadFloat64 unmarshals JSON data into a float64. Accepts both numbers and strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadFloat64() float64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadFloat64()
case jsoniter.StringValue:
val, err := strconv.ParseFloat(iter.ReadString(), 64)
if err != nil {
iter.ReportError("ReadFloat64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadFloat64", "unsupported value type")
return 0
}
}
// ReadBool reads a JSON value as a bool.
func (iter *Iterator) ReadBool() bool {
return iter.delegate.ReadBool()
}
// ReadString reads a string from the iterator.
func (iter *Iterator) ReadString() string {
return iter.delegate.ReadString()
}
// ReadBytes reads base64-encoded bytes from the iterator.
func (iter *Iterator) ReadBytes() []byte {
buf := iter.ReadStringAsSlice()
if len(buf) == 0 {
return nil
}
orig := make([]byte, base64.StdEncoding.DecodedLen(len(buf)))
n, err := base64.StdEncoding.Decode(orig, buf)
if err != nil {
iter.ReportError("base64.Decode", err.Error())
}
return orig[:n]
}
// ReadStringAsSlice reads a string from the iterator without copying it into string form.
// The returned []byte cannot be kept, as it will change after the next iterator call.
func (iter *Iterator) ReadStringAsSlice() []byte {
return iter.delegate.ReadStringAsSlice()
}
// ReportError records an error in the iterator instance together with the current position.
func (iter *Iterator) ReportError(operation, msg string) {
iter.delegate.ReportError(operation, msg)
}
// Error returns the recorded error, if any; otherwise it returns nil.
func (iter *Iterator) Error() error {
return iter.delegate.Error
}
// Skip skips a JSON value and positions the iterator at the next value.
func (iter *Iterator) Skip() {
iter.delegate.Skip()
}
// ReadArray reads an array element and returns true if the array has more elements to read.
func (iter *Iterator) ReadArray() bool {
return iter.delegate.ReadArray()
}
// ReadObject reads one field from an object.
// If the object has ended, it returns the empty string; otherwise it returns the field name.
func (iter *Iterator) ReadObject() string {
return iter.delegate.ReadObject()
}
// ReadEnumValue returns the enum integer value representation. Accepts both enum names and enum integer values.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadEnumValue(valueMap map[string]int32) int32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.ReadInt32()
case jsoniter.StringValue:
val, ok := valueMap[iter.ReadString()]
// Same behavior as the official protobuf JSON decoder,
// see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81
if !ok {
iter.ReportError("ReadEnumValue", "unknown string value")
return 0
}
return val
default:
iter.ReportError("ReadEnumValue", "unsupported value type")
return 0
}
}
// ResetBytes reuses the iterator instance by specifying another byte slice as input.
func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.delegate.ResetBytes(input)
return iter
}
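// A minimal sketch of the lenient number handling above (illustrative only): per the
// protobuf JSON mapping, integers may arrive either as JSON numbers or as decimal strings,
// and both forms decode identically.
//
//	it := BorrowIterator([]byte(`{"count":"42","flags":7}`))
//	defer ReturnIterator(it)
//	for f := it.ReadObject(); f != ""; f = it.ReadObject() {
//		switch f {
//		case "count":
//			_ = it.ReadUint64() // 42, parsed from the quoted string
//		case "flags":
//			_ = it.ReadUint32() // 7, parsed from the JSON number
//		default:
//			it.Skip()
//		}
//	}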
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package json // import "go.opentelemetry.io/collector/pdata/internal/json"
import (
"encoding/base64"
"errors"
"io"
"math"
"strconv"
jsoniter "github.com/json-iterator/go"
)
func BorrowStream(writer io.Writer) *Stream {
return &Stream{
Stream: jsoniter.ConfigFastest.BorrowStream(writer),
wmTracker: make([]bool, 0, 32),
}
}
func ReturnStream(s *Stream) {
jsoniter.ConfigFastest.ReturnStream(s.Stream)
}
// Stream removes the need to explicitly call the `Stream.WriteMore` method while marshaling
// objects: it tracks whether a field was already written inside the current object and
// automatically appends a "," before the next field when needed.
type Stream struct {
*jsoniter.Stream
// wmTracker acts like a stack: a new value is pushed when an object is started and the
// top is popped when the object ends. The value for each object tracks whether any field
// has already been written for it, in which case a "," is added before the next field.
wmTracker []bool
}
func (ots *Stream) WriteObjectStart() {
ots.Stream.WriteObjectStart()
ots.wmTracker = append(ots.wmTracker, false)
}
func (ots *Stream) WriteObjectField(field string) {
if ots.wmTracker[len(ots.wmTracker)-1] {
ots.WriteMore()
}
ots.Stream.WriteObjectField(field)
ots.wmTracker[len(ots.wmTracker)-1] = true
}
func (ots *Stream) WriteObjectEnd() {
ots.Stream.WriteObjectEnd()
ots.wmTracker = ots.wmTracker[:len(ots.wmTracker)-1]
}
// WriteInt64 writes the value as a decimal string. This is per the protobuf JSON encoding rules for int64, fixed64, and uint64.
func (ots *Stream) WriteInt64(val int64) {
ots.WriteString(strconv.FormatInt(val, 10))
}
// WriteUint64 writes the value as a decimal string. This is per the protobuf JSON encoding rules for int64, fixed64, and uint64.
func (ots *Stream) WriteUint64(val uint64) {
ots.WriteString(strconv.FormatUint(val, 10))
}
// WriteBytes writes the values as a base64 encoded string. This is per the protobuf encoding rules for bytes.
func (ots *Stream) WriteBytes(val []byte) {
if len(val) == 0 {
ots.WriteString("")
return
}
ots.WriteString(base64.StdEncoding.EncodeToString(val))
}
// WriteFloat64 writes the value as a JSON number, or as one of the special string values
// "NaN", "Infinity", and "-Infinity" for values that cannot be represented as JSON numbers.
// See https://protobuf.dev/programming-guides/json/.
func (ots *Stream) WriteFloat64(val float64) {
if math.IsNaN(val) {
ots.WriteString("NaN")
return
}
if math.IsInf(val, 1) {
ots.WriteString("Infinity")
return
}
if math.IsInf(val, -1) {
ots.WriteString("-Infinity")
return
}
ots.Stream.WriteFloat64(val)
}
func (ots *Stream) ReportError(err error) {
ots.Stream.Error = errors.Join(ots.Stream.Error, err)
}
func (ots *Stream) Error() error {
return ots.Stream.Error
}
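// A minimal usage sketch (illustrative only; assumes "strings" is imported alongside the
// imports above): the wmTracker machinery inserts the "," between fields automatically,
// and non-finite floats are emitted as the protobuf JSON special strings.
//
//	var sb strings.Builder
//	s := BorrowStream(&sb)
//	s.WriteObjectStart()
//	s.WriteObjectField("a")
//	s.WriteFloat64(1.5)
//	s.WriteObjectField("b") // a "," is appended automatically before this field
//	s.WriteFloat64(math.Inf(1))
//	s.WriteObjectEnd()
//	_ = s.Flush()
//	ReturnStream(s)
//	// sb.String() == `{"a":1.5,"b":"Infinity"}`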
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// MigrateLogs implements any translation needed due to deprecations in the OTLP logs protocol.
// Any plog.Unmarshaler implementation from OTLP (proto/json), as well as the gRPC Server
// implementation, MUST call this.
func MigrateLogs(rls []*otlplogs.ResourceLogs) {
for _, rl := range rls {
if len(rl.ScopeLogs) == 0 {
rl.ScopeLogs = rl.DeprecatedScopeLogs
}
rl.DeprecatedScopeLogs = nil
}
}
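// A minimal sketch of the migration behavior (illustrative only): the deprecated field is
// promoted only when the current one is unset, and is always cleared afterwards.
//
//	rl := &otlplogs.ResourceLogs{DeprecatedScopeLogs: []*otlplogs.ScopeLogs{{}}}
//	MigrateLogs([]*otlplogs.ResourceLogs{rl})
//	// rl.ScopeLogs now holds the promoted entry; rl.DeprecatedScopeLogs is nil.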
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// MigrateMetrics implements any translation needed due to deprecations in the OTLP metrics protocol.
// Any pmetric.Unmarshaler implementation from OTLP (proto/json), as well as the gRPC Server
// implementation, MUST call this.
func MigrateMetrics(rms []*otlpmetrics.ResourceMetrics) {
for _, rm := range rms {
if len(rm.ScopeMetrics) == 0 {
rm.ScopeMetrics = rm.DeprecatedScopeMetrics
}
rm.DeprecatedScopeMetrics = nil
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// MigrateProfiles implements any translation needed due to deprecations in the OTLP profiles protocol.
// Any pprofile.Unmarshaler implementation from OTLP (proto/json), as well as the gRPC Server
// implementation, MUST call this.
func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// MigrateTraces implements any translation needed due to deprecations in the OTLP traces protocol.
// Any ptrace.Unmarshaler implementation from OTLP (proto/json), as well as the gRPC Server
// implementation, MUST call this.
func MigrateTraces(rss []*otlptrace.ResourceSpans) {
for _, rs := range rss {
if len(rs.ScopeSpans) == 0 {
rs.ScopeSpans = rs.DeprecatedScopeSpans
}
rs.DeprecatedScopeSpans = nil
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
// EncodeVarint encodes v as a varint ending at offset in buf, writing backwards, and returns the start offset.
func EncodeVarint(buf []byte, offset int, v uint64) int {
offset -= Sov(v)
base := offset
for v >= 1<<7 {
//nolint:gosec
buf[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
buf[offset] = uint8(v)
return base
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
import (
"math/bits"
)
func Sov(x uint64) (n int) {
return (bits.Len64(x|1) + 6) / 7
}
func Soz(x uint64) (n int) {
//nolint:gosec
return Sov((x << 1) ^ uint64((int64(x) >> 63)))
}
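// A worked example of the backward varint encoding above (illustrative only): Sov(300) == 2,
// and EncodeVarint writes those two bytes so the varint ends exactly at the given offset.
//
//	v := uint64(300)
//	buf := make([]byte, Sov(v))
//	start := EncodeVarint(buf, len(buf), v)
//	// start == 0 and buf == []byte{0xac, 0x02}, the varint encoding of 300.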
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// WireType represents the proto wire type.
type WireType int8
const (
WireTypeVarint WireType = 0
WireTypeI64 WireType = 1
WireTypeLen WireType = 2
WireTypeStartGroup WireType = 3
WireTypeEndGroup WireType = 4
WireTypeI32 WireType = 5
)
var (
ErrInvalidLength = errors.New("proto: negative length found during unmarshaling")
ErrIntOverflow = errors.New("proto: integer overflow")
ErrUnexpectedEndOfGroup = errors.New("proto: unexpected end of group")
)
// ConsumeUnknown parses buf starting at pos as a wireType field, reporting the new position.
func ConsumeUnknown(buf []byte, pos int, wireType WireType) (int, error) {
var err error
l := len(buf)
depth := 0
for pos < l {
switch wireType {
case WireTypeVarint:
_, pos, err = ConsumeVarint(buf, pos)
return pos, err
case WireTypeI64:
_, pos, err = ConsumeI64(buf, pos)
return pos, err
case WireTypeLen:
_, pos, err = ConsumeLen(buf, pos)
return pos, err
case WireTypeStartGroup:
depth++
case WireTypeEndGroup:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroup
}
depth--
case WireTypeI32:
_, pos, err = ConsumeI32(buf, pos)
return pos, err
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
// We can only reach this point while parsing a group; if the group is done, return, otherwise parse more tags.
if depth == 0 {
return pos, nil
}
// If in a group parsing, move to the next tag.
_, wireType, pos, err = ConsumeTag(buf, pos)
if err != nil {
return 0, err
}
}
return 0, io.ErrUnexpectedEOF
}
// ConsumeI64 parses buf starting at pos as a WireTypeI64 field, reporting the value and the new position.
func ConsumeI64(buf []byte, pos int) (uint64, int, error) {
pos += 8
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return binary.LittleEndian.Uint64(buf[pos-8:]), pos, nil
}
// ConsumeLen parses buf starting at pos as a WireTypeLen field, reporting the length and the new position.
func ConsumeLen(buf []byte, pos int) (int, int, error) {
var num uint64
var err error
num, pos, err = ConsumeVarint(buf, pos)
if err != nil {
return 0, 0, err
}
//nolint:gosec
length := int(num)
if length < 0 {
return 0, 0, ErrInvalidLength
}
pos += length
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return length, pos, nil
}
// ConsumeI32 parses buf starting at pos as a WireTypeI32 field, reporting the value and the new position.
func ConsumeI32(buf []byte, pos int) (uint32, int, error) {
pos += 4
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return binary.LittleEndian.Uint32(buf[pos-4:]), pos, nil
}
// ConsumeTag parses buf starting at pos as a varint-encoded tag, reporting the field number, the wire type, and the new position.
func ConsumeTag(buf []byte, pos int) (int32, WireType, int, error) {
tag, pos, err := ConsumeVarint(buf, pos)
if err != nil {
return 0, 0, 0, err
}
//nolint:gosec
fieldNum := int32(tag >> 3)
//nolint:gosec
wireType := int8(tag & 0x7)
if fieldNum <= 0 {
return 0, 0, 0, fmt.Errorf("proto: illegal field=%d (tag=%d, pos=%d)", fieldNum, tag, pos)
}
return fieldNum, WireType(wireType), pos, nil
}
// ConsumeVarint parses buf starting at pos as a varint-encoded uint64, reporting the new position.
func ConsumeVarint(buf []byte, pos int) (uint64, int, error) {
l := len(buf)
var num uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, 0, ErrIntOverflow
}
if pos >= l {
return 0, 0, io.ErrUnexpectedEOF
}
b := buf[pos]
pos++
num |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
return num, pos, nil
}
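// A minimal decoding sketch using the helpers above (illustrative only): 0x08 is the tag
// for field number 1 with wire type varint, followed by the varint payload 0x2a.
//
//	buf := []byte{0x08, 0x2a}
//	fieldNum, wireType, pos, _ := ConsumeTag(buf, 0)
//	// fieldNum == 1, wireType == WireTypeVarint, pos == 1
//	num, pos, _ := ConsumeVarint(buf, pos)
//	// num == 42, pos == 2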
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"sync/atomic"
"go.opentelemetry.io/collector/featuregate"
)
var _ = featuregate.GlobalRegistry().MustRegister(
"pdata.useCustomProtoEncoding",
featuregate.StageStable,
featuregate.WithRegisterDescription("When enabled, enable custom proto encoding. This is required step to enable featuregate pdata.useProtoPooling."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
featuregate.WithRegisterFromVersion("v0.133.0"),
featuregate.WithRegisterToVersion("v0.137.0"),
)
var UseProtoPooling = featuregate.GlobalRegistry().MustRegister(
"pdata.useProtoPooling",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("When enabled, enable using local memory pools for underlying data that the pdata messages are pushed to."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
featuregate.WithRegisterFromVersion("v0.133.0"),
)
// State defines an ownership state of pmetric.Metrics, plog.Logs or ptrace.Traces.
type State struct {
refs atomic.Int32
state uint32
}
const (
defaultState uint32 = 0
stateReadOnlyBit = uint32(1 << 0)
statePipelineOwnedBit = uint32(1 << 1)
)
func NewState() *State {
st := &State{
state: defaultState,
}
st.refs.Store(1)
return st
}
func (st *State) MarkReadOnly() {
st.state |= stateReadOnlyBit
}
func (st *State) IsReadOnly() bool {
return st.state&stateReadOnlyBit != 0
}
// AssertMutable panics if the state is marked as read-only.
func (st *State) AssertMutable() {
if st.state&stateReadOnlyBit != 0 {
panic("invalid access to shared data")
}
}
// MarkPipelineOwned marks the data as owned by the pipeline and returns true if the data
// was not previously owned by the pipeline, otherwise false.
func (st *State) MarkPipelineOwned() bool {
if st.state&statePipelineOwnedBit != 0 {
return false
}
st.state |= statePipelineOwnedBit
return true
}
// Ref adds one to the count of active references.
func (st *State) Ref() {
st.refs.Add(1)
}
// Unref removes one active reference and returns true if the reference count reached 0,
// meaning there are no more active references; otherwise it returns false.
func (st *State) Unref() bool {
refs := st.refs.Add(-1)
switch {
case refs > 0:
return false
case refs == 0:
return true
default:
panic("Cannot unref freed data")
}
}
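// A minimal reference-counting sketch (illustrative only):
//
//	st := NewState() // refs == 1
//	st.Ref()         // refs == 2
//	_ = st.Unref()   // false: one active reference remains
//	_ = st.Unref()   // true: the count reached zero
//	st.MarkReadOnly()
//	// st.AssertMutable() would now panic.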
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// LogsToProto is an internal helper to convert Logs to the protobuf representation.
func LogsToProto(l LogsWrapper) otlplogs.LogsData {
return otlplogs.LogsData{
ResourceLogs: l.orig.ResourceLogs,
}
}
// LogsFromProto is an internal helper to convert the protobuf representation to Logs.
// This function sets an exclusive state, assuming it is called only once per Logs.
func LogsFromProto(orig otlplogs.LogsData) LogsWrapper {
return NewLogsWrapper(&otlpcollectorlog.ExportLogsServiceRequest{
ResourceLogs: orig.ResourceLogs,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
type MapWrapper struct {
orig *[]otlpcommon.KeyValue
state *State
}
func GetMapOrig(ms MapWrapper) *[]otlpcommon.KeyValue {
return ms.orig
}
func GetMapState(ms MapWrapper) *State {
return ms.state
}
func NewMapWrapper(orig *[]otlpcommon.KeyValue, state *State) MapWrapper {
return MapWrapper{orig: orig, state: state}
}
func GenTestMapWrapper() MapWrapper {
orig := GenTestKeyValueSlice()
return NewMapWrapper(&orig, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// MetricsToProto is an internal helper to convert Metrics to the protobuf representation.
func MetricsToProto(l MetricsWrapper) otlpmetrics.MetricsData {
return otlpmetrics.MetricsData{
ResourceMetrics: l.orig.ResourceMetrics,
}
}
// MetricsFromProto is an internal helper to convert the protobuf representation to Metrics.
// This function sets an exclusive state, assuming it is called only once per Metrics.
func MetricsFromProto(orig otlpmetrics.MetricsData) MetricsWrapper {
return NewMetricsWrapper(&otlpcollectormetrics.ExportMetricsServiceRequest{
ResourceMetrics: orig.ResourceMetrics,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ProfilesToProto is an internal helper to convert Profiles to the protobuf representation.
func ProfilesToProto(l ProfilesWrapper) otlpprofile.ProfilesData {
return otlpprofile.ProfilesData{
ResourceProfiles: l.orig.ResourceProfiles,
Dictionary: l.orig.Dictionary,
}
}
// ProfilesFromProto is an internal helper to convert the protobuf representation to Profiles.
// This function sets an exclusive state, assuming it is called only once per Profiles.
func ProfilesFromProto(orig otlpprofile.ProfilesData) ProfilesWrapper {
return NewProfilesWrapper(&otlpcollectorprofile.ExportProfilesServiceRequest{
ResourceProfiles: orig.ResourceProfiles,
Dictionary: orig.Dictionary,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// TracesToProto is an internal helper to convert Traces to the protobuf representation.
func TracesToProto(l TracesWrapper) otlptrace.TracesData {
return otlptrace.TracesData{
ResourceSpans: l.orig.ResourceSpans,
}
}
// TracesFromProto is an internal helper to convert the protobuf representation to Traces.
// This function sets an exclusive state, assuming it is called only once per Traces.
func TracesFromProto(orig otlptrace.TracesData) TracesWrapper {
return NewTracesWrapper(&otlpcollectortrace.ExportTraceServiceRequest{
ResourceSpans: orig.ResourceSpans,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
type TraceStateWrapper struct {
orig *string
state *State
}
func GetTraceStateOrig(ms TraceStateWrapper) *string {
return ms.orig
}
func GetTraceStateState(ms TraceStateWrapper) *State {
return ms.state
}
func NewTraceStateWrapper(orig *string, state *State) TraceStateWrapper {
return TraceStateWrapper{orig: orig, state: state}
}
func GenTestTraceStateWrapper() TraceStateWrapper {
return NewTraceStateWrapper(GenTestTraceState(), NewState())
}
func CopyTraceState(dest, src *string) {
*dest = *src
}
func GenTestTraceState() *string {
orig := new(string)
*orig = "rojo=00f067aa0ba902b7"
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
type ValueWrapper struct {
orig *otlpcommon.AnyValue
state *State
}
func GetValueOrig(ms ValueWrapper) *otlpcommon.AnyValue {
return ms.orig
}
func GetValueState(ms ValueWrapper) *State {
return ms.state
}
func NewValueWrapper(orig *otlpcommon.AnyValue, state *State) ValueWrapper {
return ValueWrapper{orig: orig, state: state}
}
func GenTestValueWrapper() ValueWrapper {
orig := GenTestAnyValue()
return NewValueWrapper(orig, NewState())
}
func NewOrigAnyValueStringValue() *otlpcommon.AnyValue_StringValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_StringValue{}
}
return ProtoPoolAnyValue_StringValue.Get().(*otlpcommon.AnyValue_StringValue)
}
func NewOrigAnyValueIntValue() *otlpcommon.AnyValue_IntValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_IntValue{}
}
return ProtoPoolAnyValue_IntValue.Get().(*otlpcommon.AnyValue_IntValue)
}
func NewOrigAnyValueBoolValue() *otlpcommon.AnyValue_BoolValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_BoolValue{}
}
return ProtoPoolAnyValue_BoolValue.Get().(*otlpcommon.AnyValue_BoolValue)
}
func NewOrigAnyValueDoubleValue() *otlpcommon.AnyValue_DoubleValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_DoubleValue{}
}
return ProtoPoolAnyValue_DoubleValue.Get().(*otlpcommon.AnyValue_DoubleValue)
}
func NewOrigAnyValueBytesValue() *otlpcommon.AnyValue_BytesValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_BytesValue{}
}
return ProtoPoolAnyValue_BytesValue.Get().(*otlpcommon.AnyValue_BytesValue)
}
func NewOrigAnyValueArrayValue() *otlpcommon.AnyValue_ArrayValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_ArrayValue{}
}
return ProtoPoolAnyValue_ArrayValue.Get().(*otlpcommon.AnyValue_ArrayValue)
}
func NewOrigAnyValueKvlistValue() *otlpcommon.AnyValue_KvlistValue {
if !UseProtoPooling.IsEnabled() {
return &otlpcommon.AnyValue_KvlistValue{}
}
return ProtoPoolAnyValue_KvlistValue.Get().(*otlpcommon.AnyValue_KvlistValue)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// ByteSlice represents a []byte slice.
// The instance of ByteSlice can be assigned to multiple objects since it's immutable.
//
// Must use NewByteSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ByteSlice internal.ByteSliceWrapper
func (ms ByteSlice) getOrig() *[]byte {
return internal.GetByteSliceOrig(internal.ByteSliceWrapper(ms))
}
func (ms ByteSlice) getState() *internal.State {
return internal.GetByteSliceState(internal.ByteSliceWrapper(ms))
}
// NewByteSlice creates a new empty ByteSlice.
func NewByteSlice() ByteSlice {
orig := []byte(nil)
return ByteSlice(internal.NewByteSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []byte slice.
func (ms ByteSlice) AsRaw() []byte {
return internal.CopyByteSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []byte into the slice ByteSlice.
func (ms ByteSlice) FromRaw(val []byte) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyByteSlice(*ms.getOrig(), val)
}
// Len returns length of the []byte slice value.
// Equivalent of len(byteSlice).
func (ms ByteSlice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of byteSlice[i].
func (ms ByteSlice) At(i int) byte {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms ByteSlice) All() iter.Seq2[int, byte] {
return func(yield func(int, byte) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets byte item at particular index.
// Equivalent of byteSlice[i] = val
func (ms ByteSlice) SetAt(i int, val byte) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures ByteSlice has at least the specified capacity.
// 1. If newCap <= cap, then there is no change in capacity.
// 2. If newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]byte, len(byteSlice), newCap)
// copy(buf, byteSlice)
// byteSlice = buf
func (ms ByteSlice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]byte, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to ByteSlice.
// Equivalent of byteSlice = append(byteSlice, elms...)
func (ms ByteSlice) Append(elms ...byte) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms ByteSlice) MoveTo(dest ByteSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms ByteSlice) MoveAndAppendTo(dest ByteSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms ByteSlice) RemoveIf(f func(byte) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero byte
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms ByteSlice) CopyTo(dest ByteSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyByteSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another ByteSlice
func (ms ByteSlice) Equal(val ByteSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
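// A minimal usage sketch (illustrative only); Float64Slice below follows the same pattern:
//
//	bs := NewByteSlice()
//	bs.FromRaw([]byte{1, 2, 3})
//	bs.SetAt(0, 9)                                   // {9, 2, 3}
//	bs.RemoveIf(func(b byte) bool { return b == 2 }) // {9, 3}
//	raw := bs.AsRaw()                                // returns a copy: []byte{9, 3}
//	_ = raw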
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Float64Slice represents a []float64 slice.
// The instance of Float64Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewFloat64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Float64Slice internal.Float64SliceWrapper
func (ms Float64Slice) getOrig() *[]float64 {
return internal.GetFloat64SliceOrig(internal.Float64SliceWrapper(ms))
}
func (ms Float64Slice) getState() *internal.State {
return internal.GetFloat64SliceState(internal.Float64SliceWrapper(ms))
}
// NewFloat64Slice creates a new empty Float64Slice.
func NewFloat64Slice() Float64Slice {
orig := []float64(nil)
return Float64Slice(internal.NewFloat64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []float64 slice.
func (ms Float64Slice) AsRaw() []float64 {
return internal.CopyFloat64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []float64 into the slice Float64Slice.
func (ms Float64Slice) FromRaw(val []float64) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyFloat64Slice(*ms.getOrig(), val)
}
// Len returns length of the []float64 slice value.
// Equivalent of len(float64Slice).
func (ms Float64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of float64Slice[i].
func (ms Float64Slice) At(i int) float64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Float64Slice) All() iter.Seq2[int, float64] {
return func(yield func(int, float64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets float64 item at particular index.
// Equivalent of float64Slice[i] = val
func (ms Float64Slice) SetAt(i int, val float64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Float64Slice has at least the specified capacity.
// 1. If newCap <= cap, then there is no change in capacity.
// 2. If newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]float64, len(float64Slice), newCap)
// copy(buf, float64Slice)
// float64Slice = buf
func (ms Float64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]float64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Float64Slice.
// Equivalent of float64Slice = append(float64Slice, elms...)
func (ms Float64Slice) Append(elms ...float64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Float64Slice) MoveTo(dest Float64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Float64Slice) MoveAndAppendTo(dest Float64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Float64Slice) RemoveIf(f func(float64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero float64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Float64Slice) CopyTo(dest Float64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyFloat64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Float64Slice
func (ms Float64Slice) Equal(val Float64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
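// Example (an editorial sketch from a client package's perspective, not part of
// the generated code): a typical build-and-prune sequence on a Float64Slice.
// The same pattern applies to the other primitive slice wrappers in this
// package (Int32Slice, Int64Slice, StringSlice, UInt64Slice).
//
//	vals := pcommon.NewFloat64Slice()
//	vals.EnsureCapacity(4)                               // pre-allocate to avoid growth during Append
//	vals.Append(1.5, -2.5, 3.5, -4.5)
//	vals.RemoveIf(func(v float64) bool { return v < 0 }) // drop negative samples
//	fmt.Println(vals.AsRaw())                            // prints: [1.5 3.5]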
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// InstrumentationScope is a message representing the instrumentation scope information.
//
// This is a reference type. If passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewInstrumentationScope function to create new instances.
// Important: zero-initialized instance is not valid for use.
type InstrumentationScope internal.InstrumentationScopeWrapper
func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *internal.State) InstrumentationScope {
return InstrumentationScope(internal.NewInstrumentationScopeWrapper(orig, state))
}
// NewInstrumentationScope creates a new empty InstrumentationScope.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewInstrumentationScope() InstrumentationScope {
return newInstrumentationScope(internal.NewInstrumentationScope(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteInstrumentationScope(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Name returns the name associated with this InstrumentationScope.
func (ms InstrumentationScope) Name() string {
return ms.getOrig().Name
}
// SetName replaces the name associated with this InstrumentationScope.
func (ms InstrumentationScope) SetName(v string) {
ms.getState().AssertMutable()
ms.getOrig().Name = v
}
// Version returns the version associated with this InstrumentationScope.
func (ms InstrumentationScope) Version() string {
return ms.getOrig().Version
}
// SetVersion replaces the version associated with this InstrumentationScope.
func (ms InstrumentationScope) SetVersion(v string) {
ms.getState().AssertMutable()
ms.getOrig().Version = v
}
// Attributes returns the Attributes associated with this InstrumentationScope.
func (ms InstrumentationScope) Attributes() Map {
return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope.
func (ms InstrumentationScope) DroppedAttributesCount() uint32 {
return ms.getOrig().DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this InstrumentationScope.
func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) {
ms.getState().AssertMutable()
ms.getOrig().DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) {
dest.getState().AssertMutable()
internal.CopyInstrumentationScope(dest.getOrig(), ms.getOrig())
}
func (ms InstrumentationScope) getOrig() *otlpcommon.InstrumentationScope {
return internal.GetInstrumentationScopeOrig(internal.InstrumentationScopeWrapper(ms))
}
func (ms InstrumentationScope) getState() *internal.State {
return internal.GetInstrumentationScopeState(internal.InstrumentationScopeWrapper(ms))
}
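// Example (an editorial sketch for test code, per the NewInstrumentationScope
// doc above; the scope name and attribute are made up):
//
//	scope := pcommon.NewInstrumentationScope()
//	scope.SetName("io.example.instrumentation")
//	scope.SetVersion("1.0.0")
//	scope.Attributes().PutStr("feature", "demo")
//
//	dest := pcommon.NewInstrumentationScope()
//	scope.CopyTo(dest) // dest now holds a copy; scope itself is unchanged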
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Int32Slice represents a []int32 slice.
// The instance of Int32Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewInt32Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Int32Slice internal.Int32SliceWrapper
func (ms Int32Slice) getOrig() *[]int32 {
return internal.GetInt32SliceOrig(internal.Int32SliceWrapper(ms))
}
func (ms Int32Slice) getState() *internal.State {
return internal.GetInt32SliceState(internal.Int32SliceWrapper(ms))
}
// NewInt32Slice creates a new empty Int32Slice.
func NewInt32Slice() Int32Slice {
orig := []int32(nil)
return Int32Slice(internal.NewInt32SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int32 slice.
func (ms Int32Slice) AsRaw() []int32 {
return internal.CopyInt32Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int32 into the slice Int32Slice.
func (ms Int32Slice) FromRaw(val []int32) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyInt32Slice(*ms.getOrig(), val)
}
// Len returns the length of the []int32 slice value.
// Equivalent of len(int32Slice).
func (ms Int32Slice) Len() int {
return len(*ms.getOrig())
}
// At returns the item at a particular index.
// Equivalent of int32Slice[i].
func (ms Int32Slice) At(i int) int32 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Int32Slice) All() iter.Seq2[int, int32] {
return func(yield func(int, int32) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets the int32 item at a particular index.
// Equivalent of int32Slice[i] = val
func (ms Int32Slice) SetAt(i int, val int32) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Int32Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]int32, len(int32Slice), newCap)
// copy(buf, int32Slice)
// int32Slice = buf
func (ms Int32Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]int32, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Int32Slice.
// Equivalent of int32Slice = append(int32Slice, elms...)
func (ms Int32Slice) Append(elms ...int32) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Int32Slice) MoveTo(dest Int32Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Int32Slice) MoveAndAppendTo(dest Int32Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Int32Slice) RemoveIf(f func(int32) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero int32
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int32Slice) CopyTo(dest Int32Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyInt32Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int32Slice
func (ms Int32Slice) Equal(val Int32Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Int64Slice represents a []int64 slice.
// The instance of Int64Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Int64Slice internal.Int64SliceWrapper
func (ms Int64Slice) getOrig() *[]int64 {
return internal.GetInt64SliceOrig(internal.Int64SliceWrapper(ms))
}
func (ms Int64Slice) getState() *internal.State {
return internal.GetInt64SliceState(internal.Int64SliceWrapper(ms))
}
// NewInt64Slice creates a new empty Int64Slice.
func NewInt64Slice() Int64Slice {
orig := []int64(nil)
return Int64Slice(internal.NewInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int64 slice.
func (ms Int64Slice) AsRaw() []int64 {
return internal.CopyInt64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int64 into the slice Int64Slice.
func (ms Int64Slice) FromRaw(val []int64) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyInt64Slice(*ms.getOrig(), val)
}
// Len returns the length of the []int64 slice value.
// Equivalent of len(int64Slice).
func (ms Int64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns the item at a particular index.
// Equivalent of int64Slice[i].
func (ms Int64Slice) At(i int) int64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Int64Slice) All() iter.Seq2[int, int64] {
return func(yield func(int, int64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets the int64 item at a particular index.
// Equivalent of int64Slice[i] = val
func (ms Int64Slice) SetAt(i int, val int64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Int64Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]int64, len(int64Slice), newCap)
// copy(buf, int64Slice)
// int64Slice = buf
func (ms Int64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]int64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Int64Slice.
// Equivalent of int64Slice = append(int64Slice, elms...)
func (ms Int64Slice) Append(elms ...int64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Int64Slice) MoveTo(dest Int64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Int64Slice) MoveAndAppendTo(dest Int64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Int64Slice) RemoveIf(f func(int64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero int64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int64Slice) CopyTo(dest Int64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyInt64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int64Slice
func (ms Int64Slice) Equal(val Int64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1"
)
// Resource is a message representing the resource information.
//
// This is a reference type. If passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewResource function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Resource internal.ResourceWrapper
func newResource(orig *otlpresource.Resource, state *internal.State) Resource {
return Resource(internal.NewResourceWrapper(orig, state))
}
// NewResource creates a new empty Resource.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResource() Resource {
return newResource(internal.NewResource(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Resource) MoveTo(dest Resource) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteResource(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Attributes returns the Attributes associated with this Resource.
func (ms Resource) Attributes() Map {
return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Resource.
func (ms Resource) DroppedAttributesCount() uint32 {
return ms.getOrig().DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Resource.
func (ms Resource) SetDroppedAttributesCount(v uint32) {
ms.getState().AssertMutable()
ms.getOrig().DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Resource) CopyTo(dest Resource) {
dest.getState().AssertMutable()
internal.CopyResource(dest.getOrig(), ms.getOrig())
}
func (ms Resource) getOrig() *otlpresource.Resource {
return internal.GetResourceOrig(internal.ResourceWrapper(ms))
}
func (ms Resource) getState() *internal.State {
return internal.GetResourceState(internal.ResourceWrapper(ms))
}
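// Example (an editorial sketch for test code; the attribute value is made up):
//
//	res := pcommon.NewResource()
//	res.Attributes().PutStr("service.name", "checkout")
//	res.SetDroppedAttributesCount(0)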
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Slice logically represents a slice of Value.
//
// This is a reference type. If passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Slice internal.SliceWrapper
func newSlice(orig *[]otlpcommon.AnyValue, state *internal.State) Slice {
return Slice(internal.NewSliceWrapper(orig, state))
}
// NewSlice creates a Slice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSlice() Slice {
orig := []otlpcommon.AnyValue(nil)
return newSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSlice()".
func (es Slice) Len() int {
return len(*es.getOrig())
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es Slice) At(i int) Value {
return newValue(&(*es.getOrig())[i], es.getState())
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es Slice) All() iter.Seq2[int, Value] {
return func(yield func(int, Value) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to equal newCap.
//
// Here is how a new Slice can be initialized:
//
// es := NewSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es Slice) EnsureCapacity(newCap int) {
es.getState().AssertMutable()
oldCap := cap(*es.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]otlpcommon.AnyValue, len(*es.getOrig()), newCap)
copy(newOrig, *es.getOrig())
*es.getOrig() = newOrig
}
// AppendEmpty will append to the end of the slice an empty Value.
// It returns the newly added Value.
func (es Slice) AppendEmpty() Value {
es.getState().AssertMutable()
*es.getOrig() = append(*es.getOrig(), otlpcommon.AnyValue{})
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es Slice) MoveAndAppendTo(dest Slice) {
es.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.getOrig() == dest.getOrig() {
return
}
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *es.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...)
}
*es.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es Slice) RemoveIf(f func(Value) bool) {
es.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*es.getOrig()); i++ {
if f(es.At(i)) {
internal.DeleteAnyValue(&(*es.getOrig())[i], false)
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.getOrig())[newLen] = (*es.getOrig())[i]
(*es.getOrig())[i].Reset()
newLen++
}
*es.getOrig() = (*es.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es Slice) CopyTo(dest Slice) {
dest.getState().AssertMutable()
if es.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyAnyValueSlice(*dest.getOrig(), *es.getOrig())
}
func (ms Slice) getOrig() *[]otlpcommon.AnyValue {
return internal.GetSliceOrig(internal.SliceWrapper(ms))
}
func (ms Slice) getState() *internal.State {
return internal.GetSliceState(internal.SliceWrapper(ms))
}
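// Example (an editorial sketch): building a heterogeneous Slice of Values and
// pruning it with RemoveIf.
//
//	es := pcommon.NewSlice()
//	es.EnsureCapacity(3)
//	es.AppendEmpty().SetStr("a")
//	es.AppendEmpty().SetInt(42)
//	es.AppendEmpty().SetBool(true)
//	es.RemoveIf(func(v pcommon.Value) bool { return v.Type() == pcommon.ValueTypeBool })
//	fmt.Println(es.Len()) // prints: 2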
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// StringSlice represents a []string slice.
// The instance of StringSlice can be assigned to multiple objects since it's immutable.
//
// Must use NewStringSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type StringSlice internal.StringSliceWrapper
func (ms StringSlice) getOrig() *[]string {
return internal.GetStringSliceOrig(internal.StringSliceWrapper(ms))
}
func (ms StringSlice) getState() *internal.State {
return internal.GetStringSliceState(internal.StringSliceWrapper(ms))
}
// NewStringSlice creates a new empty StringSlice.
func NewStringSlice() StringSlice {
orig := []string(nil)
return StringSlice(internal.NewStringSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []string slice.
func (ms StringSlice) AsRaw() []string {
return internal.CopyStringSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []string into the slice StringSlice.
func (ms StringSlice) FromRaw(val []string) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyStringSlice(*ms.getOrig(), val)
}
// Len returns the length of the []string slice value.
// Equivalent of len(stringSlice).
func (ms StringSlice) Len() int {
return len(*ms.getOrig())
}
// At returns the item at a particular index.
// Equivalent of stringSlice[i].
func (ms StringSlice) At(i int) string {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms StringSlice) All() iter.Seq2[int, string] {
return func(yield func(int, string) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets the string item at a particular index.
// Equivalent of stringSlice[i] = val
func (ms StringSlice) SetAt(i int, val string) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures StringSlice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]string, len(stringSlice), newCap)
// copy(buf, stringSlice)
// stringSlice = buf
func (ms StringSlice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]string, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to StringSlice.
// Equivalent of stringSlice = append(stringSlice, elms...)
func (ms StringSlice) Append(elms ...string) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms StringSlice) MoveTo(dest StringSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms StringSlice) MoveAndAppendTo(dest StringSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms StringSlice) RemoveIf(f func(string) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero string
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms StringSlice) CopyTo(dest StringSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyStringSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another StringSlice
func (ms StringSlice) Equal(val StringSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// UInt64Slice represents a []uint64 slice.
// The instance of UInt64Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewUInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type UInt64Slice internal.UInt64SliceWrapper
func (ms UInt64Slice) getOrig() *[]uint64 {
return internal.GetUInt64SliceOrig(internal.UInt64SliceWrapper(ms))
}
func (ms UInt64Slice) getState() *internal.State {
return internal.GetUInt64SliceState(internal.UInt64SliceWrapper(ms))
}
// NewUInt64Slice creates a new empty UInt64Slice.
func NewUInt64Slice() UInt64Slice {
orig := []uint64(nil)
return UInt64Slice(internal.NewUInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []uint64 slice.
func (ms UInt64Slice) AsRaw() []uint64 {
return internal.CopyUint64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []uint64 into the slice UInt64Slice.
func (ms UInt64Slice) FromRaw(val []uint64) {
ms.getState().AssertMutable()
*ms.getOrig() = internal.CopyUint64Slice(*ms.getOrig(), val)
}
// Len returns the length of the []uint64 slice value.
// Equivalent of len(uInt64Slice).
func (ms UInt64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns the item at a particular index.
// Equivalent of uInt64Slice[i].
func (ms UInt64Slice) At(i int) uint64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms UInt64Slice) All() iter.Seq2[int, uint64] {
return func(yield func(int, uint64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets the uint64 item at a particular index.
// Equivalent of uInt64Slice[i] = val
func (ms UInt64Slice) SetAt(i int, val uint64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures UInt64Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value, which is equivalent to:
// buf := make([]uint64, len(uInt64Slice), newCap)
// copy(buf, uInt64Slice)
// uInt64Slice = buf
func (ms UInt64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]uint64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to UInt64Slice.
// Equivalent of uInt64Slice = append(uInt64Slice, elms...)
func (ms UInt64Slice) Append(elms ...uint64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms UInt64Slice) MoveTo(dest UInt64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms UInt64Slice) MoveAndAppendTo(dest UInt64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms UInt64Slice) RemoveIf(f func(uint64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero uint64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyUint64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another UInt64Slice
func (ms UInt64Slice) Equal(val UInt64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"iter"
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// Map stores a map of string keys to elements of Value type.
//
// Must use NewMap function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Map internal.MapWrapper
// NewMap creates a Map with 0 elements.
func NewMap() Map {
orig := []otlpcommon.KeyValue(nil)
return Map(internal.NewMapWrapper(&orig, internal.NewState()))
}
func (m Map) getOrig() *[]otlpcommon.KeyValue {
return internal.GetMapOrig(internal.MapWrapper(m))
}
func (m Map) getState() *internal.State {
return internal.GetMapState(internal.MapWrapper(m))
}
func newMap(orig *[]otlpcommon.KeyValue, state *internal.State) Map {
return Map(internal.NewMapWrapper(orig, state))
}
// Clear erases any existing entries in this Map instance.
func (m Map) Clear() {
m.getState().AssertMutable()
*m.getOrig() = nil
}
// EnsureCapacity increases the capacity of this Map instance, if necessary,
// to ensure that it can hold at least the number of elements specified by the capacity argument.
func (m Map) EnsureCapacity(capacity int) {
m.getState().AssertMutable()
oldOrig := *m.getOrig()
if capacity <= cap(oldOrig) {
return
}
*m.getOrig() = make([]otlpcommon.KeyValue, len(oldOrig), capacity)
copy(*m.getOrig(), oldOrig)
}
// Get returns the Value associated with the key and true. The returned
// Value is not a copy, it is a reference to the value stored in this map.
// It is allowed to modify the returned value using Value.Set* functions.
// Such modification will be applied to the value stored in this map.
// Accessing the returned value after modifying the underlying map
// (removing or adding new values) is an undefined behavior.
//
// If the key does not exist, returns a zero-initialized Value and false.
// Calling any functions on the returned invalid instance may cause a panic.
func (m Map) Get(key string) (Value, bool) {
for i := range *m.getOrig() {
akv := &(*m.getOrig())[i]
if akv.Key == key {
return newValue(&akv.Value, m.getState()), true
}
}
return newValue(nil, m.getState()), false
}
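// Example (an editorial sketch; the key and values are made up): the returned
// Value is a live reference, so mutating it writes through to the map, as the
// Get doc above describes.
//
//	m := pcommon.NewMap()
//	m.PutStr("env", "prod")
//	if v, ok := m.Get("env"); ok {
//		v.SetStr("staging") // updates the entry stored in m
//	}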
// Remove removes the entry associated with the key and returns true if the key
// was present in the map, otherwise returns false.
func (m Map) Remove(key string) bool {
m.getState().AssertMutable()
for i := range *m.getOrig() {
akv := &(*m.getOrig())[i]
if akv.Key == key {
*akv = (*m.getOrig())[len(*m.getOrig())-1]
*m.getOrig() = (*m.getOrig())[:len(*m.getOrig())-1]
return true
}
}
return false
}
// RemoveIf removes the entries for which the function in question returns true
func (m Map) RemoveIf(f func(string, Value) bool) {
m.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*m.getOrig()); i++ {
if f((*m.getOrig())[i].Key, newValue(&(*m.getOrig())[i].Value, m.getState())) {
(*m.getOrig())[i] = otlpcommon.KeyValue{}
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*m.getOrig())[newLen] = (*m.getOrig())[i]
(*m.getOrig())[i] = otlpcommon.KeyValue{}
newLen++
}
*m.getOrig() = (*m.getOrig())[:newLen]
}
// PutEmpty inserts or updates an empty value in the map under the given key
// and returns the updated/inserted value.
func (m Map) PutEmpty(k string) Value {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.getOrig().Value = nil
return newValue(av.getOrig(), m.getState())
}
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState())
}
// GetOrPutEmpty returns the Value associated with the key and true (loaded) if the key exists in the map,
// otherwise inserts an empty value to the map under the given key and returns the inserted value
// and false (loaded).
func (m Map) GetOrPutEmpty(k string) (Value, bool) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av, true
}
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()), false
}
// PutStr performs an insert-or-update action: if the key does not exist, a new
// string entry is inserted; if the key already exists, its value is updated.
func (m Map) PutStr(k, v string) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetStr(v)
return
}
ov := internal.NewOrigAnyValueStringValue()
ov.StringValue = v
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
}
// PutInt performs an insert-or-update action: if the key does not exist, a new
// int entry is inserted; if the key already exists, its value is updated.
func (m Map) PutInt(k string, v int64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetInt(v)
return
}
ov := internal.NewOrigAnyValueIntValue()
ov.IntValue = v
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
}
// PutDouble performs an insert-or-update action: if the key does not exist, a new
// double entry is inserted; if the key already exists, its value is updated.
func (m Map) PutDouble(k string, v float64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetDouble(v)
return
}
ov := internal.NewOrigAnyValueDoubleValue()
ov.DoubleValue = v
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
}
// PutBool performs an insert-or-update action: if the key does not exist, a new
// bool entry is inserted; if the key already exists, its value is updated.
func (m Map) PutBool(k string, v bool) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetBool(v)
return
}
ov := internal.NewOrigAnyValueBoolValue()
ov.BoolValue = v
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
}
// PutEmptyBytes inserts or updates an empty byte slice under the given key and returns it.
func (m Map) PutEmptyBytes(k string) ByteSlice {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptyBytes()
}
ov := internal.NewOrigAnyValueBytesValue()
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
return ByteSlice(internal.NewByteSliceWrapper(&ov.BytesValue, m.getState()))
}
// PutEmptyMap inserts or updates an empty map under the given key and returns it.
func (m Map) PutEmptyMap(k string) Map {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptyMap()
}
ov := internal.NewOrigAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
return Map(internal.NewMapWrapper(&ov.KvlistValue.Values, m.getState()))
}
// PutEmptySlice inserts or updates an empty slice under the given key and returns it.
func (m Map) PutEmptySlice(k string) Slice {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptySlice()
}
ov := internal.NewOrigAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
*m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: ov}})
return Slice(internal.NewSliceWrapper(&ov.ArrayValue.Values, m.getState()))
}
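// Example (an editorial sketch; keys and values are made up): the Put* family
// covers scalar values directly, while PutEmptyMap and PutEmptySlice return the
// nested container so it can be populated in place.
//
//	m := pcommon.NewMap()
//	m.PutStr("http.method", "GET")
//	m.PutInt("http.status_code", 200)
//	m.PutBool("retry", false)
//	peer := m.PutEmptyMap("net.peer")
//	peer.PutStr("name", "example.com")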
// Len returns the length of this map.
//
// Because the Map is represented internally by a slice of pointers, and the data are coming from the wire,
// iterating using "Range" may yield access to fewer elements than Len reports, because nil elements are skipped.
func (m Map) Len() int {
return len(*m.getOrig())
}
// Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration.
//
// Example:
//
// sm.Range(func(k string, v Value) bool {
// ...
// })
func (m Map) Range(f func(k string, v Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
if !f(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
break
}
}
}
// All returns an iterator over key-value pairs in the Map.
//
// for k, v := range es.All() {
// ... // Do something with key-value pair
// }
func (m Map) All() iter.Seq2[string, Value] {
return func(yield func(string, Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
if !yield(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
return
}
}
}
}
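// Example (an editorial sketch; keys and values are made up): Range and All are
// equivalent ways to iterate; ranging over All requires Go 1.23+ (iter.Seq2).
//
//	m := pcommon.NewMap()
//	m.PutStr("k1", "v1")
//	m.PutInt("k2", 2)
//	m.Range(func(k string, v pcommon.Value) bool {
//		fmt.Println(k, v.AsString())
//		return true // returning false stops the iteration
//	})
//	for k, v := range m.All() {
//		fmt.Println(k, v.AsString())
//	}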
// MoveTo moves all key/values from the current map overriding the destination and
// resetting the current instance to its zero value
func (m Map) MoveTo(dest Map) {
m.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if m.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *m.getOrig()
*m.getOrig() = nil
}
// CopyTo copies all elements from the current map overriding the destination.
func (m Map) CopyTo(dest Map) {
dest.getState().AssertMutable()
if m.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyKeyValueSlice(*dest.getOrig(), *m.getOrig())
}
// AsRaw returns a standard go map representation of this Map.
func (m Map) AsRaw() map[string]any {
rawMap := make(map[string]any, m.Len())
m.Range(func(k string, v Value) bool {
rawMap[k] = v.AsRaw()
return true
})
return rawMap
}
// FromRaw overrides this Map instance from a standard go map.
func (m Map) FromRaw(rawMap map[string]any) error {
m.getState().AssertMutable()
if len(rawMap) == 0 {
*m.getOrig() = nil
return nil
}
var errs error
origs := make([]otlpcommon.KeyValue, len(rawMap))
ix := 0
for k, iv := range rawMap {
origs[ix].Key = k
errs = multierr.Append(errs, newValue(&origs[ix].Value, m.getState()).FromRaw(iv))
ix++
}
*m.getOrig() = origs
return errs
}
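// Example (an editorial sketch; keys and values are made up): FromRaw accepts
// nested raw structures and reports unsupported value types through the
// returned error.
//
//	m := pcommon.NewMap()
//	err := m.FromRaw(map[string]any{
//		"service.name": "checkout",
//		"replicas":     int64(3),
//		"labels":       []any{"a", "b"},
//	})
//	if err != nil {
//		// an entry had an unsupported value type
//	}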
// Equal checks equality with another Map
func (m Map) Equal(val Map) bool {
if m.Len() != val.Len() {
return false
}
fullEqual := true
m.Range(func(k string, v Value) bool {
vv, ok := val.Get(k)
if !ok {
fullEqual = false
return fullEqual
}
if !v.Equal(vv) {
fullEqual = false
}
return fullEqual
})
return fullEqual
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"go.uber.org/multierr"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// AsRaw returns a []any copy of the Slice.
func (es Slice) AsRaw() []any {
rawSlice := make([]any, 0, es.Len())
for i := 0; i < es.Len(); i++ {
rawSlice = append(rawSlice, es.At(i).AsRaw())
}
return rawSlice
}
// FromRaw copies []any into the Slice.
func (es Slice) FromRaw(rawSlice []any) error {
es.getState().AssertMutable()
if len(rawSlice) == 0 {
*es.getOrig() = nil
return nil
}
var errs error
origs := make([]otlpcommon.AnyValue, len(rawSlice))
for ix, iv := range rawSlice {
errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv))
}
*es.getOrig() = origs
return errs
}
// Equal checks equality with another Slice
func (es Slice) Equal(val Slice) bool {
if es.Len() != val.Len() {
return false
}
for i := 0; i < es.Len(); i++ {
if !es.At(i).Equal(val.At(i)) {
return false
}
}
return true
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal/data"
)
var emptySpanID = SpanID([8]byte{})
// SpanID is span identifier.
type SpanID [8]byte
// NewSpanIDEmpty returns a new empty (all zero bytes) SpanID.
func NewSpanIDEmpty() SpanID {
return emptySpanID
}
// String returns string representation of the SpanID.
//
// Important: don't rely on this method to get a string identifier of the SpanID;
// use hex.EncodeToString explicitly instead.
// This method is meant to implement the Stringer interface for display purposes only.
func (ms SpanID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if the SpanID consists entirely of zero bytes.
func (ms SpanID) IsEmpty() bool {
return data.SpanID(ms).IsEmpty()
}
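// Example (an editorial sketch, per the String doc above): prefer explicit hex
// encoding when a stable identifier string is required.
//
//	id := pcommon.SpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})
//	fmt.Println(hex.EncodeToString(id[:]))         // prints: 0000000000000001
//	fmt.Println(pcommon.NewSpanIDEmpty().String()) // prints an empty string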
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"time"
)
// Timestamp is a time specified as UNIX Epoch time in nanoseconds since
// 1970-01-01 00:00:00 +0000 UTC.
type Timestamp uint64
// NewTimestampFromTime constructs a new Timestamp from the provided time.Time.
func NewTimestampFromTime(t time.Time) Timestamp {
//nolint:gosec
return Timestamp(uint64(t.UnixNano()))
}
// AsTime converts this to a time.Time.
func (ts Timestamp) AsTime() time.Time {
//nolint:gosec
return time.Unix(0, int64(ts)).UTC()
}
// String returns the string representation of this Timestamp in UTC.
func (ts Timestamp) String() string {
return ts.AsTime().String()
}
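// Example (an editorial sketch): round-tripping between time.Time and Timestamp
// preserves the instant at nanosecond precision.
//
//	now := time.Now()
//	ts := pcommon.NewTimestampFromTime(now)
//	fmt.Println(ts.AsTime().Equal(now)) // prints: true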
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// TraceState represents the trace state from the w3c-trace-context.
//
// Must use NewTraceState function to create new instances.
// Important: zero-initialized instance is not valid for use.
type TraceState internal.TraceStateWrapper
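// NewTraceState creates a new empty TraceState.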
func NewTraceState() TraceState {
return TraceState(internal.NewTraceStateWrapper(new(string), internal.NewState()))
}
func (ms TraceState) getOrig() *string {
return internal.GetTraceStateOrig(internal.TraceStateWrapper(ms))
}
func (ms TraceState) getState() *internal.State {
return internal.GetTraceStateState(internal.TraceStateWrapper(ms))
}
// AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
func (ms TraceState) AsRaw() string {
return *ms.getOrig()
}
// FromRaw copies the string representation in w3c-trace-context format of the tracestate into this TraceState.
func (ms TraceState) FromRaw(v string) {
ms.getState().AssertMutable()
*ms.getOrig() = v
}
// MoveTo moves the TraceState instance overriding the destination
// and resetting the current instance to its zero value.
func (ms TraceState) MoveTo(dest TraceState) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = ""
}
// CopyTo copies the TraceState instance overriding the destination.
func (ms TraceState) CopyTo(dest TraceState) {
dest.getState().AssertMutable()
*dest.getOrig() = *ms.getOrig()
}
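// Example (an editorial sketch; the tracestate entry is made up): TraceState
// holds the raw w3c tracestate header value.
//
//	ts := pcommon.NewTraceState()
//	ts.FromRaw("vendor=value")
//	fmt.Println(ts.AsRaw()) // prints: vendor=value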
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal/data"
)
var emptyTraceID = TraceID([16]byte{})
// TraceID is a trace identifier.
type TraceID [16]byte
// NewTraceIDEmpty returns a new empty (all zero bytes) TraceID.
func NewTraceIDEmpty() TraceID {
return emptyTraceID
}
// String returns string representation of the TraceID.
//
// Important: don't rely on this method to get a string identifier of the TraceID;
// use hex.EncodeToString explicitly instead.
// This method is meant to implement the Stringer interface for display purposes only.
func (ms TraceID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if the TraceID consists entirely of zero bytes.
func (ms TraceID) IsEmpty() bool {
return data.TraceID(ms).IsEmpty()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/base64"
"encoding/json"
"fmt"
"math"
"strconv"
"go.opentelemetry.io/collector/pdata/internal"
otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1"
)
// ValueType specifies the type of Value.
type ValueType int32
const (
ValueTypeEmpty ValueType = iota
ValueTypeStr
ValueTypeInt
ValueTypeDouble
ValueTypeBool
ValueTypeMap
ValueTypeSlice
ValueTypeBytes
)
// String returns the string representation of the ValueType.
func (avt ValueType) String() string {
switch avt {
case ValueTypeEmpty:
return "Empty"
case ValueTypeStr:
return "Str"
case ValueTypeBool:
return "Bool"
case ValueTypeInt:
return "Int"
case ValueTypeDouble:
return "Double"
case ValueTypeMap:
return "Map"
case ValueTypeSlice:
return "Slice"
case ValueTypeBytes:
return "Bytes"
}
return ""
}
// Value is a mutable cell containing any value. Typically used as an element of Map or Slice.
// Must use one of NewValue+ functions below to create new instances.
//
// Intended to be passed by value since internally it is just a pointer to the actual
// value representation. For the same reason passing by value and calling setters
// will modify the original, e.g.:
//
// func f1(val Value) { val.SetInt(234) }
// func f2() {
// v := NewValueStr("a string")
// f1(v)
// _ = v.Type() // this will return ValueTypeInt
// }
//
// Important: zero-initialized instance is not valid for use. All Value functions below must
// be called only on instances that are created via NewValue+ functions.
type Value internal.ValueWrapper
// NewValueEmpty creates a new Value with an empty value.
func NewValueEmpty() Value {
return newValue(&otlpcommon.AnyValue{}, internal.NewState())
}
// NewValueStr creates a new Value with the given string value.
func NewValueStr(v string) Value {
ov := internal.NewOrigAnyValueStringValue()
ov.StringValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueInt creates a new Value with the given int64 value.
func NewValueInt(v int64) Value {
ov := internal.NewOrigAnyValueIntValue()
ov.IntValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueDouble creates a new Value with the given float64 value.
func NewValueDouble(v float64) Value {
ov := internal.NewOrigAnyValueDoubleValue()
ov.DoubleValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBool creates a new Value with the given bool value.
func NewValueBool(v bool) Value {
ov := internal.NewOrigAnyValueBoolValue()
ov.BoolValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueMap creates a new Value of map type.
func NewValueMap() Value {
ov := internal.NewOrigAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueSlice creates a new Value of array type.
func NewValueSlice() Value {
ov := internal.NewOrigAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBytes creates a new empty Value of byte type.
func NewValueBytes() Value {
ov := internal.NewOrigAnyValueBytesValue()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
func newValue(orig *otlpcommon.AnyValue, state *internal.State) Value {
return Value(internal.NewValueWrapper(orig, state))
}
func (v Value) getOrig() *otlpcommon.AnyValue {
return internal.GetValueOrig(internal.ValueWrapper(v))
}
func (v Value) getState() *internal.State {
return internal.GetValueState(internal.ValueWrapper(v))
}
// FromRaw sets the value from the given raw value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) FromRaw(iv any) error {
switch tv := iv.(type) {
case nil:
v.getOrig().Value = nil
case string:
v.SetStr(tv)
case int:
v.SetInt(int64(tv))
case int8:
v.SetInt(int64(tv))
case int16:
v.SetInt(int64(tv))
case int32:
v.SetInt(int64(tv))
case int64:
v.SetInt(tv)
case uint:
//nolint:gosec
v.SetInt(int64(tv))
case uint8:
v.SetInt(int64(tv))
case uint16:
v.SetInt(int64(tv))
case uint32:
v.SetInt(int64(tv))
case uint64:
//nolint:gosec
v.SetInt(int64(tv))
case float32:
v.SetDouble(float64(tv))
case float64:
v.SetDouble(tv)
case bool:
v.SetBool(tv)
case []byte:
v.SetEmptyBytes().FromRaw(tv)
case map[string]any:
return v.SetEmptyMap().FromRaw(tv)
case []any:
return v.SetEmptySlice().FromRaw(tv)
default:
return fmt.Errorf("<Invalid value type %T>", tv)
}
return nil
}
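// Example (an editorial sketch): FromRaw normalizes Go scalars into the OTLP
// value model; note that every integer kind lands in ValueTypeInt.
//
//	v := pcommon.NewValueEmpty()
//	_ = v.FromRaw(uint16(7))
//	fmt.Println(v.Type(), v.Int()) // prints: Int 7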
// Type returns the type of the value for this Value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) Type() ValueType {
switch v.getOrig().Value.(type) {
case *otlpcommon.AnyValue_StringValue:
return ValueTypeStr
case *otlpcommon.AnyValue_BoolValue:
return ValueTypeBool
case *otlpcommon.AnyValue_IntValue:
return ValueTypeInt
case *otlpcommon.AnyValue_DoubleValue:
return ValueTypeDouble
case *otlpcommon.AnyValue_KvlistValue:
return ValueTypeMap
case *otlpcommon.AnyValue_ArrayValue:
return ValueTypeSlice
case *otlpcommon.AnyValue_BytesValue:
return ValueTypeBytes
}
return ValueTypeEmpty
}
// Str returns the string value associated with this Value.
// The shorter name is used instead of String to avoid implementing the fmt.Stringer interface.
// If the Type() is not ValueTypeStr then returns empty string.
func (v Value) Str() string {
return v.getOrig().GetStringValue()
}
// Int returns the int64 value associated with this Value.
// If the Type() is not ValueTypeInt then returns int64(0).
func (v Value) Int() int64 {
return v.getOrig().GetIntValue()
}
// Double returns the float64 value associated with this Value.
// If the Type() is not ValueTypeDouble then returns float64(0).
func (v Value) Double() float64 {
return v.getOrig().GetDoubleValue()
}
// Bool returns the bool value associated with this Value.
// If the Type() is not ValueTypeBool then returns false.
func (v Value) Bool() bool {
return v.getOrig().GetBoolValue()
}
// Map returns the map value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeMap
// then it returns an invalid map. Note that using such map can cause panic.
func (v Value) Map() Map {
kvlist := v.getOrig().GetKvlistValue()
if kvlist == nil {
return Map{}
}
return newMap(&kvlist.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Slice returns the slice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeSlice
// then returns an invalid slice. Note that using such slice can cause panic.
func (v Value) Slice() Slice {
arr := v.getOrig().GetArrayValue()
if arr == nil {
return Slice{}
}
return newSlice(&arr.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Bytes returns the ByteSlice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes
// then returns an invalid ByteSlice object. Note that using such slice can cause panic.
func (v Value) Bytes() ByteSlice {
bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue)
if !ok {
return ByteSlice{}
}
return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, internal.GetValueState(internal.ValueWrapper(v))))
}
// SetStr replaces the string value associated with this Value,
// it also changes the type to be ValueTypeStr.
// The shorter name is used instead of SetString to avoid implementing
// the fmt.Stringer interface by the corresponding getter method.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetStr(sv string) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueStringValue()
ov.StringValue = sv
v.getOrig().Value = ov
}
// SetInt replaces the int64 value associated with this Value,
// it also changes the type to be ValueTypeInt.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetInt(iv int64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueIntValue()
ov.IntValue = iv
v.getOrig().Value = ov
}
// SetDouble replaces the float64 value associated with this Value,
// it also changes the type to be ValueTypeDouble.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetDouble(dv float64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueDoubleValue()
ov.DoubleValue = dv
v.getOrig().Value = ov
}
// SetBool replaces the bool value associated with this Value,
// it also changes the type to be ValueTypeBool.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetBool(bv bool) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueBoolValue()
ov.BoolValue = bv
v.getOrig().Value = ov
}
// SetEmptyBytes sets value to an empty byte slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyBytes() ByteSlice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
bv := internal.NewOrigAnyValueBytesValue()
v.getOrig().Value = bv
return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, v.getState()))
}
// SetEmptyMap sets value to an empty map and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyMap() Map {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
v.getOrig().Value = ov
return newMap(&ov.KvlistValue.Values, v.getState())
}
// SetEmptySlice sets value to an empty slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptySlice() Slice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewOrigAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
v.getOrig().Value = ov
return newSlice(&ov.ArrayValue.Values, v.getState())
}
// MoveTo moves the Value from current overriding the destination and
// resetting the current instance to empty value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) MoveTo(dest Value) {
v.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if v.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *v.getOrig()
v.getOrig().Value = nil
}
// CopyTo copies the Value to the destination, overriding the destination's previous contents.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) CopyTo(dest Value) {
dest.getState().AssertMutable()
internal.CopyAnyValue(dest.getOrig(), v.getOrig())
}
// AsString converts an OTLP Value object of any type to its equivalent string
// representation. This differs from Str, which only returns a non-empty value
// if the ValueType is ValueTypeStr.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) AsString() string {
switch v.Type() {
case ValueTypeEmpty:
return ""
case ValueTypeStr:
return v.Str()
case ValueTypeBool:
return strconv.FormatBool(v.Bool())
case ValueTypeDouble:
return float64AsString(v.Double())
case ValueTypeInt:
return strconv.FormatInt(v.Int(), 10)
case ValueTypeMap:
jsonStr, _ := json.Marshal(v.Map().AsRaw())
return string(jsonStr)
case ValueTypeBytes:
return base64.StdEncoding.EncodeToString(*v.Bytes().getOrig())
case ValueTypeSlice:
jsonStr, _ := json.Marshal(v.Slice().AsRaw())
return string(jsonStr)
default:
return fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", v.Type())
}
}
// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.7:src/encoding/json/encode.go;l=585.
// This allows us to avoid using reflection.
func float64AsString(f float64) string {
if math.IsInf(f, 0) || math.IsNaN(f) {
return "json: unsupported value: " + strconv.FormatFloat(f, 'g', -1, 64)
}
// Convert as if by ES6 number to string conversion.
// This matches most other JSON generators.
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
scratch := [64]byte{}
b := scratch[:0]
abs := math.Abs(f)
fmt := byte('f')
if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
fmt = 'e'
}
b = strconv.AppendFloat(b, f, fmt, -1, 64)
if fmt == 'e' {
// clean up e-09 to e-9
n := len(b)
if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
b[n-2] = b[n-1]
b = b[:n-1]
}
}
return string(b)
}
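// float64AsStringSketch is a minimal, non-generated sketch of the formatting rules
// above: plain 'f' form in the middle range, exponent form outside it, with
// single-digit exponents left unpadded.
func float64AsStringSketch() {
_ = float64AsString(12.5) // "12.5": 'f' form for 1e-6 <= |f| < 1e21
_ = float64AsString(5e-7) // "5e-7": 'e' form below 1e-6; "e-07" cleaned up to "e-7"
_ = float64AsString(1e21) // "1e+21": 'e' form at and above 1e21
}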
// AsRaw converts the Value to its equivalent raw Go representation: nil,
// string, bool, float64, int64, []byte, map[string]any, or []any.
func (v Value) AsRaw() any {
switch v.Type() {
case ValueTypeEmpty:
return nil
case ValueTypeStr:
return v.Str()
case ValueTypeBool:
return v.Bool()
case ValueTypeDouble:
return v.Double()
case ValueTypeInt:
return v.Int()
case ValueTypeBytes:
return v.Bytes().AsRaw()
case ValueTypeMap:
return v.Map().AsRaw()
case ValueTypeSlice:
return v.Slice().AsRaw()
}
return fmt.Sprintf("<Unknown OpenTelemetry value type %q>", v.Type())
}
// Equal reports whether v and c are of the same type and hold equal contents.
func (v Value) Equal(c Value) bool {
if v.Type() != c.Type() {
return false
}
switch v.Type() {
case ValueTypeEmpty:
return true
case ValueTypeStr:
return v.Str() == c.Str()
case ValueTypeBool:
return v.Bool() == c.Bool()
case ValueTypeDouble:
return v.Double() == c.Double()
case ValueTypeInt:
return v.Int() == c.Int()
case ValueTypeBytes:
return v.Bytes().Equal(c.Bytes())
case ValueTypeMap:
return v.Map().Equal(c.Map())
case ValueTypeSlice:
return v.Slice().Equal(c.Slice())
}
return false
}
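// valueSettersSketch is a minimal, non-generated usage sketch of the setters above.
// It assumes NewValueEmpty, the exported constructor of this package.
func valueSettersSketch() {
v := NewValueEmpty()
v.SetStr("tag") // Type() is now ValueTypeStr
v.SetInt(42) // overwrites the string payload; Type() is now ValueTypeInt
m := v.SetEmptyMap() // Type() is now ValueTypeMap; m is a live view into v
m.PutBool("ok", true)
_ = v.AsString() // `{"ok":true}`: non-primitive values render as JSON
}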
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewLogRecord function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecord struct {
orig *otlplogs.LogRecord
state *internal.State
}
func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord {
return LogRecord{orig: orig, state: state}
}
// NewLogRecord creates a new empty LogRecord.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLogRecord() LogRecord {
return newLogRecord(internal.NewLogRecord(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms LogRecord) MoveTo(dest LogRecord) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLogRecord(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Timestamp returns the timestamp associated with this LogRecord.
func (ms LogRecord) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this LogRecord.
func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ObservedTimestamp returns the observed timestamp associated with this LogRecord.
func (ms LogRecord) ObservedTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.ObservedTimeUnixNano)
}
// SetObservedTimestamp replaces the observed timestamp associated with this LogRecord.
func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.ObservedTimeUnixNano = uint64(v)
}
// SeverityNumber returns the severity number associated with this LogRecord.
func (ms LogRecord) SeverityNumber() SeverityNumber {
return SeverityNumber(ms.orig.SeverityNumber)
}
// SetSeverityNumber replaces the severity number associated with this LogRecord.
func (ms LogRecord) SetSeverityNumber(v SeverityNumber) {
ms.state.AssertMutable()
ms.orig.SeverityNumber = otlplogs.SeverityNumber(v)
}
// SeverityText returns the severity text associated with this LogRecord.
func (ms LogRecord) SeverityText() string {
return ms.orig.SeverityText
}
// SetSeverityText replaces the severity text associated with this LogRecord.
func (ms LogRecord) SetSeverityText(v string) {
ms.state.AssertMutable()
ms.orig.SeverityText = v
}
// Body returns the body associated with this LogRecord.
func (ms LogRecord) Body() pcommon.Value {
return pcommon.Value(internal.NewValueWrapper(&ms.orig.Body, ms.state))
}
// Attributes returns the Attributes associated with this LogRecord.
func (ms LogRecord) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the dropped attributes count associated with this LogRecord.
func (ms LogRecord) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the dropped attributes count associated with this LogRecord.
func (ms LogRecord) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Flags returns the flags associated with this LogRecord.
func (ms LogRecord) Flags() LogRecordFlags {
return LogRecordFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this LogRecord.
func (ms LogRecord) SetFlags(v LogRecordFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// TraceID returns the trace ID associated with this LogRecord.
func (ms LogRecord) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the trace ID associated with this LogRecord.
func (ms LogRecord) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = data.TraceID(v)
}
// SpanID returns the span ID associated with this LogRecord.
func (ms LogRecord) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the span ID associated with this LogRecord.
func (ms LogRecord) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = data.SpanID(v)
}
// EventName returns the event name associated with this LogRecord.
func (ms LogRecord) EventName() string {
return ms.orig.EventName
}
// SetEventName replaces the event name associated with this LogRecord.
func (ms LogRecord) SetEventName(v string) {
ms.state.AssertMutable()
ms.orig.EventName = v
}
// CopyTo copies all properties from the current struct to the destination, overriding it.
func (ms LogRecord) CopyTo(dest LogRecord) {
dest.state.AssertMutable()
internal.CopyLogRecord(dest.orig, ms.orig)
}
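// logRecordSketch is a minimal, non-generated sketch of populating a LogRecord.
// NewLogRecord is meant for tests; in pipelines, records are normally obtained
// via ScopeLogs.LogRecords().AppendEmpty().
func logRecordSketch() {
lr := NewLogRecord()
lr.SetSeverityNumber(SeverityNumberInfo)
lr.SetSeverityText("INFO")
lr.Body().SetStr("user logged in") // Body is a live pcommon.Value view
lr.Attributes().PutStr("user.id", "42")
}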
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// LogRecordSlice logically represents a slice of LogRecord.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLogRecordSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecordSlice struct {
orig *[]*otlplogs.LogRecord
state *internal.State
}
func newLogRecordSlice(orig *[]*otlplogs.LogRecord, state *internal.State) LogRecordSlice {
return LogRecordSlice{orig: orig, state: state}
}
// NewLogRecordSlice creates a LogRecordSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLogRecordSlice() LogRecordSlice {
orig := []*otlplogs.LogRecord(nil)
return newLogRecordSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLogRecordSlice()".
func (es LogRecordSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LogRecordSlice) At(i int) LogRecord {
return newLogRecord((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LogRecordSlice) All() iter.Seq2[int, LogRecord] {
return func(yield func(int, LogRecord) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new LogRecordSlice can be initialized:
//
// es := NewLogRecordSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LogRecordSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty LogRecord to the end of the slice.
// It returns the newly added LogRecord.
func (es LogRecordSlice) AppendEmpty() LogRecord {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLogRecord())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LogRecordSlice) MoveAndAppendTo(dest LogRecordSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLogRecord((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice to the destination, overriding it.
func (es LogRecordSlice) CopyTo(dest LogRecordSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLogRecordSlice(*dest.orig, *es.orig)
}
// Sort sorts the LogRecord elements within LogRecordSlice given the
// provided less function so that two instances of LogRecordSlice
// can be compared.
func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
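// logRecordSliceSketch is a minimal, non-generated sketch of the slice API above:
// append, iterate with All, filter with RemoveIf, and order with Sort.
func logRecordSliceSketch() {
es := NewLogRecordSlice()
es.EnsureCapacity(2)
es.AppendEmpty().SetSeverityNumber(SeverityNumberDebug)
es.AppendEmpty().SetSeverityNumber(SeverityNumberWarn)
for _, lr := range es.All() {
_ = lr.SeverityNumber()
}
// Drop everything below WARN, then order by timestamp.
es.RemoveIf(func(lr LogRecord) bool { return lr.SeverityNumber() < SeverityNumberWarn })
es.Sort(func(a, b LogRecord) bool { return a.Timestamp() < b.Timestamp() })
}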
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
)
// Logs is the top-level struct that is propagated through the logs pipeline.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Logs internal.LogsWrapper
func newLogs(orig *otlpcollectorlogs.ExportLogsServiceRequest, state *internal.State) Logs {
return Logs(internal.NewLogsWrapper(orig, state))
}
// NewLogs creates a new empty Logs.
func NewLogs() Logs {
return newLogs(internal.NewExportLogsServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms Logs) MoveTo(dest Logs) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportLogsServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceLogs returns the ResourceLogs associated with this Logs.
func (ms Logs) ResourceLogs() ResourceLogsSlice {
return newResourceLogsSlice(&ms.getOrig().ResourceLogs, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Logs) CopyTo(dest Logs) {
dest.getState().AssertMutable()
internal.CopyExportLogsServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Logs) getOrig() *otlpcollectorlogs.ExportLogsServiceRequest {
return internal.GetLogsOrig(internal.LogsWrapper(ms))
}
func (ms Logs) getState() *internal.State {
return internal.GetLogsState(internal.LogsWrapper(ms))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceLogs is a collection of logs from a Resource.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogs struct {
orig *otlplogs.ResourceLogs
state *internal.State
}
func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) ResourceLogs {
return ResourceLogs{orig: orig, state: state}
}
// NewResourceLogs creates a new empty ResourceLogs.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceLogs() ResourceLogs {
return newResourceLogs(internal.NewResourceLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms ResourceLogs) MoveTo(dest ResourceLogs) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceLogs(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceLogs.
func (ms ResourceLogs) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeLogs returns the ScopeLogs associated with this ResourceLogs.
func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice {
return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state)
}
// SchemaUrl returns the schema URL associated with this ResourceLogs.
func (ms ResourceLogs) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schema URL associated with this ResourceLogs.
func (ms ResourceLogs) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct to the destination, overriding it.
func (ms ResourceLogs) CopyTo(dest ResourceLogs) {
dest.state.AssertMutable()
internal.CopyResourceLogs(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// ResourceLogsSlice logically represents a slice of ResourceLogs.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogsSlice struct {
orig *[]*otlplogs.ResourceLogs
state *internal.State
}
func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs, state *internal.State) ResourceLogsSlice {
return ResourceLogsSlice{orig: orig, state: state}
}
// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceLogsSlice() ResourceLogsSlice {
orig := []*otlplogs.ResourceLogs(nil)
return newResourceLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceLogsSlice()".
func (es ResourceLogsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceLogsSlice) At(i int) ResourceLogs {
return newResourceLogs((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceLogsSlice) All() iter.Seq2[int, ResourceLogs] {
return func(yield func(int, ResourceLogs) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ResourceLogsSlice can be initialized:
//
// es := NewResourceLogsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceLogsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ResourceLogs to the end of the slice.
// It returns the newly added ResourceLogs.
func (es ResourceLogsSlice) AppendEmpty() ResourceLogs {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceLogs())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceLogs((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice to the destination, overriding it.
func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceLogsSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceLogs elements within ResourceLogsSlice given the
// provided less function so that two instances of ResourceLogsSlice
// can be compared.
func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeLogs is a collection of logs from an InstrumentationScope.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogs struct {
orig *otlplogs.ScopeLogs
state *internal.State
}
func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs {
return ScopeLogs{orig: orig, state: state}
}
// NewScopeLogs creates a new empty ScopeLogs.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeLogs() ScopeLogs {
return newScopeLogs(internal.NewScopeLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms ScopeLogs) MoveTo(dest ScopeLogs) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeLogs(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeLogs.
func (ms ScopeLogs) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// LogRecords returns the LogRecords associated with this ScopeLogs.
func (ms ScopeLogs) LogRecords() LogRecordSlice {
return newLogRecordSlice(&ms.orig.LogRecords, ms.state)
}
// SchemaUrl returns the schema URL associated with this ScopeLogs.
func (ms ScopeLogs) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schema URL associated with this ScopeLogs.
func (ms ScopeLogs) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct to the destination, overriding it.
func (ms ScopeLogs) CopyTo(dest ScopeLogs) {
dest.state.AssertMutable()
internal.CopyScopeLogs(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// ScopeLogsSlice logically represents a slice of ScopeLogs.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogsSlice struct {
orig *[]*otlplogs.ScopeLogs
state *internal.State
}
func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs, state *internal.State) ScopeLogsSlice {
return ScopeLogsSlice{orig: orig, state: state}
}
// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeLogsSlice() ScopeLogsSlice {
orig := []*otlplogs.ScopeLogs(nil)
return newScopeLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeLogsSlice()".
func (es ScopeLogsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeLogsSlice) At(i int) ScopeLogs {
return newScopeLogs((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeLogsSlice) All() iter.Seq2[int, ScopeLogs] {
return func(yield func(int, ScopeLogs) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ScopeLogsSlice can be initialized:
//
// es := NewScopeLogsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeLogsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ScopeLogs to the end of the slice.
// It returns the newly added ScopeLogs.
func (es ScopeLogsSlice) AppendEmpty() ScopeLogs {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeLogs())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeLogsSlice) MoveAndAppendTo(dest ScopeLogsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeLogs((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice to the destination, overriding it.
func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeLogsSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeLogs elements within ScopeLogsSlice given the
// provided less function so that two instances of ScopeLogsSlice
// can be compared.
func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals Logs to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalLogs to the OTLP/JSON format.
func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportLogsServiceRequest(ld.getOrig(), dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
var _ Unmarshaler = (*JSONUnmarshaler)(nil)
// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes into Logs.
type JSONUnmarshaler struct{}
// UnmarshalLogs from OTLP/JSON format into Logs.
func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
ld := NewLogs()
internal.UnmarshalJSONExportLogsServiceRequest(ld.getOrig(), iter)
if iter.Error() != nil {
return Logs{}, iter.Error()
}
otlp.MigrateLogs(ld.getOrig().ResourceLogs)
return ld, nil
}
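// jsonRoundTripSketch is a minimal, non-generated sketch of an OTLP/JSON round
// trip through the marshaler and unmarshaler above.
func jsonRoundTripSketch() error {
ld := NewLogs()
lr := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
lr.Body().SetStr("hello")
buf, err := (&JSONMarshaler{}).MarshalLogs(ld)
if err != nil {
return err
}
back, err := (&JSONUnmarshaler{}).UnmarshalLogs(buf)
if err != nil {
return err
}
_ = back.LogRecordCount() // 1
return nil
}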
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
const isSampledMask = uint32(1)
// DefaultLogRecordFlags is the default, empty LogRecordFlags (no flags set).
var DefaultLogRecordFlags = LogRecordFlags(0)
// LogRecordFlags defines flags for the LogRecord. The 8 least significant bits are the trace flags as
// defined in the W3C Trace Context specification. The 24 most significant bits are reserved and must be set to 0.
type LogRecordFlags uint32
// IsSampled returns true if the LogRecordFlags contains the IsSampled flag.
func (ms LogRecordFlags) IsSampled() bool {
return uint32(ms)&isSampledMask != 0
}
// WithIsSampled returns a new LogRecordFlags, with the IsSampled flag set to the given value.
func (ms LogRecordFlags) WithIsSampled(b bool) LogRecordFlags {
orig := uint32(ms)
if b {
orig |= isSampledMask
} else {
orig &^= isSampledMask
}
return LogRecordFlags(orig)
}
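// logRecordFlagsSketch is a minimal, non-generated sketch of the value-style flag
// helpers above: WithIsSampled returns a new LogRecordFlags and never mutates
// the receiver.
func logRecordFlagsSketch() {
f := DefaultLogRecordFlags.WithIsSampled(true)
_ = f.IsSampled() // true
_ = DefaultLogRecordFlags.IsSampled() // still false; the default is unchanged
}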
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
// MarkReadOnly marks the Logs as shared so that no further modifications can be done on it.
func (ms Logs) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Logs instance is read-only.
func (ms Logs) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// LogRecordCount calculates the total number of log records.
func (ms Logs) LogRecordCount() int {
logCount := 0
rss := ms.ResourceLogs()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ill := rs.ScopeLogs()
for j := 0; j < ill.Len(); j++ {
logs := ill.At(j)
logCount += logs.LogRecords().Len()
}
}
return logCount
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
var _ MarshalSizer = (*ProtoMarshaler)(nil)
// ProtoMarshaler marshals Logs into OTLP binary protobuf bytes.
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
size := internal.SizeProtoExportLogsServiceRequest(ld.getOrig())
buf := make([]byte, size)
_ = internal.MarshalProtoExportLogsServiceRequest(ld.getOrig(), buf)
return buf, nil
}
func (e *ProtoMarshaler) LogsSize(ld Logs) int {
return internal.SizeProtoExportLogsServiceRequest(ld.getOrig())
}
func (e *ProtoMarshaler) ResourceLogsSize(ld ResourceLogs) int {
return internal.SizeProtoResourceLogs(ld.orig)
}
func (e *ProtoMarshaler) ScopeLogsSize(ld ScopeLogs) int {
return internal.SizeProtoScopeLogs(ld.orig)
}
func (e *ProtoMarshaler) LogRecordSize(ld LogRecord) int {
return internal.SizeProtoLogRecord(ld.orig)
}
var _ Unmarshaler = (*ProtoUnmarshaler)(nil)
// ProtoUnmarshaler unmarshals OTLP binary protobuf bytes into Logs.
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
ld := NewLogs()
err := internal.UnmarshalProtoExportLogsServiceRequest(ld.getOrig(), buf)
if err != nil {
return Logs{}, err
}
return ld, nil
}
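// protoRoundTripSketch is a minimal, non-generated sketch of a binary protobuf
// round trip through ProtoMarshaler and ProtoUnmarshaler above.
func protoRoundTripSketch(ld Logs) (Logs, error) {
buf, err := (&ProtoMarshaler{}).MarshalLogs(ld)
if err != nil {
return Logs{}, err
}
return (&ProtoUnmarshaler{}).UnmarshalLogs(buf)
}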
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plogotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *otlpcollectorlogs.ExportLogsPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *otlpcollectorlogs.ExportLogsPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportLogsPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportLogsPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedLogRecords returns the rejected log records count associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedLogRecords() int64 {
return ms.orig.RejectedLogRecords
}
// SetRejectedLogRecords replaces the rejected log records count associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedLogRecords(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedLogRecords = v
}
// ErrorMessage returns the error message associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the error message associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct to the destination, overriding it.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportLogsPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plogotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *otlpcollectorlogs.ExportLogsServiceResponse
state *internal.State
}
func newExportResponse(orig *otlpcollectorlogs.ExportLogsServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportLogsServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportLogsServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partial success details associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct to the destination, overriding it.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportLogsServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
_ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded.
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for the OTLP gRPC Logs service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export plog.Logs to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementation of the GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otlpcollectorlog.NewLogsServiceClient(cc)}
}
type grpcClient struct {
rawClient otlpcollectorlog.LogsServiceClient
}
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for the OTLP gRPC LogsService.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementation of the GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer with the given grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otlpcollectorlog.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv})
}
type rawLogsServer struct {
srv GRPCServer
}
func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlog.ExportLogsServiceRequest) (*otlpcollectorlog.ExportLogsServiceResponse, error) {
otlp.MigrateLogs(request.ResourceLogs)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
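// loggingServerSketch is a minimal, non-generated sketch of a GRPCServer
// implementation: embedding UnimplementedGRPCServer satisfies the unexported
// method and keeps the implementation forward compatible. It would be wired up
// with RegisterGRPCServer(grpcSrv, &loggingServerSketch{}).
type loggingServerSketch struct {
UnimplementedGRPCServer
}
func (s *loggingServerSketch) Export(_ context.Context, req ExportRequest) (ExportResponse, error) {
_ = req.Logs().LogRecordCount() // inspect the payload as plog.Logs
return NewExportResponse(), nil
}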
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/plog"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for plog.Logs data.
type ExportRequest struct {
orig *otlpcollectorlog.ExportLogsServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &otlpcollectorlog.ExportLogsServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromLogs returns an ExportRequest from plog.Logs.
// Because ExportRequest is a wrapper for plog.Logs,
// any changes to the provided Logs struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromLogs(ld plog.Logs) ExportRequest {
return ExportRequest{
orig: internal.GetLogsOrig(internal.LogsWrapper(ld)),
state: internal.GetLogsState(internal.LogsWrapper(ld)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportLogsServiceRequest(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportLogsServiceRequest(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := internal.UnmarshalProtoExportLogsServiceRequest(ms.orig, data)
if err != nil {
return err
}
otlp.MigrateLogs(ms.orig.ResourceLogs)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportLogsServiceRequest(ms.orig, dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportLogsServiceRequest(ms.orig, iter)
return iter.Error()
}
// Logs returns the plog.Logs view of the data wrapped by this ExportRequest.
func (ms ExportRequest) Logs() plog.Logs {
return plog.Logs(internal.NewLogsWrapper(ms.orig, ms.state))
}
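// exportRequestSketch is a minimal, non-generated sketch of the wrapper semantics
// above: the request and the plog.Logs share the same underlying data, so
// mutations through either handle are visible through the other.
func exportRequestSketch() {
ld := plog.NewLogs()
req := NewExportRequestFromLogs(ld)
req.Logs().ResourceLogs().AppendEmpty()
_ = ld.ResourceLogs().Len() // 1
}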
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportLogsServiceResponse(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportLogsServiceResponse(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return internal.UnmarshalProtoExportLogsServiceResponse(ms.orig, data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportLogsServiceResponse(ms.orig, dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportLogsServiceResponse(ms.orig, iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1"
)
// SeverityNumber represents the severity number of a log record.
type SeverityNumber int32
const (
SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED)
SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE)
SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2)
SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3)
SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4)
SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG)
SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2)
SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3)
SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4)
SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO)
SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2)
SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3)
SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4)
SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN)
SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2)
SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3)
SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4)
SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR)
SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2)
SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3)
SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4)
SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL)
SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2)
SeverityNumberFatal3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3)
SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4)
)
// String returns the string representation of the SeverityNumber.
func (sn SeverityNumber) String() string {
switch sn {
case SeverityNumberUnspecified:
return "Unspecified"
case SeverityNumberTrace:
return "Trace"
case SeverityNumberTrace2:
return "Trace2"
case SeverityNumberTrace3:
return "Trace3"
case SeverityNumberTrace4:
return "Trace4"
case SeverityNumberDebug:
return "Debug"
case SeverityNumberDebug2:
return "Debug2"
case SeverityNumberDebug3:
return "Debug3"
case SeverityNumberDebug4:
return "Debug4"
case SeverityNumberInfo:
return "Info"
case SeverityNumberInfo2:
return "Info2"
case SeverityNumberInfo3:
return "Info3"
case SeverityNumberInfo4:
return "Info4"
case SeverityNumberWarn:
return "Warn"
case SeverityNumberWarn2:
return "Warn2"
case SeverityNumberWarn3:
return "Warn3"
case SeverityNumberWarn4:
return "Warn4"
case SeverityNumberError:
return "Error"
case SeverityNumberError2:
return "Error2"
case SeverityNumberError3:
return "Error3"
case SeverityNumberError4:
return "Error4"
case SeverityNumberFatal:
return "Fatal"
case SeverityNumberFatal2:
return "Fatal2"
case SeverityNumberFatal3:
return "Fatal3"
case SeverityNumberFatal4:
return "Fatal4"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// AggregationTemporality defines how a metric aggregator reports aggregated values.
// It describes how those values relate to the time interval over which they are aggregated.
type AggregationTemporality int32
const (
// AggregationTemporalityUnspecified is the default AggregationTemporality; it MUST NOT be used.
AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
// AggregationTemporalityDelta is an AggregationTemporality for a metric aggregator which reports changes since last report time.
AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
// AggregationTemporalityCumulative is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
)
// String returns the string representation of the AggregationTemporality.
func (at AggregationTemporality) String() string {
switch at {
case AggregationTemporalityUnspecified:
return "Unspecified"
case AggregationTemporalityDelta:
return "Delta"
case AggregationTemporalityCumulative:
return "Cumulative"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// ExemplarValueType specifies the type of Exemplar measurement value.
type ExemplarValueType int32
const (
// ExemplarValueTypeEmpty means that the exemplar value is unset.
ExemplarValueTypeEmpty ExemplarValueType = iota
ExemplarValueTypeInt
ExemplarValueTypeDouble
)
// String returns the string representation of the ExemplarValueType.
func (nt ExemplarValueType) String() string {
switch nt {
case ExemplarValueTypeEmpty:
return "Empty"
case ExemplarValueTypeInt:
return "Int"
case ExemplarValueTypeDouble:
return "Double"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Exemplar is a sample input measurement (an int or a double; see ValueType).
//
// Exemplars also hold information about the environment when the measurement was recorded,
// for example the span and trace ID of the active span when the exemplar was recorded.
//
// This is a reference type: if passed by value and the callee modifies it, the
// caller will see the modification.
//
// Must use NewExemplar function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Exemplar struct {
orig *otlpmetrics.Exemplar
state *internal.State
}
func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar {
return Exemplar{orig: orig, state: state}
}
// NewExemplar creates a new empty Exemplar.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExemplar() Exemplar {
return newExemplar(internal.NewExemplar(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination,
// overriding the destination and resetting the current instance to its zero value.
func (ms Exemplar) MoveTo(dest Exemplar) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExemplar(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// FilteredAttributes returns the FilteredAttributes associated with this Exemplar.
func (ms Exemplar) FilteredAttributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.FilteredAttributes, ms.state))
}
// Timestamp returns the timestamp associated with this Exemplar.
func (ms Exemplar) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this Exemplar.
func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ValueType returns the type of the value for this Exemplar.
// Calling this function on zero-initialized Exemplar will cause a panic.
func (ms Exemplar) ValueType() ExemplarValueType {
switch ms.orig.Value.(type) {
case *otlpmetrics.Exemplar_AsDouble:
return ExemplarValueTypeDouble
case *otlpmetrics.Exemplar_AsInt:
return ExemplarValueTypeInt
}
return ExemplarValueTypeEmpty
}
// DoubleValue returns the double associated with this Exemplar.
func (ms Exemplar) DoubleValue() float64 {
return ms.orig.GetAsDouble()
}
// SetDoubleValue replaces the double associated with this Exemplar.
func (ms Exemplar) SetDoubleValue(v float64) {
ms.state.AssertMutable()
var ov *otlpmetrics.Exemplar_AsDouble
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsDouble{}
} else {
ov = internal.ProtoPoolExemplar_AsDouble.Get().(*otlpmetrics.Exemplar_AsDouble)
}
ov.AsDouble = v
ms.orig.Value = ov
}
// IntValue returns the int associated with this Exemplar.
func (ms Exemplar) IntValue() int64 {
return ms.orig.GetAsInt()
}
// SetIntValue replaces the int associated with this Exemplar.
func (ms Exemplar) SetIntValue(v int64) {
ms.state.AssertMutable()
var ov *otlpmetrics.Exemplar_AsInt
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Exemplar_AsInt{}
} else {
ov = internal.ProtoPoolExemplar_AsInt.Get().(*otlpmetrics.Exemplar_AsInt)
}
ov.AsInt = v
ms.orig.Value = ov
}
// SpanID returns the span ID associated with this Exemplar.
func (ms Exemplar) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the span ID associated with this Exemplar.
func (ms Exemplar) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = data.SpanID(v)
}
// TraceID returns the trace ID associated with this Exemplar.
func (ms Exemplar) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Exemplar.
func (ms Exemplar) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = data.TraceID(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Exemplar) CopyTo(dest Exemplar) {
dest.state.AssertMutable()
internal.CopyExemplar(dest.orig, ms.orig)
}
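// Example (illustrative sketch, not part of the generated code above): how the
// Exemplar oneof value is typically set and read back with a ValueType switch.
// Only the pmetric API documented in this package is used; exemplars are
// created through a slice, as the NewExemplar docs recommend.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	es := pmetric.NewExemplarSlice()
	ex := es.AppendEmpty()
	ex.SetIntValue(7) // stores the value in the AsInt oneof case

	switch ex.ValueType() {
	case pmetric.ExemplarValueTypeDouble:
		fmt.Println("double:", ex.DoubleValue())
	case pmetric.ExemplarValueTypeInt:
		fmt.Println("int:", ex.IntValue())
	case pmetric.ExemplarValueTypeEmpty:
		fmt.Println("no value set")
	}
}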
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExemplarSlice logically represents a slice of Exemplar.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExemplarSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExemplarSlice struct {
orig *[]otlpmetrics.Exemplar
state *internal.State
}
func newExemplarSlice(orig *[]otlpmetrics.Exemplar, state *internal.State) ExemplarSlice {
return ExemplarSlice{orig: orig, state: state}
}
// NewExemplarSlice creates an ExemplarSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExemplarSlice() ExemplarSlice {
orig := []otlpmetrics.Exemplar(nil)
return newExemplarSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewExemplarSlice()".
func (es ExemplarSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ExemplarSlice) At(i int) Exemplar {
return newExemplar(&(*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ExemplarSlice) All() iter.Seq2[int, Exemplar] {
return func(yield func(int, Exemplar) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new ExemplarSlice can be initialized:
//
// es := NewExemplarSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ExemplarSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]otlpmetrics.Exemplar, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Exemplar.
// It returns the newly added Exemplar.
func (es ExemplarSlice) AppendEmpty() Exemplar {
es.state.AssertMutable()
*es.orig = append(*es.orig, otlpmetrics.Exemplar{})
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ExemplarSlice) MoveAndAppendTo(dest ExemplarSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteExemplar(&(*es.orig)[i], false)
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
(*es.orig)[i].Reset()
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExemplarSlice) CopyTo(dest ExemplarSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyExemplarSlice(*dest.orig, *es.orig)
}
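// Example (illustrative sketch, not part of the generated code above):
// pre-sizing an ExemplarSlice with EnsureCapacity and walking it with the All
// iterator, as the docs above suggest. Range-over-func assumes Go 1.23+.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	es := pmetric.NewExemplarSlice()
	es.EnsureCapacity(4) // one allocation up front instead of append growth
	for i := 0; i < 4; i++ {
		es.AppendEmpty().SetDoubleValue(float64(i) / 2)
	}
	for i, ex := range es.All() {
		fmt.Printf("exemplar %d: %v\n", i, ex.DoubleValue())
	}
}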
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogram struct {
orig *otlpmetrics.ExponentialHistogram
state *internal.State
}
func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *internal.State) ExponentialHistogram {
return ExponentialHistogram{orig: orig, state: state}
}
// NewExponentialHistogram creates a new empty ExponentialHistogram.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogram() ExponentialHistogram {
return newExponentialHistogram(internal.NewExponentialHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogram(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this ExponentialHistogram.
func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice {
return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this ExponentialHistogram.
func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram.
func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) {
dest.state.AssertMutable()
internal.CopyExponentialHistogram(dest.orig, ms.orig)
}
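// Example (illustrative sketch, not part of the generated code above): wiring
// an ExponentialHistogram onto a Metric and setting its temporality.
// AggregationTemporalityDelta is assumed to be the pmetric constant for delta
// temporality; the metric name is made up.
package main

import "go.opentelemetry.io/collector/pdata/pmetric"

func main() {
	m := pmetric.NewMetricSlice().AppendEmpty()
	m.SetName("request.duration")
	eh := m.SetEmptyExponentialHistogram() // Type() now reports MetricTypeExponentialHistogram
	eh.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
	_ = eh.DataPoints() // points are appended here, one per (attributes, time window)
}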
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
// summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPoint struct {
orig *otlpmetrics.ExponentialHistogramDataPoint
state *internal.State
}
func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint {
return ExponentialHistogramDataPoint{orig: orig, state: state}
}
// NewExponentialHistogramDataPoint creates a new empty ExponentialHistogramDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint {
return newExponentialHistogramDataPoint(internal.NewExponentialHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogramDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Sum() float64 {
return ms.orig.GetSum()
}
// HasSum returns true if the ExponentialHistogramDataPoint contains a
// Sum value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasSum() bool {
return ms.orig.Sum_ != nil
}
// SetSum replaces the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: v}
}
// RemoveSum removes the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveSum() {
ms.state.AssertMutable()
ms.orig.Sum_ = nil
}
// Scale returns the scale associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Scale() int32 {
return ms.orig.Scale
}
// SetScale replaces the scale associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetScale(v int32) {
ms.state.AssertMutable()
ms.orig.Scale = v
}
// ZeroCount returns the zerocount associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) ZeroCount() uint64 {
return ms.orig.ZeroCount
}
// SetZeroCount replaces the zerocount associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetZeroCount(v uint64) {
ms.state.AssertMutable()
ms.orig.ZeroCount = v
}
// Positive returns the positive associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Positive() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(&ms.orig.Positive, ms.state)
}
// Negative returns the negative associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Negative() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(&ms.orig.Negative, ms.state)
}
// Flags returns the flags associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Min returns the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Min() float64 {
return ms.orig.GetMin()
}
// HasMin returns true if the ExponentialHistogramDataPoint contains a
// Min value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
// SetMin replaces the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveMin() {
ms.state.AssertMutable()
ms.orig.Min_ = nil
}
// Max returns the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Max() float64 {
return ms.orig.GetMax()
}
// HasMax returns true if the ExponentialHistogramDataPoint contains a
// Max value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
// SetMax replaces the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveMax() {
ms.state.AssertMutable()
ms.orig.Max_ = nil
}
// ZeroThreshold returns the zerothreshold associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) ZeroThreshold() float64 {
return ms.orig.ZeroThreshold
}
// SetZeroThreshold replaces the zerothreshold associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) {
ms.state.AssertMutable()
ms.orig.ZeroThreshold = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) {
dest.state.AssertMutable()
internal.CopyExponentialHistogramDataPoint(dest.orig, ms.orig)
}
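// Example (illustrative sketch, not part of the generated code above): filling
// one ExponentialHistogramDataPoint. FromRaw on pcommon.UInt64Slice and
// pcommon.NewTimestampFromTime are assumed to be the usual pcommon helpers;
// the attribute and observed values are made up.
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetricSlice().AppendEmpty()
	dp := m.SetEmptyExponentialHistogram().DataPoints().AppendEmpty()

	dp.Attributes().PutStr("http.method", "GET")
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetScale(2)     // base = 2^(2^-2), i.e. roughly 19%-wide buckets
	dp.SetZeroCount(1) // observations within [-ZeroThreshold, ZeroThreshold]
	dp.SetCount(5)     // total observations, including the zero bucket
	dp.SetSum(12.5)    // optional: HasSum() now reports true

	pos := dp.Positive()
	pos.SetOffset(3) // first count below belongs to bucket index 3
	pos.BucketCounts().FromRaw([]uint64{2, 1, 1})
}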
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ExponentialHistogramDataPointBuckets is a set of bucket counts, encoded in a contiguous array of counts.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPointBuckets function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointBuckets struct {
orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets
state *internal.State
}
func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, state *internal.State) ExponentialHistogramDataPointBuckets {
return ExponentialHistogramDataPointBuckets{orig: orig, state: state}
}
// NewExponentialHistogramDataPointBuckets creates a new empty ExponentialHistogramDataPointBuckets.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(internal.NewExponentialHistogramDataPoint_Buckets(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramDataPointBuckets) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogramDataPoint_Buckets(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Offset returns the offset associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) Offset() int32 {
return ms.orig.Offset
}
// SetOffset replaces the offset associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) {
ms.state.AssertMutable()
ms.orig.Offset = v
}
// BucketCounts returns the BucketCounts associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) {
dest.state.AssertMutable()
internal.CopyExponentialHistogramDataPoint_Buckets(dest.orig, ms.orig)
}
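// Example (illustrative sketch, not part of the generated code above): what
// Offset means for the buckets, per the OTLP exponential histogram spec (an
// assumption of this sketch, not stated in the file above). Bucket index k
// covers (base^k, base^(k+1)] with base = 2^(2^-scale); the i-th entry of
// BucketCounts has index Offset+i.
package main

import (
	"fmt"
	"math"
)

func main() {
	scale, offset := 2, 3
	base := math.Pow(2, math.Pow(2, -float64(scale)))
	for i := 0; i < 3; i++ {
		k := float64(offset + i)
		fmt.Printf("count[%d] covers (%.3f, %.3f]\n",
			i, math.Pow(base, k), math.Pow(base, k+1))
	}
}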
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointSlice struct {
orig *[]*otlpmetrics.ExponentialHistogramDataPoint
state *internal.State
}
func newExponentialHistogramDataPointSlice(orig *[]*otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice {
return ExponentialHistogramDataPointSlice{orig: orig, state: state}
}
// NewExponentialHistogramDataPointSlice creates an ExponentialHistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice {
orig := []*otlpmetrics.ExponentialHistogramDataPoint(nil)
return newExponentialHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewExponentialHistogramDataPointSlice()".
func (es ExponentialHistogramDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ExponentialHistogramDataPointSlice) At(i int) ExponentialHistogramDataPoint {
return newExponentialHistogramDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ExponentialHistogramDataPointSlice) All() iter.Seq2[int, ExponentialHistogramDataPoint] {
return func(yield func(int, ExponentialHistogramDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new ExponentialHistogramDataPointSlice can be initialized:
//
// es := NewExponentialHistogramDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.ExponentialHistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ExponentialHistogramDataPoint.
// It returns the newly added ExponentialHistogramDataPoint.
func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewExponentialHistogramDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ExponentialHistogramDataPointSlice) MoveAndAppendTo(dest ExponentialHistogramDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogramDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteExponentialHistogramDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyExponentialHistogramDataPointSlice(*dest.orig, *es.orig)
}
// Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the
// provided less function so that two instances of ExponentialHistogramDataPointSlice
// can be compared.
func (es ExponentialHistogramDataPointSlice) Sort(less func(a, b ExponentialHistogramDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
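// Example (illustrative sketch, not part of the generated code above): merging
// two data-point slices without copying the points, using MoveAndAppendTo as
// documented above. The source slice is left empty afterwards.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewExponentialHistogramDataPointSlice()
	src.AppendEmpty().SetCount(10)
	dst := pmetric.NewExponentialHistogramDataPointSlice()
	dst.AppendEmpty().SetCount(20)

	src.MoveAndAppendTo(dst)
	fmt.Println(src.Len(), dst.Len()) // 0 2
}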
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewGauge function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Gauge struct {
orig *otlpmetrics.Gauge
state *internal.State
}
func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge {
return Gauge{orig: orig, state: state}
}
// NewGauge creates a new empty Gauge.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewGauge() Gauge {
return newGauge(internal.NewGauge(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Gauge) MoveTo(dest Gauge) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteGauge(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Gauge.
func (ms Gauge) DataPoints() NumberDataPointSlice {
return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Gauge) CopyTo(dest Gauge) {
dest.state.AssertMutable()
internal.CopyGauge(dest.orig, ms.orig)
}
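// Example (illustrative sketch, not part of the generated code above): a
// complete gauge metric with one double data point. The NumberDataPoint
// setters appear later in this package; pcommon.NewTimestampFromTime is the
// assumed pcommon helper, and the metric name, unit, and value are made up.
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetricSlice().AppendEmpty()
	m.SetName("process.memory.usage")
	m.SetUnit("By")
	dp := m.SetEmptyGauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleValue(1.5e6) // gauges report the current value per point
}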
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Histogram struct {
orig *otlpmetrics.Histogram
state *internal.State
}
func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram {
return Histogram{orig: orig, state: state}
}
// NewHistogram creates a new empty Histogram.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogram() Histogram {
return newHistogram(internal.NewHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Histogram) MoveTo(dest Histogram) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteHistogram(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Histogram.
func (ms Histogram) DataPoints() HistogramDataPointSlice {
return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this Histogram.
func (ms Histogram) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram.
func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Histogram) CopyTo(dest Histogram) {
dest.state.AssertMutable()
internal.CopyHistogram(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPoint struct {
orig *otlpmetrics.HistogramDataPoint
state *internal.State
}
func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPoint {
return HistogramDataPoint{orig: orig, state: state}
}
// NewHistogramDataPoint creates a new empty HistogramDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogramDataPoint() HistogramDataPoint {
return newHistogramDataPoint(internal.NewHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteHistogramDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Sum() float64 {
return ms.orig.GetSum()
}
// HasSum returns true if the HistogramDataPoint contains a
// Sum value, false otherwise.
func (ms HistogramDataPoint) HasSum() bool {
return ms.orig.Sum_ != nil
}
// SetSum replaces the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: v}
}
// RemoveSum removes the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveSum() {
ms.state.AssertMutable()
ms.orig.Sum_ = nil
}
// BucketCounts returns the BucketCounts associated with this HistogramDataPoint.
func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
// ExplicitBounds returns the ExplicitBounds associated with this HistogramDataPoint.
func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice {
return pcommon.Float64Slice(internal.NewFloat64SliceWrapper(&ms.orig.ExplicitBounds, ms.state))
}
// Exemplars returns the Exemplars associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Flags returns the flags associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// Min returns the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Min() float64 {
return ms.orig.GetMin()
}
// HasMin returns true if the HistogramDataPoint contains a
// Min value, false otherwise.
func (ms HistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
// SetMin replaces the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveMin() {
ms.state.AssertMutable()
ms.orig.Min_ = nil
}
// Max returns the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Max() float64 {
return ms.orig.GetMax()
}
// HasMax returns true if the HistogramDataPoint contains a
// Max value, false otherwise.
func (ms HistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
// SetMax replaces the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveMax() {
ms.state.AssertMutable()
ms.orig.Max_ = nil
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) {
dest.state.AssertMutable()
internal.CopyHistogramDataPoint(dest.orig, ms.orig)
}
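// Example (illustrative sketch, not part of the generated code above): an
// explicit-bounds HistogramDataPoint. Per OTLP (an assumption of this sketch),
// BucketCounts has len(ExplicitBounds)+1 entries, the last being the overflow
// bucket; FromRaw is the assumed pcommon helper for bulk assignment.
package main

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetricSlice().AppendEmpty()
	h := m.SetEmptyHistogram()
	h.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)

	dp := h.DataPoints().AppendEmpty()
	dp.ExplicitBounds().FromRaw([]float64{0.1, 1, 10}) // 3 bounds -> 4 buckets
	dp.BucketCounts().FromRaw([]uint64{5, 3, 1, 0})    // last bucket is (10, +Inf)
	dp.SetCount(9)  // equals the sum of the bucket counts
	dp.SetSum(4.2)  // optional; HasSum() now reports true
	dp.SetMin(0.01) // optional; RemoveMin() clears it again
}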
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// HistogramDataPointSlice logically represents a slice of HistogramDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPointSlice struct {
orig *[]*otlpmetrics.HistogramDataPoint
state *internal.State
}
func newHistogramDataPointSlice(orig *[]*otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPointSlice {
return HistogramDataPointSlice{orig: orig, state: state}
}
// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewHistogramDataPointSlice() HistogramDataPointSlice {
orig := []*otlpmetrics.HistogramDataPoint(nil)
return newHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewHistogramDataPointSlice()".
func (es HistogramDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es HistogramDataPointSlice) At(i int) HistogramDataPoint {
return newHistogramDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es HistogramDataPointSlice) All() iter.Seq2[int, HistogramDataPoint] {
return func(yield func(int, HistogramDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new HistogramDataPointSlice can be initialized:
//
// es := NewHistogramDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es HistogramDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.HistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty HistogramDataPoint.
// It returns the newly added HistogramDataPoint.
func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewHistogramDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es HistogramDataPointSlice) MoveAndAppendTo(dest HistogramDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteHistogramDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyHistogramDataPointSlice(*dest.orig, *es.orig)
}
// Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the
// provided less function so that two instances of HistogramDataPointSlice
// can be compared.
func (es HistogramDataPointSlice) Sort(less func(a, b HistogramDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
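// Example (illustrative sketch, not part of the generated code above):
// dropping stale points in place with RemoveIf, which keeps the survivors'
// order and shrinks the slice, as implemented above. The staleness cutoff is
// made up; pcommon.Timestamp values compare directly as uint64 nanoseconds.
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dps := pmetric.NewHistogramDataPointSlice()
	dps.AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dps.AppendEmpty() // zero timestamp, treated as stale below

	cutoff := pcommon.NewTimestampFromTime(time.Now().Add(-time.Hour))
	dps.RemoveIf(func(dp pmetric.HistogramDataPoint) bool {
		return dp.Timestamp() < cutoff
	})
	fmt.Println(dps.Len()) // 1
}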
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Metric represents one metric as a collection of datapoints.
// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetric function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Metric struct {
orig *otlpmetrics.Metric
state *internal.State
}
func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric {
return Metric{orig: orig, state: state}
}
// NewMetric creates a new empty Metric.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMetric() Metric {
return newMetric(internal.NewMetric(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Metric) MoveTo(dest Metric) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteMetric(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Name returns the name associated with this Metric.
func (ms Metric) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this Metric.
func (ms Metric) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Description returns the description associated with this Metric.
func (ms Metric) Description() string {
return ms.orig.Description
}
// SetDescription replaces the description associated with this Metric.
func (ms Metric) SetDescription(v string) {
ms.state.AssertMutable()
ms.orig.Description = v
}
// Unit returns the unit associated with this Metric.
func (ms Metric) Unit() string {
return ms.orig.Unit
}
// SetUnit replaces the unit associated with this Metric.
func (ms Metric) SetUnit(v string) {
ms.state.AssertMutable()
ms.orig.Unit = v
}
// Type returns the type of the data for this Metric.
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) Type() MetricType {
switch ms.orig.Data.(type) {
case *otlpmetrics.Metric_Gauge:
return MetricTypeGauge
case *otlpmetrics.Metric_Sum:
return MetricTypeSum
case *otlpmetrics.Metric_Histogram:
return MetricTypeHistogram
case *otlpmetrics.Metric_ExponentialHistogram:
return MetricTypeExponentialHistogram
case *otlpmetrics.Metric_Summary:
return MetricTypeSummary
}
return MetricTypeEmpty
}
// Gauge returns the gauge associated with this Metric.
//
// Calling this function when Type() != MetricTypeGauge returns an invalid
// zero-initialized instance of Gauge. Note that using such a Gauge instance can cause a panic.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) Gauge() Gauge {
v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Gauge)
if !ok {
return Gauge{}
}
return newGauge(v.Gauge, ms.state)
}
// SetEmptyGauge sets an empty gauge to this Metric.
//
// After this, the Type() function will return MetricTypeGauge.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyGauge() Gauge {
ms.state.AssertMutable()
var ov *otlpmetrics.Metric_Gauge
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Gauge{}
} else {
ov = internal.ProtoPoolMetric_Gauge.Get().(*otlpmetrics.Metric_Gauge)
}
ov.Gauge = internal.NewGauge()
ms.orig.Data = ov
return newGauge(ov.Gauge, ms.state)
}
// Sum returns the sum associated with this Metric.
//
// Calling this function when Type() != MetricTypeSum returns an invalid
// zero-initialized instance of Sum. Note that using such a Sum instance can cause a panic.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) Sum() Sum {
v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Sum)
if !ok {
return Sum{}
}
return newSum(v.Sum, ms.state)
}
// SetEmptySum sets an empty sum to this Metric.
//
// After this, the Type() function will return MetricTypeSum.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySum() Sum {
ms.state.AssertMutable()
var ov *otlpmetrics.Metric_Sum
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Sum{}
} else {
ov = internal.ProtoPoolMetric_Sum.Get().(*otlpmetrics.Metric_Sum)
}
ov.Sum = internal.NewSum()
ms.orig.Data = ov
return newSum(ov.Sum, ms.state)
}
// Histogram returns the histogram associated with this Metric.
//
// Calling this function when Type() != MetricTypeHistogram returns an invalid
// zero-initialized instance of Histogram. Note that using such a Histogram instance can cause a panic.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) Histogram() Histogram {
v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Histogram)
if !ok {
return Histogram{}
}
return newHistogram(v.Histogram, ms.state)
}
// SetEmptyHistogram sets an empty histogram to this Metric.
//
// After this, the Type() function will return MetricTypeHistogram.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyHistogram() Histogram {
ms.state.AssertMutable()
var ov *otlpmetrics.Metric_Histogram
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Histogram{}
} else {
ov = internal.ProtoPoolMetric_Histogram.Get().(*otlpmetrics.Metric_Histogram)
}
ov.Histogram = internal.NewHistogram()
ms.orig.Data = ov
return newHistogram(ov.Histogram, ms.state)
}
// ExponentialHistogram returns the exponentialhistogram associated with this Metric.
//
// Calling this function when Type() != MetricTypeExponentialHistogram returns an invalid
// zero-initialized instance of ExponentialHistogram. Note that using such an ExponentialHistogram instance can cause a panic.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) ExponentialHistogram() ExponentialHistogram {
v, ok := ms.orig.GetData().(*otlpmetrics.Metric_ExponentialHistogram)
if !ok {
return ExponentialHistogram{}
}
return newExponentialHistogram(v.ExponentialHistogram, ms.state)
}
// SetEmptyExponentialHistogram sets an empty exponentialhistogram to this Metric.
//
// After this, the Type() function will return MetricTypeExponentialHistogram.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram {
ms.state.AssertMutable()
var ov *otlpmetrics.Metric_ExponentialHistogram
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_ExponentialHistogram{}
} else {
ov = internal.ProtoPoolMetric_ExponentialHistogram.Get().(*otlpmetrics.Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = internal.NewExponentialHistogram()
ms.orig.Data = ov
return newExponentialHistogram(ov.ExponentialHistogram, ms.state)
}
// Summary returns the summary associated with this Metric.
//
// Calling this function when Type() != MetricTypeSummary returns an invalid
// zero-initialized instance of Summary. Note that using such a Summary instance can cause a panic.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) Summary() Summary {
v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Summary)
if !ok {
return Summary{}
}
return newSummary(v.Summary, ms.state)
}
// SetEmptySummary sets an empty summary to this Metric.
//
// After this, the Type() function will return MetricTypeSummary.
//
// Calling this function on a zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySummary() Summary {
ms.state.AssertMutable()
var ov *otlpmetrics.Metric_Summary
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.Metric_Summary{}
} else {
ov = internal.ProtoPoolMetric_Summary.Get().(*otlpmetrics.Metric_Summary)
}
ov.Summary = internal.NewSummary()
ms.orig.Data = ov
return newSummary(ov.Summary, ms.state)
}
// Metadata returns the Metadata associated with this Metric.
func (ms Metric) Metadata() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Metadata, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Metric) CopyTo(dest Metric) {
dest.state.AssertMutable()
internal.CopyMetric(dest.orig, ms.orig)
}
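// Example (illustrative sketch, not part of the generated code above):
// dispatching on Metric.Type() before touching the typed accessors, since
// calling e.g. Gauge() on a non-gauge metric returns an unusable zero
// instance, as documented above.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func countPoints(m pmetric.Metric) int {
	switch m.Type() {
	case pmetric.MetricTypeGauge:
		return m.Gauge().DataPoints().Len()
	case pmetric.MetricTypeSum:
		return m.Sum().DataPoints().Len()
	case pmetric.MetricTypeHistogram:
		return m.Histogram().DataPoints().Len()
	case pmetric.MetricTypeExponentialHistogram:
		return m.ExponentialHistogram().DataPoints().Len()
	case pmetric.MetricTypeSummary:
		return m.Summary().DataPoints().Len()
	}
	return 0 // MetricTypeEmpty
}

func main() {
	m := pmetric.NewMetricSlice().AppendEmpty()
	m.SetEmptySum().DataPoints().AppendEmpty()
	fmt.Println(countPoints(m)) // 1
}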
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
)
// Metrics is the top-level struct that is propagated through the metrics pipeline.
// Use NewMetrics to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Metrics internal.MetricsWrapper
func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *internal.State) Metrics {
return Metrics(internal.NewMetricsWrapper(orig, state))
}
// NewMetrics creates a new empty Metrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMetrics() Metrics {
return newMetrics(internal.NewExportMetricsServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Metrics) MoveTo(dest Metrics) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportMetricsServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceMetrics returns the ResourceMetrics associated with this Metrics.
func (ms Metrics) ResourceMetrics() ResourceMetricsSlice {
return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Metrics) CopyTo(dest Metrics) {
dest.getState().AssertMutable()
internal.CopyExportMetricsServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Metrics) getOrig() *otlpcollectormetrics.ExportMetricsServiceRequest {
return internal.GetMetricsOrig(internal.MetricsWrapper(ms))
}
func (ms Metrics) getState() *internal.State {
return internal.GetMetricsState(internal.MetricsWrapper(ms))
}
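// Example (illustrative sketch, not part of the generated code above):
// building a Metrics payload from the top down. ScopeMetrics, Scope, and the
// Resource attribute helpers come from the standard pmetric/pcommon hierarchy
// but are not shown in this excerpt; SetIntValue mirrors the SetDoubleValue
// setter that appears at the end of this excerpt. Names are made up.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().PutStr("service.name", "checkout")

	sm := rm.ScopeMetrics().AppendEmpty()
	sm.Scope().SetName("example.instrumentation")

	m := sm.Metrics().AppendEmpty()
	m.SetName("checkout.requests")
	m.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(1)

	fmt.Println(md.ResourceMetrics().Len()) // 1
}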
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// MetricSlice logically represents a slice of Metric.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetricSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type MetricSlice struct {
orig *[]*otlpmetrics.Metric
state *internal.State
}
func newMetricSlice(orig *[]*otlpmetrics.Metric, state *internal.State) MetricSlice {
return MetricSlice{orig: orig, state: state}
}
// NewMetricSlice creates a MetricSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewMetricSlice() MetricSlice {
orig := []*otlpmetrics.Metric(nil)
return newMetricSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewMetricSlice()".
func (es MetricSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es MetricSlice) At(i int) Metric {
return newMetric((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es MetricSlice) All() iter.Seq2[int, Metric] {
return func(yield func(int, Metric) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new MetricSlice can be initialized:
//
// es := NewMetricSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es MetricSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.Metric, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Metric.
// It returns the newly added Metric.
func (es MetricSlice) AppendEmpty() Metric {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewMetric())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es MetricSlice) RemoveIf(f func(Metric) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteMetric((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es MetricSlice) CopyTo(dest MetricSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyMetricSlice(*dest.orig, *es.orig)
}
// Sort sorts the Metric elements within MetricSlice given the
// provided less function so that two instances of MetricSlice
// can be compared.
func (es MetricSlice) Sort(less func(a, b Metric) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
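// Example (illustrative sketch, not part of the generated code above): Sort
// with a less function gives a MetricSlice a deterministic order, e.g. before
// comparing two slices in tests, as the Sort docs above intend.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ms := pmetric.NewMetricSlice()
	ms.AppendEmpty().SetName("b")
	ms.AppendEmpty().SetName("a")

	ms.Sort(func(x, y pmetric.Metric) bool { return x.Name() < y.Name() })
	fmt.Println(ms.At(0).Name(), ms.At(1).Name()) // a b
}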
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// NumberDataPoint is a single data point in a time series that describes the time-varying value of a number metric.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewNumberDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPoint struct {
orig *otlpmetrics.NumberDataPoint
state *internal.State
}
func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPoint {
return NumberDataPoint{orig: orig, state: state}
}
// NewNumberDataPoint creates a new empty NumberDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewNumberDataPoint() NumberDataPoint {
return newNumberDataPoint(internal.NewNumberDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteNumberDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this NumberDataPoint.
func (ms NumberDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the start timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the start timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ValueType returns the type of the value for this NumberDataPoint.
// Calling this function on a zero-initialized NumberDataPoint will cause a panic.
func (ms NumberDataPoint) ValueType() NumberDataPointValueType {
switch ms.orig.Value.(type) {
case *otlpmetrics.NumberDataPoint_AsDouble:
return NumberDataPointValueTypeDouble
case *otlpmetrics.NumberDataPoint_AsInt:
return NumberDataPointValueTypeInt
}
return NumberDataPointValueTypeEmpty
}
// DoubleValue returns the double value associated with this NumberDataPoint.
func (ms NumberDataPoint) DoubleValue() float64 {
return ms.orig.GetAsDouble()
}
// SetDoubleValue replaces the double value associated with this NumberDataPoint.
func (ms NumberDataPoint) SetDoubleValue(v float64) {
ms.state.AssertMutable()
var ov *otlpmetrics.NumberDataPoint_AsDouble
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsDouble{}
} else {
ov = internal.ProtoPoolNumberDataPoint_AsDouble.Get().(*otlpmetrics.NumberDataPoint_AsDouble)
}
ov.AsDouble = v
ms.orig.Value = ov
}
// IntValue returns the int value associated with this NumberDataPoint.
func (ms NumberDataPoint) IntValue() int64 {
return ms.orig.GetAsInt()
}
// SetIntValue replaces the int value associated with this NumberDataPoint.
func (ms NumberDataPoint) SetIntValue(v int64) {
ms.state.AssertMutable()
var ov *otlpmetrics.NumberDataPoint_AsInt
if !internal.UseProtoPooling.IsEnabled() {
ov = &otlpmetrics.NumberDataPoint_AsInt{}
} else {
ov = internal.ProtoPoolNumberDataPoint_AsInt.Get().(*otlpmetrics.NumberDataPoint_AsInt)
}
ov.AsInt = v
ms.orig.Value = ov
}
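// A minimal usage sketch (editor's illustration): the value is a oneof, so
// each setter replaces whatever was stored before, and ValueType reports
// the kind last set.
//
// dp := NewNumberDataPoint()
// dp.SetIntValue(10) // dp.ValueType() == NumberDataPointValueTypeInt
// dp.SetDoubleValue(0.5) // dp.ValueType() == NumberDataPointValueTypeDouble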
// Exemplars returns the Exemplars associated with this NumberDataPoint.
func (ms NumberDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Flags returns the flags associated with this NumberDataPoint.
func (ms NumberDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this NumberDataPoint.
func (ms NumberDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) {
dest.state.AssertMutable()
internal.CopyNumberDataPoint(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// NumberDataPointSlice logically represents a slice of NumberDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewNumberDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPointSlice struct {
orig *[]*otlpmetrics.NumberDataPoint
state *internal.State
}
func newNumberDataPointSlice(orig *[]*otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPointSlice {
return NumberDataPointSlice{orig: orig, state: state}
}
// NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements.
// Use "EnsureCapacity" to initialize it with a given capacity.
func NewNumberDataPointSlice() NumberDataPointSlice {
orig := []*otlpmetrics.NumberDataPoint(nil)
return newNumberDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewNumberDataPointSlice()".
func (es NumberDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es NumberDataPointSlice) At(i int) NumberDataPoint {
return newNumberDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es NumberDataPointSlice) All() iter.Seq2[int, NumberDataPoint] {
return func(yield func(int, NumberDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures that the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new NumberDataPointSlice can be initialized:
//
// es := NewNumberDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Set all the values for e here.
// }
func (es NumberDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.NumberDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty NumberDataPoint to the end of the slice.
// It returns the newly added NumberDataPoint.
func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewNumberDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es NumberDataPointSlice) MoveAndAppendTo(dest NumberDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteNumberDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just moved the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyNumberDataPointSlice(*dest.orig, *es.orig)
}
// Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the
// provided less function so that two instances of NumberDataPointSlice
// can be compared.
func (es NumberDataPointSlice) Sort(less func(a, b NumberDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceMetrics is a collection of metrics from a Resource.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetrics struct {
orig *otlpmetrics.ResourceMetrics
state *internal.State
}
func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetrics {
return ResourceMetrics{orig: orig, state: state}
}
// NewResourceMetrics creates a new empty ResourceMetrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceMetrics() ResourceMetrics {
return newResourceMetrics(internal.NewResourceMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceMetrics(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceMetrics.
func (ms ResourceMetrics) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics.
func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice {
return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state)
}
// SchemaUrl returns the schema URL associated with this ResourceMetrics.
func (ms ResourceMetrics) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schema URL associated with this ResourceMetrics.
func (ms ResourceMetrics) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) {
dest.state.AssertMutable()
internal.CopyResourceMetrics(dest.orig, ms.orig)
}
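// A minimal usage sketch (editor's illustration): populating a
// ResourceMetrics. PutStr is the pcommon.Map string setter; the attribute
// key and schema URL below are illustrative values only.
//
// rm := NewResourceMetrics()
// rm.Resource().Attributes().PutStr("service.name", "checkout")
// rm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
// sm := rm.ScopeMetrics().AppendEmpty() // fill the scope and its metrics next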
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ResourceMetricsSlice logically represents a slice of ResourceMetrics.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetricsSlice struct {
orig *[]*otlpmetrics.ResourceMetrics
state *internal.State
}
func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetricsSlice {
return ResourceMetricsSlice{orig: orig, state: state}
}
// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements.
// Use "EnsureCapacity" to initialize it with a given capacity.
func NewResourceMetricsSlice() ResourceMetricsSlice {
orig := []*otlpmetrics.ResourceMetrics(nil)
return newResourceMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceMetricsSlice()".
func (es ResourceMetricsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceMetricsSlice) At(i int) ResourceMetrics {
return newResourceMetrics((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceMetricsSlice) All() iter.Seq2[int, ResourceMetrics] {
return func(yield func(int, ResourceMetrics) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures that the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ResourceMetricsSlice can be initialized:
//
// es := NewResourceMetricsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Set all the values for e here.
// }
func (es ResourceMetricsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.ResourceMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ResourceMetrics to the end of the slice.
// It returns the newly added ResourceMetrics.
func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceMetrics())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceMetrics((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just moved the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceMetricsSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the
// provided less function so that two instances of ResourceMetricsSlice
// can be compared.
func (es ResourceMetricsSlice) Sort(less func(a, b ResourceMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeMetrics is a collection of metrics from an InstrumentationScope.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetrics struct {
orig *otlpmetrics.ScopeMetrics
state *internal.State
}
func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetrics {
return ScopeMetrics{orig: orig, state: state}
}
// NewScopeMetrics creates a new empty ScopeMetrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeMetrics() ScopeMetrics {
return newScopeMetrics(internal.NewScopeMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeMetrics(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeMetrics.
func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Metrics returns the Metrics associated with this ScopeMetrics.
func (ms ScopeMetrics) Metrics() MetricSlice {
return newMetricSlice(&ms.orig.Metrics, ms.state)
}
// SchemaUrl returns the schema URL associated with this ScopeMetrics.
func (ms ScopeMetrics) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schema URL associated with this ScopeMetrics.
func (ms ScopeMetrics) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) {
dest.state.AssertMutable()
internal.CopyScopeMetrics(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// ScopeMetricsSlice logically represents a slice of ScopeMetrics.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetricsSlice struct {
orig *[]*otlpmetrics.ScopeMetrics
state *internal.State
}
func newScopeMetricsSlice(orig *[]*otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetricsSlice {
return ScopeMetricsSlice{orig: orig, state: state}
}
// NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements.
// Use "EnsureCapacity" to initialize it with a given capacity.
func NewScopeMetricsSlice() ScopeMetricsSlice {
orig := []*otlpmetrics.ScopeMetrics(nil)
return newScopeMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeMetricsSlice()".
func (es ScopeMetricsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeMetricsSlice) At(i int) ScopeMetrics {
return newScopeMetrics((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeMetricsSlice) All() iter.Seq2[int, ScopeMetrics] {
return func(yield func(int, ScopeMetrics) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures that the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ScopeMetricsSlice can be initialized:
//
// es := NewScopeMetricsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Set all the values for e here.
// }
func (es ScopeMetricsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.ScopeMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ScopeMetrics to the end of the slice.
// It returns the newly added ScopeMetrics.
func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeMetrics())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeMetricsSlice) MoveAndAppendTo(dest ScopeMetricsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeMetrics((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeMetricsSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the
// provided less function so that two instances of ScopeMetricsSlice
// can be compared.
func (es ScopeMetricsSlice) Sort(less func(a, b ScopeMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSum function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Sum struct {
orig *otlpmetrics.Sum
state *internal.State
}
func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum {
return Sum{orig: orig, state: state}
}
// NewSum creates a new empty Sum.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSum() Sum {
return newSum(internal.NewSum(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Sum) MoveTo(dest Sum) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSum(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Sum.
func (ms Sum) DataPoints() NumberDataPointSlice {
return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregation temporality associated with this Sum.
func (ms Sum) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregation temporality associated with this Sum.
func (ms Sum) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v)
}
// IsMonotonic reports whether this Sum is monotonic.
func (ms Sum) IsMonotonic() bool {
return ms.orig.IsMonotonic
}
// SetIsMonotonic replaces the monotonicity flag associated with this Sum.
func (ms Sum) SetIsMonotonic(v bool) {
ms.state.AssertMutable()
ms.orig.IsMonotonic = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Sum) CopyTo(dest Sum) {
dest.state.AssertMutable()
internal.CopySum(dest.orig, ms.orig)
}
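// A minimal usage sketch (editor's illustration): configuring a monotonic
// cumulative counter. AggregationTemporalityCumulative is the constant
// defined elsewhere in this package.
//
// s := NewSum()
// s.SetAggregationTemporality(AggregationTemporalityCumulative)
// s.SetIsMonotonic(true)
// dp := s.DataPoints().AppendEmpty()
// dp.SetIntValue(42)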
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// Summary represents the type of a metric that is calculated as a summary of all reported double measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummary function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Summary struct {
orig *otlpmetrics.Summary
state *internal.State
}
func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary {
return Summary{orig: orig, state: state}
}
// NewSummary creates a new empty Summary.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummary() Summary {
return newSummary(internal.NewSummary(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Summary) MoveTo(dest Summary) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummary(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Summary.
func (ms Summary) DataPoints() SummaryDataPointSlice {
return newSummaryDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Summary) CopyTo(dest Summary) {
dest.state.AssertMutable()
internal.CopySummary(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SummaryDataPoint is a single data point in a time series that describes the time-varying values of a Summary of double values.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPoint struct {
orig *otlpmetrics.SummaryDataPoint
state *internal.State
}
func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPoint {
return SummaryDataPoint{orig: orig, state: state}
}
// NewSummaryDataPoint creates a new empty SummaryDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPoint() SummaryDataPoint {
return newSummaryDataPoint(internal.NewSummaryDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummaryDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the start timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the start timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Sum() float64 {
return ms.orig.Sum
}
// SetSum replaces the sum associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum = v
}
// QuantileValues returns the QuantileValues associated with this SummaryDataPoint.
func (ms SummaryDataPoint) QuantileValues() SummaryDataPointValueAtQuantileSlice {
return newSummaryDataPointValueAtQuantileSlice(&ms.orig.QuantileValues, ms.state)
}
// Flags returns the flags associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) {
dest.state.AssertMutable()
internal.CopySummaryDataPoint(dest.orig, ms.orig)
}
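// A minimal usage sketch (editor's illustration): filling a summary data
// point with a count, a sum, and a p99 quantile value.
//
// dp := NewSummaryDataPoint()
// dp.SetCount(100)
// dp.SetSum(123.4)
// q := dp.QuantileValues().AppendEmpty()
// q.SetQuantile(0.99)
// q.SetValue(2.5)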
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointSlice logically represents a slice of SummaryDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointSlice struct {
orig *[]*otlpmetrics.SummaryDataPoint
state *internal.State
}
func newSummaryDataPointSlice(orig *[]*otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPointSlice {
return SummaryDataPointSlice{orig: orig, state: state}
}
// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements.
// Use "EnsureCapacity" to initialize it with a given capacity.
func NewSummaryDataPointSlice() SummaryDataPointSlice {
orig := []*otlpmetrics.SummaryDataPoint(nil)
return newSummaryDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSummaryDataPointSlice()".
func (es SummaryDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SummaryDataPointSlice) At(i int) SummaryDataPoint {
return newSummaryDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SummaryDataPointSlice) All() iter.Seq2[int, SummaryDataPoint] {
return func(yield func(int, SummaryDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures that the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SummaryDataPointSlice can be initialized:
//
// es := NewSummaryDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Set all the values for e here.
// }
func (es SummaryDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.SummaryDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty SummaryDataPoint to the end of the slice.
// It returns the newly added SummaryDataPoint.
func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSummaryDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SummaryDataPointSlice) MoveAndAppendTo(dest SummaryDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSummaryDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just moved the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySummaryDataPointSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the
// provided less function so that two instances of SummaryDataPointSlice
// can be compared.
func (es SummaryDataPointSlice) Sort(less func(a, b SummaryDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPointValueAtQuantile function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantile struct {
orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile
state *internal.State
}
func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile {
return SummaryDataPointValueAtQuantile{orig: orig, state: state}
}
// NewSummaryDataPointValueAtQuantile creates a new empty SummaryDataPointValueAtQuantile.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile {
return newSummaryDataPointValueAtQuantile(internal.NewSummaryDataPoint_ValueAtQuantile(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQuantile) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummaryDataPoint_ValueAtQuantile(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Quantile returns the quantile associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) Quantile() float64 {
return ms.orig.Quantile
}
// SetQuantile replaces the quantile associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) SetQuantile(v float64) {
ms.state.AssertMutable()
ms.orig.Quantile = v
}
// Value returns the value associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) Value() float64 {
return ms.orig.Value
}
// SetValue replaces the value associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) {
ms.state.AssertMutable()
ms.orig.Value = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) {
dest.state.AssertMutable()
internal.CopySummaryDataPoint_ValueAtQuantile(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
// SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantileSlice struct {
orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile
state *internal.State
}
func newSummaryDataPointValueAtQuantileSlice(orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice {
return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state}
}
// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements.
// Use "EnsureCapacity" to initialize it with a given capacity.
func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice {
orig := []*otlpmetrics.SummaryDataPoint_ValueAtQuantile(nil)
return newSummaryDataPointValueAtQuantileSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSummaryDataPointValueAtQuantileSlice()".
func (es SummaryDataPointValueAtQuantileSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SummaryDataPointValueAtQuantileSlice) At(i int) SummaryDataPointValueAtQuantile {
return newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SummaryDataPointValueAtQuantileSlice) All() iter.Seq2[int, SummaryDataPointValueAtQuantile] {
return func(yield func(int, SummaryDataPointValueAtQuantile) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures that the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SummaryDataPointValueAtQuantileSlice can be initialized:
//
// es := NewSummaryDataPointValueAtQuantileSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Set all the values for e here.
// }
func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty SummaryDataPointValueAtQuantile to the end of the slice.
// It returns the newly added SummaryDataPointValueAtQuantile.
func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSummaryDataPoint_ValueAtQuantile())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SummaryDataPointValueAtQuantileSlice) MoveAndAppendTo(dest SummaryDataPointValueAtQuantileSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointValueAtQuantile) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSummaryDataPoint_ValueAtQuantile((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just moved the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValueAtQuantileSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySummaryDataPoint_ValueAtQuantileSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the
// provided less function so that two instances of SummaryDataPointValueAtQuantileSlice
// can be compared.
func (es SummaryDataPointValueAtQuantileSlice) Sort(less func(a, b SummaryDataPointValueAtQuantile) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
var _ Marshaler = (*JSONMarshaler)(nil)
// JSONMarshaler marshals Metrics to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalMetrics marshals Metrics to the OTLP/JSON format.
func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportMetricsServiceRequest(md.getOrig(), dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes to Metrics.
type JSONUnmarshaler struct{}
// UnmarshalMetrics unmarshals bytes from the OTLP/JSON format into Metrics.
func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
md := NewMetrics()
internal.UnmarshalJSONExportMetricsServiceRequest(md.getOrig(), iter)
if iter.Error() != nil {
return Metrics{}, iter.Error()
}
otlp.MigrateMetrics(md.getOrig().ResourceMetrics)
return md, nil
}
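// A minimal usage sketch (editor's illustration): an OTLP/JSON round trip,
// assuming md is a Metrics value obtained elsewhere.
//
// var m JSONMarshaler
// buf, err := m.MarshalMetrics(md)
// if err != nil { /* handle */ }
// var u JSONUnmarshaler
// md2, err := u.UnmarshalMetrics(buf)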
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
const noRecordValueMask = uint32(1)
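// DefaultDataPointFlags is the default (empty) DataPointFlags value, with no flags set.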
var DefaultDataPointFlags = DataPointFlags(0)
// DataPointFlags defines how a metric aggregator reports aggregated values.
// It describes how those values relate to the time interval over which they are aggregated.
type DataPointFlags uint32
// NoRecordedValue returns true if the DataPointFlags contains the NoRecordedValue flag.
func (ms DataPointFlags) NoRecordedValue() bool {
return uint32(ms)&noRecordValueMask != 0
}
// WithNoRecordedValue returns a new DataPointFlags, with the NoRecordedValue flag set to the given value.
func (ms DataPointFlags) WithNoRecordedValue(b bool) DataPointFlags {
orig := uint32(ms)
if b {
orig |= noRecordValueMask
} else {
orig &^= noRecordValueMask
}
return DataPointFlags(orig)
}
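// A minimal usage sketch (editor's illustration): DataPointFlags is an
// immutable value type, so WithNoRecordedValue returns a new flags value
// rather than mutating the receiver.
//
// flags := DefaultDataPointFlags.WithNoRecordedValue(true)
// _ = flags.NoRecordedValue() // true
// flags = flags.WithNoRecordedValue(false)
// _ = flags.NoRecordedValue() // false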
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// MetricType specifies the type of data in a Metric.
type MetricType int32
const (
// MetricTypeEmpty means that the metric type is unset.
MetricTypeEmpty MetricType = iota
// MetricTypeGauge indicates a Gauge metric.
MetricTypeGauge
// MetricTypeSum indicates a Sum metric.
MetricTypeSum
// MetricTypeHistogram indicates a Histogram metric.
MetricTypeHistogram
// MetricTypeExponentialHistogram indicates an ExponentialHistogram metric.
MetricTypeExponentialHistogram
// MetricTypeSummary indicates a Summary metric.
MetricTypeSummary
)
// String returns the string representation of the MetricType.
func (mdt MetricType) String() string {
switch mdt {
case MetricTypeEmpty:
return "Empty"
case MetricTypeGauge:
return "Gauge"
case MetricTypeSum:
return "Sum"
case MetricTypeHistogram:
return "Histogram"
case MetricTypeExponentialHistogram:
return "ExponentialHistogram"
case MetricTypeSummary:
return "Summary"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// MarkReadOnly marks the Metrics as shared so that no further modifications can be made to it.
func (ms Metrics) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Metrics instance is read-only.
func (ms Metrics) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// MetricCount calculates the total number of metrics.
func (ms Metrics) MetricCount() int {
metricCount := 0
rms := ms.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
metricCount += ilm.Metrics().Len()
}
}
return metricCount
}
// DataPointCount calculates the total number of data points.
func (ms Metrics) DataPointCount() (dataPointCount int) {
rms := ms.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
ms := ilm.Metrics()
for k := 0; k < ms.Len(); k++ {
m := ms.At(k)
switch m.Type() {
case MetricTypeGauge:
dataPointCount += m.Gauge().DataPoints().Len()
case MetricTypeSum:
dataPointCount += m.Sum().DataPoints().Len()
case MetricTypeHistogram:
dataPointCount += m.Histogram().DataPoints().Len()
case MetricTypeExponentialHistogram:
dataPointCount += m.ExponentialHistogram().DataPoints().Len()
case MetricTypeSummary:
dataPointCount += m.Summary().DataPoints().Len()
}
}
}
}
return dataPointCount
}
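// A minimal usage sketch (editor's illustration): counting and freezing a
// Metrics value, assuming md is obtained elsewhere.
//
// metrics := md.MetricCount() // total metrics across all resources/scopes
// points := md.DataPointCount() // total points across all metric types
// md.MarkReadOnly() // subsequent mutation attempts will panic
// _ = md.IsReadOnly() // true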
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// NumberDataPointValueType specifies the type of NumberDataPoint value.
type NumberDataPointValueType int32
const (
// NumberDataPointValueTypeEmpty means that the data point value is unset.
NumberDataPointValueTypeEmpty NumberDataPointValueType = iota
// NumberDataPointValueTypeInt indicates an int64 value.
NumberDataPointValueTypeInt
// NumberDataPointValueTypeDouble indicates a float64 value.
NumberDataPointValueTypeDouble
)
// String returns the string representation of the NumberDataPointValueType.
func (nt NumberDataPointValueType) String() string {
switch nt {
case NumberDataPointValueTypeEmpty:
return "Empty"
case NumberDataPointValueTypeInt:
return "Int"
case NumberDataPointValueTypeDouble:
return "Double"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
var _ MarshalSizer = (*ProtoMarshaler)(nil)
// ProtoMarshaler marshals Metrics to OTLP/protobuf-encoded bytes.
type ProtoMarshaler struct{}
// MarshalMetrics marshals Metrics to the OTLP/protobuf format.
func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
size := internal.SizeProtoExportMetricsServiceRequest(md.getOrig())
buf := make([]byte, size)
_ = internal.MarshalProtoExportMetricsServiceRequest(md.getOrig(), buf)
return buf, nil
}
func (e *ProtoMarshaler) MetricsSize(md Metrics) int {
return internal.SizeProtoExportMetricsServiceRequest(md.getOrig())
}
func (e *ProtoMarshaler) ResourceMetricsSize(md ResourceMetrics) int {
return internal.SizeProtoResourceMetrics(md.orig)
}
func (e *ProtoMarshaler) ScopeMetricsSize(md ScopeMetrics) int {
return internal.SizeProtoScopeMetrics(md.orig)
}
func (e *ProtoMarshaler) MetricSize(md Metric) int {
return internal.SizeProtoMetric(md.orig)
}
func (e *ProtoMarshaler) NumberDataPointSize(md NumberDataPoint) int {
return internal.SizeProtoNumberDataPoint(md.orig)
}
func (e *ProtoMarshaler) SummaryDataPointSize(md SummaryDataPoint) int {
return internal.SizeProtoSummaryDataPoint(md.orig)
}
func (e *ProtoMarshaler) HistogramDataPointSize(md HistogramDataPoint) int {
return internal.SizeProtoHistogramDataPoint(md.orig)
}
func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(md ExponentialHistogramDataPoint) int {
return internal.SizeProtoExponentialHistogramDataPoint(md.orig)
}
// ProtoUnmarshaler unmarshals OTLP/protobuf-encoded bytes to Metrics.
type ProtoUnmarshaler struct{}
// UnmarshalMetrics unmarshals OTLP/protobuf-formatted bytes into Metrics.
func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
md := NewMetrics()
err := internal.UnmarshalProtoExportMetricsServiceRequest(md.getOrig(), buf)
if err != nil {
return Metrics{}, err
}
return md, nil
}
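// A minimal usage sketch (editor's illustration): an OTLP/protobuf round
// trip, assuming md is a Metrics value obtained elsewhere. MetricsSize
// reports the exact number of bytes MarshalMetrics will produce.
//
// var pm ProtoMarshaler
// size := pm.MetricsSize(md)
// buf, err := pm.MarshalMetrics(md) // len(buf) == size
// var pu ProtoUnmarshaler
// md2, err := pu.UnmarshalMetrics(buf)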
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetricotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *otlpcollectormetrics.ExportMetricsPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportMetricsPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportMetricsPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedDataPoints returns the rejecteddatapoints associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedDataPoints() int64 {
return ms.orig.RejectedDataPoints
}
// SetRejectedDataPoints replaces the rejecteddatapoints associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedDataPoints(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedDataPoints = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportMetricsPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetricotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *otlpcollectormetrics.ExportMetricsServiceResponse
state *internal.State
}
func newExportResponse(orig *otlpcollectormetrics.ExportMetricsServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportMetricsServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportMetricsServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportMetricsServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
_ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded.
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for OTLP-GRPC Metrics service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export pmetric.Metrics to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementations of the GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)}
}
type grpcClient struct {
rawClient otlpcollectormetrics.MetricsServiceClient
}
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, nil
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for OTLP gRPC MetricsService service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementations of the GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv})
}
type rawMetricsServer struct {
srv GRPCServer
}
func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) {
otlp.MigrateMetrics(request.ResourceMetrics)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
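// Example (editor's illustrative sketch, not part of the generated API): a
// minimal GRPCServer implementation and its registration. The type and the
// partial-success condition below are hypothetical; a real server would
// forward the received metrics to a consumer.
type exampleServer struct {
UnimplementedGRPCServer
}
var _ GRPCServer = (*exampleServer)(nil)
func (s *exampleServer) Export(_ context.Context, req ExportRequest) (ExportResponse, error) {
resp := NewExportResponse()
if req.Metrics().DataPointCount() == 0 {
// Report a partial success when nothing was accepted.
resp.PartialSuccess().SetErrorMessage("no data points in request")
}
return resp, nil
}
// Registration sketch:
//
// srv := grpc.NewServer()
// RegisterGRPCServer(srv, &exampleServer{})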
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/pmetric"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for pmetric.Metrics data.
type ExportRequest struct {
orig *otlpcollectormetrics.ExportMetricsServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &otlpcollectormetrics.ExportMetricsServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromMetrics returns an ExportRequest from pmetric.Metrics.
// Because ExportRequest is a wrapper for pmetric.Metrics,
// any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest {
return ExportRequest{
orig: internal.GetMetricsOrig(internal.MetricsWrapper(md)),
state: internal.GetMetricsState(internal.MetricsWrapper(md)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportMetricsServiceRequest(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportMetricsServiceRequest(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := internal.UnmarshalProtoExportMetricsServiceRequest(ms.orig, data)
if err != nil {
return err
}
otlp.MigrateMetrics(ms.orig.ResourceMetrics)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportMetricsServiceRequest(ms.orig, dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportMetricsServiceRequest(ms.orig, iter)
return iter.Error()
}
func (ms ExportRequest) Metrics() pmetric.Metrics {
return pmetric.Metrics(internal.NewMetricsWrapper(ms.orig, ms.state))
}
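// Example (editor's illustrative sketch, not part of the generated API):
// wrapping pmetric.Metrics in an ExportRequest and converting it to JSON.
// Because ExportRequest only wraps the Metrics, no data is copied.
func exampleRequestJSON(md pmetric.Metrics) ([]byte, error) {
req := NewExportRequestFromMetrics(md)
return req.MarshalJSON()
}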
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportMetricsServiceResponse(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportMetricsServiceResponse(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return internal.UnmarshalProtoExportMetricsServiceResponse(ms.orig, data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportMetricsServiceResponse(ms.orig, dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportMetricsServiceResponse(ms.orig, iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// AggregationTemporality specifies the method of aggregating metric values,
// either DELTA (change since last report) or CUMULATIVE (total since a fixed
// start time).
type AggregationTemporality int32
const (
// AggregationTemporalityUnspecified is the default AggregationTemporality; it MUST NOT be used.
AggregationTemporalityUnspecified = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
// AggregationTemporalityDelta is an AggregationTemporality for a metric aggregator which reports changes since last report time.
AggregationTemporalityDelta = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
// AggregationTemporalityCumulative is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
AggregationTemporalityCumulative = AggregationTemporality(otlpprofiles.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
)
// String returns the string representation of the AggregationTemporality.
func (at AggregationTemporality) String() string {
switch at {
case AggregationTemporalityUnspecified:
return "Unspecified"
case AggregationTemporalityDelta:
return "Delta"
case AggregationTemporalityCumulative:
return "Cumulative"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"fmt"
"math"
"go.opentelemetry.io/collector/pdata/pcommon"
)
type attributable interface {
AttributeIndices() pcommon.Int32Slice
}
// FromAttributeIndices builds a [pcommon.Map] containing the attributes of a
// record.
// The record can be any struct that implements an `AttributeIndices` method.
// Updates made to the returned map will not be applied back to the record.
func FromAttributeIndices(table KeyValueAndUnitSlice, record attributable, dic ProfilesDictionary) pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(record.AttributeIndices().Len())
for i := 0; i < record.AttributeIndices().Len(); i++ {
kv := table.At(int(record.AttributeIndices().At(i)))
key := dic.StringTable().At(int(kv.KeyStrindex()))
kv.Value().CopyTo(m.PutEmpty(key))
}
return m
}
var errTooManyAttributeTableEntries = errors.New("too many entries in AttributeTable")
// PutAttribute updates an AttributeTable and a record's AttributeIndices to
// add or update an attribute.
// Attributes are assumed to form a map, as for other signals (metrics, logs, etc.):
// the same key must not appear twice in a record's attributes / attribute indices.
// The record can be any struct that implements an `AttributeIndices` method.
//
// Deprecated: [v0.138.0] use SetAttribute instead.
func PutAttribute(table KeyValueAndUnitSlice, record attributable, dic ProfilesDictionary, key string, value pcommon.Value) error {
for i := range record.AttributeIndices().Len() {
idx := int(record.AttributeIndices().At(i))
if idx < 0 || idx >= table.Len() {
return fmt.Errorf("index value %d out of range in AttributeIndices[%d]", idx, i)
}
attr := table.At(idx)
if dic.StringTable().At(int(attr.KeyStrindex())) == key {
if attr.Value().Equal(value) {
// Attribute already exists, nothing to do.
return nil
}
// If the attribute table already contains the key/value pair, just update the index.
for j := range table.Len() {
a := table.At(j)
if dic.StringTable().At(int(a.KeyStrindex())) == key && a.Value().Equal(value) {
if j > math.MaxInt32 {
return errTooManyAttributeTableEntries
}
record.AttributeIndices().SetAt(i, int32(j)) //nolint:gosec // overflow checked
return nil
}
}
if table.Len() >= math.MaxInt32 {
return errTooManyAttributeTableEntries
}
// Find the key in the StringTable, or add it
keyID, err := SetString(dic.StringTable(), key)
if err != nil {
return err
}
// Add the key/value pair as a new attribute to the table...
entry := table.AppendEmpty()
entry.SetKeyStrindex(keyID)
value.CopyTo(entry.Value())
// ...and update the existing index.
record.AttributeIndices().SetAt(i, int32(table.Len()-1)) //nolint:gosec // overflow checked
return nil
}
}
if record.AttributeIndices().Len() >= math.MaxInt32 {
return errors.New("too many entries in AttributeIndices")
}
for j := range table.Len() {
a := table.At(j)
if dic.StringTable().At(int(a.KeyStrindex())) == key && a.Value().Equal(value) {
if j > math.MaxInt32 {
return errTooManyAttributeTableEntries
}
// Add the index of the existing attribute to the indices.
record.AttributeIndices().Append(int32(j)) //nolint:gosec // overflow checked
return nil
}
}
if table.Len() >= math.MaxInt32 {
return errTooManyAttributeTableEntries
}
// Find the key in the StringTable, or add it
keyID, err := SetString(dic.StringTable(), key)
if err != nil {
return err
}
// Add the key/value pair as a new attribute to the table...
entry := table.AppendEmpty()
entry.SetKeyStrindex(keyID)
value.CopyTo(entry.Value())
// ...and add a new index to the indices.
record.AttributeIndices().Append(int32(table.Len() - 1)) //nolint:gosec // overflow checked
return nil
}
// SetAttribute updates an AttributeTable: it returns the index of an existing
// equal entry if present, otherwise appends attr and returns the new index.
func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, error) {
for j, a := range table.All() {
if a.Equal(attr) {
if j > math.MaxInt32 {
return 0, errTooManyAttributeTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyAttributeTableEntries
}
attr.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
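// Example (editor's illustrative sketch, not generated code): storing one
// attribute via the dictionary tables and reading it back. It assumes the
// ProfilesDictionary exposes AttributeTable() and StringTable() accessors;
// the key and value below are arbitrary.
func exampleSetAndReadAttribute(dic ProfilesDictionary, loc Location) error {
// Intern the key string and build the table entry to deduplicate.
keyIdx, err := SetString(dic.StringTable(), "thread.name")
if err != nil {
return err
}
kv := NewKeyValueAndUnit()
kv.SetKeyStrindex(keyIdx)
kv.Value().SetStr("main")
// SetAttribute reuses the index of an equal entry or appends a new one.
idx, err := SetAttribute(dic.AttributeTable(), kv)
if err != nil {
return err
}
loc.AttributeIndices().Append(idx)
// FromAttributeIndices resolves the record's indices into a pcommon.Map.
attrs := FromAttributeIndices(dic.AttributeTable(), loc, dic)
_ = attrs
return nil
}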
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Function
func (fn Function) Equal(val Function) bool {
return fn.NameStrindex() == val.NameStrindex() &&
fn.SystemNameStrindex() == val.SystemNameStrindex() &&
fn.FilenameStrindex() == val.FilenameStrindex() &&
fn.StartLine() == val.StartLine()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyFunctionTableEntries = errors.New("too many entries in FunctionTable")
// SetFunction updates a FunctionTable: it returns the index of an existing
// equal entry if present, otherwise appends fn and returns the new index.
func SetFunction(table FunctionSlice, fn Function) (int32, error) {
for j, m := range table.All() {
if m.Equal(fn) {
if j > math.MaxInt32 {
return 0, errTooManyFunctionTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyFunctionTableEntries
}
fn.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
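// Example (editor's illustrative sketch, not generated code): deduplicating
// a Function into a dictionary's function table. It assumes the
// ProfilesDictionary exposes FunctionTable() and StringTable() accessors;
// the function name and line are arbitrary.
func exampleSetFunction(dic ProfilesDictionary) (int32, error) {
nameIdx, err := SetString(dic.StringTable(), "main.run")
if err != nil {
return 0, err
}
fn := NewFunction()
fn.SetNameStrindex(nameIdx)
fn.SetStartLine(42)
// Returns the index of an equal existing entry, or appends fn.
return SetFunction(dic.FunctionTable(), fn)
}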
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewFunction function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Function struct {
orig *otlpprofiles.Function
state *internal.State
}
func newFunction(orig *otlpprofiles.Function, state *internal.State) Function {
return Function{orig: orig, state: state}
}
// NewFunction creates a new empty Function.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewFunction() Function {
return newFunction(internal.NewFunction(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Function) MoveTo(dest Function) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteFunction(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// NameStrindex returns the namestrindex associated with this Function.
func (ms Function) NameStrindex() int32 {
return ms.orig.NameStrindex
}
// SetNameStrindex replaces the namestrindex associated with this Function.
func (ms Function) SetNameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.NameStrindex = v
}
// SystemNameStrindex returns the systemnamestrindex associated with this Function.
func (ms Function) SystemNameStrindex() int32 {
return ms.orig.SystemNameStrindex
}
// SetSystemNameStrindex replaces the systemnamestrindex associated with this Function.
func (ms Function) SetSystemNameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.SystemNameStrindex = v
}
// FilenameStrindex returns the filenamestrindex associated with this Function.
func (ms Function) FilenameStrindex() int32 {
return ms.orig.FilenameStrindex
}
// SetFilenameStrindex replaces the filenamestrindex associated with this Function.
func (ms Function) SetFilenameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.FilenameStrindex = v
}
// StartLine returns the startline associated with this Function.
func (ms Function) StartLine() int64 {
return ms.orig.StartLine
}
// SetStartLine replaces the startline associated with this Function.
func (ms Function) SetStartLine(v int64) {
ms.state.AssertMutable()
ms.orig.StartLine = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Function) CopyTo(dest Function) {
dest.state.AssertMutable()
internal.CopyFunction(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// FunctionSlice logically represents a slice of Function.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewFunctionSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type FunctionSlice struct {
orig *[]*otlpprofiles.Function
state *internal.State
}
func newFunctionSlice(orig *[]*otlpprofiles.Function, state *internal.State) FunctionSlice {
return FunctionSlice{orig: orig, state: state}
}
// NewFunctionSlice creates a FunctionSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewFunctionSlice() FunctionSlice {
orig := []*otlpprofiles.Function(nil)
return newFunctionSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewFunctionSlice()".
func (es FunctionSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es FunctionSlice) At(i int) Function {
return newFunction((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es FunctionSlice) All() iter.Seq2[int, Function] {
return func(yield func(int, Function) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new FunctionSlice can be initialized:
//
// es := NewFunctionSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es FunctionSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Function, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Function.
// It returns the newly added Function.
func (es FunctionSlice) AppendEmpty() Function {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewFunction())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es FunctionSlice) MoveAndAppendTo(dest FunctionSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es FunctionSlice) RemoveIf(f func(Function) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteFunction((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es FunctionSlice) CopyTo(dest FunctionSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyFunctionSlice(*dest.orig, *es.orig)
}
// Sort sorts the Function elements within FunctionSlice given the
// provided less function so that two instances of FunctionSlice
// can be compared.
func (es FunctionSlice) Sort(less func(a, b Function) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
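// Example (editor's illustrative sketch, not generated code): the usage
// pattern shared by all generated slices in this package, shown here for
// FunctionSlice; the values set below are arbitrary.
func exampleFunctionSliceUsage() {
es := NewFunctionSlice()
es.EnsureCapacity(2) // pre-allocate to avoid growth on append
for i := 0; i < 2; i++ {
fn := es.AppendEmpty()
fn.SetStartLine(int64(i))
}
// Iterate with the All iterator (range-over-func, Go 1.23+).
for i, fn := range es.All() {
_ = i
_ = fn.StartLine()
}
// Drop entries matching a predicate.
es.RemoveIf(func(fn Function) bool { return fn.StartLine() == 0 })
}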
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// KeyValueAndUnit represents a custom 'dictionary native'
// style of encoding attributes which is more convenient
// for profiles than opentelemetry.proto.common.v1.KeyValue.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewKeyValueAndUnit function to create new instances.
// Important: zero-initialized instance is not valid for use.
type KeyValueAndUnit struct {
orig *otlpprofiles.KeyValueAndUnit
state *internal.State
}
func newKeyValueAndUnit(orig *otlpprofiles.KeyValueAndUnit, state *internal.State) KeyValueAndUnit {
return KeyValueAndUnit{orig: orig, state: state}
}
// NewKeyValueAndUnit creates a new empty KeyValueAndUnit.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewKeyValueAndUnit() KeyValueAndUnit {
return newKeyValueAndUnit(internal.NewKeyValueAndUnit(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms KeyValueAndUnit) MoveTo(dest KeyValueAndUnit) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteKeyValueAndUnit(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// KeyStrindex returns the keystrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) KeyStrindex() int32 {
return ms.orig.KeyStrindex
}
// SetKeyStrindex replaces the keystrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) SetKeyStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.KeyStrindex = v
}
// Value returns the value associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) Value() pcommon.Value {
return pcommon.Value(internal.NewValueWrapper(&ms.orig.Value, ms.state))
}
// UnitStrindex returns the unitstrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) UnitStrindex() int32 {
return ms.orig.UnitStrindex
}
// SetUnitStrindex replaces the unitstrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) SetUnitStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.UnitStrindex = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms KeyValueAndUnit) CopyTo(dest KeyValueAndUnit) {
dest.state.AssertMutable()
internal.CopyKeyValueAndUnit(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// KeyValueAndUnitSlice logically represents a slice of KeyValueAndUnit.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewKeyValueAndUnitSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type KeyValueAndUnitSlice struct {
orig *[]*otlpprofiles.KeyValueAndUnit
state *internal.State
}
func newKeyValueAndUnitSlice(orig *[]*otlpprofiles.KeyValueAndUnit, state *internal.State) KeyValueAndUnitSlice {
return KeyValueAndUnitSlice{orig: orig, state: state}
}
// NewKeyValueAndUnitSlice creates a KeyValueAndUnitSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewKeyValueAndUnitSlice() KeyValueAndUnitSlice {
orig := []*otlpprofiles.KeyValueAndUnit(nil)
return newKeyValueAndUnitSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewKeyValueAndUnitSlice()".
func (es KeyValueAndUnitSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es KeyValueAndUnitSlice) At(i int) KeyValueAndUnit {
return newKeyValueAndUnit((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es KeyValueAndUnitSlice) All() iter.Seq2[int, KeyValueAndUnit] {
return func(yield func(int, KeyValueAndUnit) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new KeyValueAndUnitSlice can be initialized:
//
// es := NewKeyValueAndUnitSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es KeyValueAndUnitSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.KeyValueAndUnit, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty KeyValueAndUnit.
// It returns the newly added KeyValueAndUnit.
func (es KeyValueAndUnitSlice) AppendEmpty() KeyValueAndUnit {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewKeyValueAndUnit())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es KeyValueAndUnitSlice) MoveAndAppendTo(dest KeyValueAndUnitSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es KeyValueAndUnitSlice) RemoveIf(f func(KeyValueAndUnit) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteKeyValueAndUnit((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es KeyValueAndUnitSlice) CopyTo(dest KeyValueAndUnitSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyKeyValueAndUnitSlice(*dest.orig, *es.orig)
}
// Sort sorts the KeyValueAndUnit elements within KeyValueAndUnitSlice given the
// provided less function so that two instances of KeyValueAndUnitSlice
// can be compared.
func (es KeyValueAndUnitSlice) Sort(less func(a, b KeyValueAndUnit) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// Line details a specific line in source code, linked to a function.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLine function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Line struct {
orig *otlpprofiles.Line
state *internal.State
}
func newLine(orig *otlpprofiles.Line, state *internal.State) Line {
return Line{orig: orig, state: state}
}
// NewLine creates a new empty Line.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLine() Line {
return newLine(internal.NewLine(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Line) MoveTo(dest Line) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLine(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// FunctionIndex returns the functionindex associated with this Line.
func (ms Line) FunctionIndex() int32 {
return ms.orig.FunctionIndex
}
// SetFunctionIndex replaces the functionindex associated with this Line.
func (ms Line) SetFunctionIndex(v int32) {
ms.state.AssertMutable()
ms.orig.FunctionIndex = v
}
// Line returns the line associated with this Line.
func (ms Line) Line() int64 {
return ms.orig.Line
}
// SetLine replaces the line associated with this Line.
func (ms Line) SetLine(v int64) {
ms.state.AssertMutable()
ms.orig.Line = v
}
// Column returns the column associated with this Line.
func (ms Line) Column() int64 {
return ms.orig.Column
}
// SetColumn replaces the column associated with this Line.
func (ms Line) SetColumn(v int64) {
ms.state.AssertMutable()
ms.orig.Column = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Line) CopyTo(dest Line) {
dest.state.AssertMutable()
internal.CopyLine(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// LineSlice logically represents a slice of Line.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLineSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LineSlice struct {
orig *[]*otlpprofiles.Line
state *internal.State
}
func newLineSlice(orig *[]*otlpprofiles.Line, state *internal.State) LineSlice {
return LineSlice{orig: orig, state: state}
}
// NewLineSlice creates a LineSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLineSlice() LineSlice {
orig := []*otlpprofiles.Line(nil)
return newLineSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLineSlice()".
func (es LineSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LineSlice) At(i int) Line {
return newLine((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LineSlice) All() iter.Seq2[int, Line] {
return func(yield func(int, Line) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new LineSlice can be initialized:
//
// es := NewLineSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LineSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Line, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Line.
// It returns the newly added Line.
func (es LineSlice) AppendEmpty() Line {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLine())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LineSlice) MoveAndAppendTo(dest LineSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LineSlice) RemoveIf(f func(Line) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLine((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LineSlice) CopyTo(dest LineSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLineSlice(*dest.orig, *es.orig)
}
// Sort sorts the Line elements within LineSlice given the
// provided less function so that two instances of LineSlice
// can be compared.
func (es LineSlice) Sort(less func(a, b Line) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Link represents a pointer from a profile Sample to a trace Span.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Link struct {
orig *otlpprofiles.Link
state *internal.State
}
func newLink(orig *otlpprofiles.Link, state *internal.State) Link {
return Link{orig: orig, state: state}
}
// NewLink creates a new empty Link.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLink() Link {
return newLink(internal.NewLink(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Link) MoveTo(dest Link) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLink(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this Link.
func (ms Link) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Link.
func (ms Link) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = data.TraceID(v)
}
// SpanID returns the spanid associated with this Link.
func (ms Link) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this Link.
func (ms Link) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = data.SpanID(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Link) CopyTo(dest Link) {
dest.state.AssertMutable()
internal.CopyLink(dest.orig, ms.orig)
}
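// Example (editor's illustrative sketch, not generated code): pointing a
// profile Link at a span. The trace and span IDs below are arbitrary
// placeholder bytes.
func exampleLink() Link {
lk := NewLink()
lk.SetTraceID(pcommon.TraceID([16]byte{0x01, 0x02}))
lk.SetSpanID(pcommon.SpanID([8]byte{0x0a, 0x0b}))
return lk
}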
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// LinkSlice logically represents a slice of Link.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LinkSlice struct {
orig *[]*otlpprofiles.Link
state *internal.State
}
func newLinkSlice(orig *[]*otlpprofiles.Link, state *internal.State) LinkSlice {
return LinkSlice{orig: orig, state: state}
}
// NewLinkSlice creates a LinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLinkSlice() LinkSlice {
orig := []*otlpprofiles.Link(nil)
return newLinkSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLinkSlice()".
func (es LinkSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LinkSlice) At(i int) Link {
return newLink((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LinkSlice) All() iter.Seq2[int, Link] {
return func(yield func(int, Link) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new LinkSlice can be initialized:
//
// es := NewLinkSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LinkSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Link, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Link.
// It returns the newly added Link.
func (es LinkSlice) AppendEmpty() Link {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLink())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LinkSlice) MoveAndAppendTo(dest LinkSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LinkSlice) RemoveIf(f func(Link) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLink((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LinkSlice) CopyTo(dest LinkSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLinkSlice(*dest.orig, *es.orig)
}
// Sort sorts the Link elements within LinkSlice given the
// provided less function so that two instances of LinkSlice
// can be compared.
func (es LinkSlice) Sort(less func(a, b Link) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Location describes function and line table debug information.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLocation function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Location struct {
orig *otlpprofiles.Location
state *internal.State
}
func newLocation(orig *otlpprofiles.Location, state *internal.State) Location {
return Location{orig: orig, state: state}
}
// NewLocation creates a new empty Location.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLocation() Location {
return newLocation(internal.NewLocation(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Location) MoveTo(dest Location) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLocation(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MappingIndex returns the mappingindex associated with this Location.
func (ms Location) MappingIndex() int32 {
return ms.orig.MappingIndex
}
// SetMappingIndex replaces the mappingindex associated with this Location.
func (ms Location) SetMappingIndex(v int32) {
ms.state.AssertMutable()
ms.orig.MappingIndex = v
}
// Address returns the address associated with this Location.
func (ms Location) Address() uint64 {
return ms.orig.Address
}
// SetAddress replaces the address associated with this Location.
func (ms Location) SetAddress(v uint64) {
ms.state.AssertMutable()
ms.orig.Address = v
}
// Line returns the Line associated with this Location.
func (ms Location) Line() LineSlice {
return newLineSlice(&ms.orig.Line, ms.state)
}
// AttributeIndices returns the AttributeIndices associated with this Location.
func (ms Location) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Location) CopyTo(dest Location) {
dest.state.AssertMutable()
internal.CopyLocation(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// LocationSlice logically represents a slice of Location.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLocationSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LocationSlice struct {
orig *[]*otlpprofiles.Location
state *internal.State
}
func newLocationSlice(orig *[]*otlpprofiles.Location, state *internal.State) LocationSlice {
return LocationSlice{orig: orig, state: state}
}
// NewLocationSlice creates a LocationSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLocationSlice() LocationSlice {
orig := []*otlpprofiles.Location(nil)
return newLocationSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLocationSlice()".
func (es LocationSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LocationSlice) At(i int) Location {
return newLocation((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LocationSlice) All() iter.Seq2[int, Location] {
return func(yield func(int, Location) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new LocationSlice can be initialized:
//
// es := NewLocationSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LocationSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Location, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Location.
// It returns the newly added Location.
func (es LocationSlice) AppendEmpty() Location {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLocation())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LocationSlice) MoveAndAppendTo(dest LocationSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LocationSlice) RemoveIf(f func(Location) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLocation((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LocationSlice) CopyTo(dest LocationSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLocationSlice(*dest.orig, *es.orig)
}
// Sort sorts the Location elements within LocationSlice given the
// provided less function so that two instances of LocationSlice
// can be compared.
func (es LocationSlice) Sort(less func(a, b Location) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMapping function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Mapping struct {
orig *otlpprofiles.Mapping
state *internal.State
}
func newMapping(orig *otlpprofiles.Mapping, state *internal.State) Mapping {
return Mapping{orig: orig, state: state}
}
// NewMapping creates a new empty Mapping.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMapping() Mapping {
return newMapping(internal.NewMapping(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overriding the destination and
// resetting the current instance to its zero value.
func (ms Mapping) MoveTo(dest Mapping) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteMapping(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MemoryStart returns the memorystart associated with this Mapping.
func (ms Mapping) MemoryStart() uint64 {
return ms.orig.MemoryStart
}
// SetMemoryStart replaces the memorystart associated with this Mapping.
func (ms Mapping) SetMemoryStart(v uint64) {
ms.state.AssertMutable()
ms.orig.MemoryStart = v
}
// MemoryLimit returns the memorylimit associated with this Mapping.
func (ms Mapping) MemoryLimit() uint64 {
return ms.orig.MemoryLimit
}
// SetMemoryLimit replaces the memorylimit associated with this Mapping.
func (ms Mapping) SetMemoryLimit(v uint64) {
ms.state.AssertMutable()
ms.orig.MemoryLimit = v
}
// FileOffset returns the fileoffset associated with this Mapping.
func (ms Mapping) FileOffset() uint64 {
return ms.orig.FileOffset
}
// SetFileOffset replaces the fileoffset associated with this Mapping.
func (ms Mapping) SetFileOffset(v uint64) {
ms.state.AssertMutable()
ms.orig.FileOffset = v
}
// FilenameStrindex returns the filenamestrindex associated with this Mapping.
func (ms Mapping) FilenameStrindex() int32 {
return ms.orig.FilenameStrindex
}
// SetFilenameStrindex replaces the filenamestrindex associated with this Mapping.
func (ms Mapping) SetFilenameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.FilenameStrindex = v
}
// AttributeIndices returns the AttributeIndices associated with this Mapping.
func (ms Mapping) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Mapping) CopyTo(dest Mapping) {
dest.state.AssertMutable()
internal.CopyMapping(dest.orig, ms.orig)
}
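// Editor's note: a minimal sketch of populating a Mapping through the setters
// above; the addresses and index are illustrative, and the string index is
// assumed to point into ProfilesDictionary.StringTable():
//
//	m := NewMapping() // testing-only constructor, per the note above
//	m.SetMemoryStart(0x400000)
//	m.SetMemoryLimit(0x500000)
//	m.SetFileOffset(0)
//	m.SetFilenameStrindex(1)       // position of the file name in the string table
//	m.AttributeIndices().Append(0) // position in the dictionary's attribute table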
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// MappingSlice logically represents a slice of Mapping.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMappingSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type MappingSlice struct {
orig *[]*otlpprofiles.Mapping
state *internal.State
}
func newMappingSlice(orig *[]*otlpprofiles.Mapping, state *internal.State) MappingSlice {
return MappingSlice{orig: orig, state: state}
}
// NewMappingSlice creates a MappingSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewMappingSlice() MappingSlice {
orig := []*otlpprofiles.Mapping(nil)
return newMappingSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewMappingSlice()".
func (es MappingSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es MappingSlice) At(i int) Mapping {
return newMapping((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es MappingSlice) All() iter.Seq2[int, Mapping] {
return func(yield func(int, Mapping) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new MappingSlice can be initialized:
//
// es := NewMappingSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es MappingSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Mapping, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Mapping.
// It returns the newly added Mapping.
func (es MappingSlice) AppendEmpty() Mapping {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewMapping())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es MappingSlice) MoveAndAppendTo(dest MappingSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es MappingSlice) RemoveIf(f func(Mapping) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteMapping((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es MappingSlice) CopyTo(dest MappingSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyMappingSlice(*dest.orig, *es.orig)
}
// Sort sorts the Mapping elements within MappingSlice given the
// provided less function so that two instances of MappingSlice
// can be compared.
func (es MappingSlice) Sort(less func(a, b Mapping) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Profile is an implementation of the pprofextended data model.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfile function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Profile struct {
orig *otlpprofiles.Profile
state *internal.State
}
func newProfile(orig *otlpprofiles.Profile, state *internal.State) Profile {
return Profile{orig: orig, state: state}
}
// NewProfile creates a new empty Profile.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfile() Profile {
return newProfile(internal.NewProfile(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Profile) MoveTo(dest Profile) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteProfile(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// SampleType returns the sampletype associated with this Profile.
func (ms Profile) SampleType() ValueType {
return newValueType(&ms.orig.SampleType, ms.state)
}
// Sample returns the Sample associated with this Profile.
func (ms Profile) Sample() SampleSlice {
return newSampleSlice(&ms.orig.Sample, ms.state)
}
// Time returns the time associated with this Profile.
func (ms Profile) Time() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTime replaces the time associated with this Profile.
func (ms Profile) SetTime(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Duration returns the duration associated with this Profile.
func (ms Profile) Duration() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.DurationNano)
}
// SetDuration replaces the duration associated with this Profile.
func (ms Profile) SetDuration(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.DurationNano = uint64(v)
}
// PeriodType returns the periodtype associated with this Profile.
func (ms Profile) PeriodType() ValueType {
return newValueType(&ms.orig.PeriodType, ms.state)
}
// Period returns the period associated with this Profile.
func (ms Profile) Period() int64 {
return ms.orig.Period
}
// SetPeriod replaces the period associated with this Profile.
func (ms Profile) SetPeriod(v int64) {
ms.state.AssertMutable()
ms.orig.Period = v
}
// CommentStrindices returns the CommentStrindices associated with this Profile.
func (ms Profile) CommentStrindices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.CommentStrindices, ms.state))
}
// ProfileID returns the profileid associated with this Profile.
func (ms Profile) ProfileID() ProfileID {
return ProfileID(ms.orig.ProfileId)
}
// SetProfileID replaces the profileid associated with this Profile.
func (ms Profile) SetProfileID(v ProfileID) {
ms.state.AssertMutable()
ms.orig.ProfileId = data.ProfileID(v)
}
// DroppedAttributesCount returns the droppedattributescount associated with this Profile.
func (ms Profile) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Profile.
func (ms Profile) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// OriginalPayloadFormat returns the originalpayloadformat associated with this Profile.
func (ms Profile) OriginalPayloadFormat() string {
return ms.orig.OriginalPayloadFormat
}
// SetOriginalPayloadFormat replaces the originalpayloadformat associated with this Profile.
func (ms Profile) SetOriginalPayloadFormat(v string) {
ms.state.AssertMutable()
ms.orig.OriginalPayloadFormat = v
}
// OriginalPayload returns the OriginalPayload associated with this Profile.
func (ms Profile) OriginalPayload() pcommon.ByteSlice {
return pcommon.ByteSlice(internal.NewByteSliceWrapper(&ms.orig.OriginalPayload, ms.state))
}
// AttributeIndices returns the AttributeIndices associated with this Profile.
func (ms Profile) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Profile) CopyTo(dest Profile) {
dest.state.AssertMutable()
internal.CopyProfile(dest.orig, ms.orig)
}
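// Editor's note: a minimal sketch of setting a Profile's scalar fields and
// appending one sample, using only the accessors above; values are
// illustrative and the string-table indices are assumptions:
//
//	p := NewProfile() // testing-only constructor, per the note above
//	p.SetTime(pcommon.Timestamp(1700000000_000000000))
//	p.SetDuration(pcommon.Timestamp(10_000_000_000)) // nanoseconds carried in a Timestamp
//	p.SampleType().SetTypeStrindex(1)                // e.g. "cpu" in the string table
//	p.SampleType().SetUnitStrindex(2)                // e.g. "nanoseconds"
//	p.SetPeriod(10_000_000)
//	p.Sample().AppendEmpty().Values().Append(1)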
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
)
// Profiles is the top-level struct that is propagated through the profiles pipeline.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Profiles internal.ProfilesWrapper
func newProfiles(orig *otlpcollectorprofiles.ExportProfilesServiceRequest, state *internal.State) Profiles {
return Profiles(internal.NewProfilesWrapper(orig, state))
}
// NewProfiles creates a new empty Profiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfiles() Profiles {
return newProfiles(internal.NewExportProfilesServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Profiles) MoveTo(dest Profiles) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportProfilesServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceProfiles returns the ResourceProfiles associated with this Profiles.
func (ms Profiles) ResourceProfiles() ResourceProfilesSlice {
return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState())
}
// Dictionary returns the dictionary associated with this Profiles.
func (ms Profiles) Dictionary() ProfilesDictionary {
return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Profiles) CopyTo(dest Profiles) {
dest.getState().AssertMutable()
internal.CopyExportProfilesServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Profiles) getOrig() *otlpcollectorprofiles.ExportProfilesServiceRequest {
return internal.GetProfilesOrig(internal.ProfilesWrapper(ms))
}
func (ms Profiles) getState() *internal.State {
return internal.GetProfilesState(internal.ProfilesWrapper(ms))
}
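// Editor's note: a minimal sketch of building the resource/scope/profile tree
// from the top level, using only the methods above; attribute and scope names
// are illustrative:
//
//	pd := NewProfiles() // testing-only constructor, per the note above
//	rp := pd.ResourceProfiles().AppendEmpty()
//	rp.Resource().Attributes().PutStr("service.name", "checkout")
//	sp := rp.ScopeProfiles().AppendEmpty()
//	sp.Scope().SetName("my.profiler")
//	prof := sp.Profiles().AppendEmpty()
//	prof.SetPeriod(10_000_000)
//	pd.Dictionary().StringTable().Append("") // shared tables live beside the tree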
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfilesDictionary function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ProfilesDictionary struct {
orig *otlpprofiles.ProfilesDictionary
state *internal.State
}
func newProfilesDictionary(orig *otlpprofiles.ProfilesDictionary, state *internal.State) ProfilesDictionary {
return ProfilesDictionary{orig: orig, state: state}
}
// NewProfilesDictionary creates a new empty ProfilesDictionary.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfilesDictionary() ProfilesDictionary {
return newProfilesDictionary(internal.NewProfilesDictionary(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ProfilesDictionary) MoveTo(dest ProfilesDictionary) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteProfilesDictionary(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MappingTable returns the MappingTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) MappingTable() MappingSlice {
return newMappingSlice(&ms.orig.MappingTable, ms.state)
}
// LocationTable returns the LocationTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) LocationTable() LocationSlice {
return newLocationSlice(&ms.orig.LocationTable, ms.state)
}
// FunctionTable returns the FunctionTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) FunctionTable() FunctionSlice {
return newFunctionSlice(&ms.orig.FunctionTable, ms.state)
}
// LinkTable returns the LinkTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) LinkTable() LinkSlice {
return newLinkSlice(&ms.orig.LinkTable, ms.state)
}
// StringTable returns the StringTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) StringTable() pcommon.StringSlice {
return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.orig.StringTable, ms.state))
}
// AttributeTable returns the AttributeTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) AttributeTable() KeyValueAndUnitSlice {
return newKeyValueAndUnitSlice(&ms.orig.AttributeTable, ms.state)
}
// StackTable returns the StackTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) StackTable() StackSlice {
return newStackSlice(&ms.orig.StackTable, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ProfilesDictionary) CopyTo(dest ProfilesDictionary) {
dest.state.AssertMutable()
internal.CopyProfilesDictionary(dest.orig, ms.orig)
}
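// Editor's note: a minimal sketch of the indirection the dictionary provides:
// strings, mappings, locations, etc. are stored once in the tables above and
// referenced by position everywhere else. Index 0 of the string table is
// conventionally the empty string, as in pprof:
//
//	dict := NewProfilesDictionary() // testing-only constructor, per the note above
//	st := dict.StringTable()
//	st.Append("")        // index 0
//	st.Append("main.go") // index 1
//	m := dict.MappingTable().AppendEmpty()
//	m.SetFilenameStrindex(1) // refers to "main.go" above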
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ProfilesSlice logically represents a slice of Profile.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ProfilesSlice struct {
orig *[]*otlpprofiles.Profile
state *internal.State
}
func newProfilesSlice(orig *[]*otlpprofiles.Profile, state *internal.State) ProfilesSlice {
return ProfilesSlice{orig: orig, state: state}
}
// NewProfilesSlice creates a ProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewProfilesSlice() ProfilesSlice {
orig := []*otlpprofiles.Profile(nil)
return newProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewProfilesSlice()".
func (es ProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ProfilesSlice) At(i int) Profile {
return newProfile((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ProfilesSlice) All() iter.Seq2[int, Profile] {
return func(yield func(int, Profile) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ProfilesSlice can be initialized:
//
// es := NewProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Profile, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Profile.
// It returns the newly added Profile.
func (es ProfilesSlice) AppendEmpty() Profile {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewProfile())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ProfilesSlice) MoveAndAppendTo(dest ProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ProfilesSlice) RemoveIf(f func(Profile) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteProfile((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ProfilesSlice) CopyTo(dest ProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyProfileSlice(*dest.orig, *es.orig)
}
// Sort sorts the Profile elements within ProfilesSlice given the
// provided less function so that two instances of ProfilesSlice
// can be compared.
func (es ProfilesSlice) Sort(less func(a, b Profile) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceProfiles is a collection of profiles from a Resource.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewResourceProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceProfiles struct {
orig *otlpprofiles.ResourceProfiles
state *internal.State
}
func newResourceProfiles(orig *otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfiles {
return ResourceProfiles{orig: orig, state: state}
}
// NewResourceProfiles creates a new empty ResourceProfiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceProfiles() ResourceProfiles {
return newResourceProfiles(internal.NewResourceProfiles(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceProfiles(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceProfiles.
func (ms ResourceProfiles) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles.
func (ms ResourceProfiles) ScopeProfiles() ScopeProfilesSlice {
return newScopeProfilesSlice(&ms.orig.ScopeProfiles, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceProfiles.
func (ms ResourceProfiles) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceProfiles.
func (ms ResourceProfiles) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) {
dest.state.AssertMutable()
internal.CopyResourceProfiles(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ResourceProfilesSlice logically represents a slice of ResourceProfiles.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceProfilesSlice struct {
orig *[]*otlpprofiles.ResourceProfiles
state *internal.State
}
func newResourceProfilesSlice(orig *[]*otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfilesSlice {
return ResourceProfilesSlice{orig: orig, state: state}
}
// NewResourceProfilesSlice creates a ResourceProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceProfilesSlice() ResourceProfilesSlice {
orig := []*otlpprofiles.ResourceProfiles(nil)
return newResourceProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceProfilesSlice()".
func (es ResourceProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceProfilesSlice) At(i int) ResourceProfiles {
return newResourceProfiles((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceProfilesSlice) All() iter.Seq2[int, ResourceProfiles] {
return func(yield func(int, ResourceProfiles) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ResourceProfilesSlice can be initialized:
//
// es := NewResourceProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.ResourceProfiles, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ResourceProfiles.
// It returns the newly added ResourceProfiles.
func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceProfiles())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceProfilesSlice) MoveAndAppendTo(dest ResourceProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceProfiles((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceProfilesSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the
// provided less function so that two instances of ResourceProfilesSlice
// can be compared.
func (es ResourceProfilesSlice) Sort(less func(a, b ResourceProfiles) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Sample represents each record value encountered within a profiled program.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSample function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Sample struct {
orig *otlpprofiles.Sample
state *internal.State
}
func newSample(orig *otlpprofiles.Sample, state *internal.State) Sample {
return Sample{orig: orig, state: state}
}
// NewSample creates a new empty Sample.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSample() Sample {
return newSample(internal.NewSample(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Sample) MoveTo(dest Sample) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSample(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// StackIndex returns the stackindex associated with this Sample.
func (ms Sample) StackIndex() int32 {
return ms.orig.StackIndex
}
// SetStackIndex replaces the stackindex associated with this Sample.
func (ms Sample) SetStackIndex(v int32) {
ms.state.AssertMutable()
ms.orig.StackIndex = v
}
// Values returns the Values associated with this Sample.
func (ms Sample) Values() pcommon.Int64Slice {
return pcommon.Int64Slice(internal.NewInt64SliceWrapper(&ms.orig.Values, ms.state))
}
// AttributeIndices returns the AttributeIndices associated with this Sample.
func (ms Sample) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// LinkIndex returns the linkindex associated with this Sample.
func (ms Sample) LinkIndex() int32 {
return ms.orig.LinkIndex
}
// SetLinkIndex replaces the linkindex associated with this Sample.
func (ms Sample) SetLinkIndex(v int32) {
ms.state.AssertMutable()
ms.orig.LinkIndex = v
}
// TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample.
func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.TimestampsUnixNano, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Sample) CopyTo(dest Sample) {
dest.state.AssertMutable()
internal.CopySample(dest.orig, ms.orig)
}
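// Editor's note: a minimal sketch of filling a Sample, using only the
// accessors above; every index refers into a ProfilesDictionary table, and
// the concrete numbers are illustrative:
//
//	s := NewSample() // testing-only constructor, per the note above
//	s.SetStackIndex(0)             // entry in the dictionary's StackTable()
//	s.Values().Append(42)          // value(s) matching the profile's sample type
//	s.AttributeIndices().Append(3) // entry in the dictionary's AttributeTable()
//	s.TimestampsUnixNano().Append(1700000000_000000000)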
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// SampleSlice logically represents a slice of Sample.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSampleSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SampleSlice struct {
orig *[]*otlpprofiles.Sample
state *internal.State
}
func newSampleSlice(orig *[]*otlpprofiles.Sample, state *internal.State) SampleSlice {
return SampleSlice{orig: orig, state: state}
}
// NewSampleSlice creates a SampleSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSampleSlice() SampleSlice {
orig := []*otlpprofiles.Sample(nil)
return newSampleSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSampleSlice()".
func (es SampleSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SampleSlice) At(i int) Sample {
return newSample((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SampleSlice) All() iter.Seq2[int, Sample] {
return func(yield func(int, Sample) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SampleSlice can be initialized:
//
// es := NewSampleSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SampleSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Sample, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Sample.
// It returns the newly added Sample.
func (es SampleSlice) AppendEmpty() Sample {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSample())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SampleSlice) MoveAndAppendTo(dest SampleSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SampleSlice) RemoveIf(f func(Sample) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSample((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SampleSlice) CopyTo(dest SampleSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySampleSlice(*dest.orig, *es.orig)
}
// Sort sorts the Sample elements within SampleSlice given the
// provided less function so that two instances of SampleSlice
// can be compared.
func (es SampleSlice) Sort(less func(a, b Sample) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeProfiles is a collection of profiles from an InstrumentationScope.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewScopeProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeProfiles struct {
orig *otlpprofiles.ScopeProfiles
state *internal.State
}
func newScopeProfiles(orig *otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfiles {
return ScopeProfiles{orig: orig, state: state}
}
// NewScopeProfiles creates a new empty ScopeProfiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeProfiles() ScopeProfiles {
return newScopeProfiles(internal.NewScopeProfiles(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeProfiles(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeProfiles.
func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Profiles returns the Profiles associated with this ScopeProfiles.
func (ms ScopeProfiles) Profiles() ProfilesSlice {
return newProfilesSlice(&ms.orig.Profiles, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeProfiles.
func (ms ScopeProfiles) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeProfiles.
func (ms ScopeProfiles) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) {
dest.state.AssertMutable()
internal.CopyScopeProfiles(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ScopeProfilesSlice logically represents a slice of ScopeProfiles.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeProfilesSlice struct {
orig *[]*otlpprofiles.ScopeProfiles
state *internal.State
}
func newScopeProfilesSlice(orig *[]*otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfilesSlice {
return ScopeProfilesSlice{orig: orig, state: state}
}
// NewScopeProfilesSlice creates a ScopeProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeProfilesSlice() ScopeProfilesSlice {
orig := []*otlpprofiles.ScopeProfiles(nil)
return newScopeProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeProfilesSlice()".
func (es ScopeProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeProfilesSlice) At(i int) ScopeProfiles {
return newScopeProfiles((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeProfilesSlice) All() iter.Seq2[int, ScopeProfiles] {
return func(yield func(int, ScopeProfiles) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ScopeProfilesSlice can be initialized:
//
// es := NewScopeProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.ScopeProfiles, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ScopeProfiles.
// It returns the newly added ScopeProfiles.
func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeProfiles())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeProfilesSlice) MoveAndAppendTo(dest ScopeProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeProfiles((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeProfilesSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the
// provided less function so that two instances of ScopeProfilesSlice
// can be compared.
func (es ScopeProfilesSlice) Sort(less func(a, b ScopeProfiles) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Stack represents a stack trace as a list of locations.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewStack function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Stack struct {
orig *otlpprofiles.Stack
state *internal.State
}
func newStack(orig *otlpprofiles.Stack, state *internal.State) Stack {
return Stack{orig: orig, state: state}
}
// NewStack creates a new empty Stack.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStack() Stack {
return newStack(internal.NewStack(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Stack) MoveTo(dest Stack) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteStack(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// LocationIndices returns the LocationIndices associated with this Stack.
func (ms Stack) LocationIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.LocationIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Stack) CopyTo(dest Stack) {
dest.state.AssertMutable()
internal.CopyStack(dest.orig, ms.orig)
}
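// Editor's note: a minimal sketch; a Stack is nothing but an ordered list of
// positions in the dictionary's LocationTable(), so filling one is a single
// append with illustrative indices:
//
//	st := NewStack() // testing-only constructor, per the note above
//	st.LocationIndices().Append(0, 1)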
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// StackSlice logically represents a slice of Stack.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewStackSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type StackSlice struct {
orig *[]*otlpprofiles.Stack
state *internal.State
}
func newStackSlice(orig *[]*otlpprofiles.Stack, state *internal.State) StackSlice {
return StackSlice{orig: orig, state: state}
}
// NewStackSlice creates a StackSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewStackSlice() StackSlice {
orig := []*otlpprofiles.Stack(nil)
return newStackSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewStackSlice()".
func (es StackSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es StackSlice) At(i int) Stack {
return newStack((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es StackSlice) All() iter.Seq2[int, Stack] {
return func(yield func(int, Stack) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new StackSlice can be initialized:
//
// es := NewStackSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es StackSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.Stack, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Stack.
// It returns the newly added Stack.
func (es StackSlice) AppendEmpty() Stack {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewStack())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es StackSlice) MoveAndAppendTo(dest StackSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es StackSlice) RemoveIf(f func(Stack) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteStack((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es StackSlice) CopyTo(dest StackSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyStackSlice(*dest.orig, *es.orig)
}
// Sort sorts the Stack elements within StackSlice given the
// provided less function so that two instances of StackSlice
// can be compared.
func (es StackSlice) Sort(less func(a, b Stack) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ValueType describes the type and units of a value, with an optional aggregation temporality.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewValueType function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ValueType struct {
orig *otlpprofiles.ValueType
state *internal.State
}
func newValueType(orig *otlpprofiles.ValueType, state *internal.State) ValueType {
return ValueType{orig: orig, state: state}
}
// NewValueType creates a new empty ValueType.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewValueType() ValueType {
return newValueType(internal.NewValueType(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ValueType) MoveTo(dest ValueType) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteValueType(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TypeStrindex returns the typestrindex associated with this ValueType.
func (ms ValueType) TypeStrindex() int32 {
return ms.orig.TypeStrindex
}
// SetTypeStrindex replaces the typestrindex associated with this ValueType.
func (ms ValueType) SetTypeStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.TypeStrindex = v
}
// UnitStrindex returns the unitstrindex associated with this ValueType.
func (ms ValueType) UnitStrindex() int32 {
return ms.orig.UnitStrindex
}
// SetUnitStrindex replaces the unitstrindex associated with this ValueType.
func (ms ValueType) SetUnitStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.UnitStrindex = v
}
// AggregationTemporality returns the aggregationtemporality associated with this ValueType.
func (ms ValueType) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this ValueType.
func (ms ValueType) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = otlpprofiles.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ValueType) CopyTo(dest ValueType) {
dest.state.AssertMutable()
internal.CopyValueType(dest.orig, ms.orig)
}
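// Editor's note: a minimal sketch of describing a value's type and unit via
// string-table indices, using only the setters above; the indices are
// illustrative and the enum constant is assumed from this package's
// AggregationTemporality values:
//
//	vt := NewValueType() // testing-only constructor, per the note above
//	vt.SetTypeStrindex(1) // e.g. "samples" in the string table
//	vt.SetUnitStrindex(2) // e.g. "count"
//	vt.SetAggregationTemporality(AggregationTemporalityCumulative)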
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development"
)
// ValueTypeSlice logically represents a slice of ValueType.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewValueTypeSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ValueTypeSlice struct {
orig *[]*otlpprofiles.ValueType
state *internal.State
}
func newValueTypeSlice(orig *[]*otlpprofiles.ValueType, state *internal.State) ValueTypeSlice {
return ValueTypeSlice{orig: orig, state: state}
}
// NewValueTypeSlice creates a ValueTypeSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewValueTypeSlice() ValueTypeSlice {
orig := []*otlpprofiles.ValueType(nil)
return newValueTypeSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewValueTypeSlice()".
func (es ValueTypeSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ValueTypeSlice) At(i int) ValueType {
return newValueType((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ValueTypeSlice) All() iter.Seq2[int, ValueType] {
return func(yield func(int, ValueType) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ValueTypeSlice can be initialized:
//
// es := NewValueTypeSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ValueTypeSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlpprofiles.ValueType, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ValueType to the end of the slice.
// It returns the newly added ValueType.
func (es ValueTypeSlice) AppendEmpty() ValueType {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewValueType())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ValueTypeSlice) MoveAndAppendTo(dest ValueTypeSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
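// Illustrative usage (sketch; src and dst are hypothetical ValueTypeSlice values):
//
//	src.MoveAndAppendTo(dst)
//	// dst now also contains the elements previously in src, and src.Len() == 0.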
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteValueType((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
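// Illustrative usage (sketch): RemoveIf filters the slice in place. The
// predicate below, dropping entries with a zero unit index, is hypothetical.
//
//	es.RemoveIf(func(vt ValueType) bool {
//		return vt.UnitStrindex() == 0
//	})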
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyValueTypeSlice(*dest.orig, *es.orig)
}
// Sort sorts the ValueType elements within ValueTypeSlice given the
// provided less function so that two instances of ValueTypeSlice
// can be compared.
func (es ValueTypeSlice) Sort(less func(a, b ValueType) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
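// Illustrative usage (sketch): sorting by type index so that two slices can be
// compared element-wise; the ordering criterion is hypothetical.
//
//	es.Sort(func(a, b ValueType) bool {
//		return a.TypeStrindex() < b.TypeStrindex()
//	})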
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals pprofile.Profiles to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalProfiles to the OTLP/JSON format.
func (*JSONMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportProfilesServiceRequest(pd.getOrig(), dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes into pprofile.Profiles.
type JSONUnmarshaler struct{}
// UnmarshalProfiles from OTLP/JSON format into pprofile.Profiles.
func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
pd := NewProfiles()
internal.UnmarshalJSONExportProfilesServiceRequest(pd.getOrig(), iter)
if iter.Error() != nil {
return Profiles{}, iter.Error()
}
otlp.MigrateProfiles(pd.getOrig().ResourceProfiles)
return pd, nil
}
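// Illustrative round trip (sketch; pd is a hypothetical, valid pprofile.Profiles):
//
//	var m JSONMarshaler
//	buf, err := m.MarshalProfiles(pd)
//	// handle err ...
//	var u JSONUnmarshaler
//	pd2, err := u.UnmarshalProfiles(buf)
//	// pd2 is equivalent to pd, modulo migrations applied on unmarshal.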
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another KeyValueAndUnit.
// It assumes both structs refer to the same dictionary.
func (ms KeyValueAndUnit) Equal(val KeyValueAndUnit) bool {
return ms.KeyStrindex() == val.KeyStrindex() &&
ms.UnitStrindex() == val.UnitStrindex() &&
ms.Value().Equal(val.Value())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another LineSlice.
func (l LineSlice) Equal(val LineSlice) bool {
if l.Len() != val.Len() {
return false
}
for i := range l.Len() {
if !l.At(i).Equal(val.At(i)) {
return false
}
}
return true
}
// Equal checks equality with another Line.
func (l Line) Equal(val Line) bool {
return l.Column() == val.Column() &&
l.FunctionIndex() == val.FunctionIndex() &&
l.Line() == val.Line()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Link.
func (ms Link) Equal(val Link) bool {
return ms.TraceID() == val.TraceID() &&
ms.SpanID() == val.SpanID()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyLinkTableEntries = errors.New("too many entries in LinkTable")
// SetLink updates a LinkTable, adding li if an equal Link is not already
// present, and returns its index.
func SetLink(table LinkSlice, li Link) (int32, error) {
for j, l := range table.All() {
if l.Equal(li) {
if j > math.MaxInt32 {
return 0, errTooManyLinkTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyLinkTableEntries
}
li.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
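// Illustrative usage (sketch; table and li are hypothetical values):
//
//	table := NewLinkSlice()
//	li := NewLink() // assumed populated with trace and span IDs elsewhere
//	idx, err := SetLink(table, li)
//	// idx is li's position in table; an existing equal entry is reused.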
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Location.
func (ms Location) Equal(val Location) bool {
return ms.MappingIndex() == val.MappingIndex() &&
ms.Address() == val.Address() &&
ms.AttributeIndices().Equal(val.AttributeIndices()) &&
ms.Line().Equal(val.Line())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"fmt"
"math"
)
// FromLocationIndices builds a slice containing all the locations of a Stack.
// Updates made to the returned slice will not be applied back to the Stack.
func FromLocationIndices(table LocationSlice, record Stack) LocationSlice {
m := NewLocationSlice()
m.EnsureCapacity(record.LocationIndices().Len())
for _, idx := range record.LocationIndices().All() {
l := table.At(int(idx))
l.CopyTo(m.AppendEmpty())
}
return m
}
var (
errTooManyLocationTableEntries = errors.New("too many entries in LocationTable")
errTooManyLocationIndicesEntries = errors.New("too many entries in LocationIndices")
)
// PutLocation updates a LocationTable and a Stack's LocationIndices to
// add or update a location.
//
// Deprecated: [v0.138.0] use SetLocation instead.
func PutLocation(table LocationSlice, record Stack, loc Location) error {
for i, locIdx := range record.LocationIndices().All() {
idx := int(locIdx)
if idx < 0 || idx >= table.Len() {
return fmt.Errorf("index value %d out of range in LocationIndices[%d]", idx, i)
}
locAt := table.At(idx)
if locAt.Equal(loc) {
// Location already exists, nothing to do.
return nil
}
}
if record.LocationIndices().Len() >= math.MaxInt32 {
return errTooManyLocationIndicesEntries
}
id, err := SetLocation(table, loc)
if err != nil {
return err
}
record.LocationIndices().Append(id)
return nil
}
// SetLocation updates a LocationTable, adding loc if an equal Location is not
// already present, and returns its index.
func SetLocation(table LocationSlice, loc Location) (int32, error) {
for j, a := range table.All() {
if a.Equal(loc) {
if j > math.MaxInt32 {
return 0, errTooManyLocationTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyLocationTableEntries
}
loc.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
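// Illustrative usage (sketch) replacing the deprecated PutLocation; table,
// stack, and loc are hypothetical values:
//
//	idx, err := SetLocation(table, loc)
//	// handle err ...
//	stack.LocationIndices().Append(idx)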
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Mapping.
func (ms Mapping) Equal(val Mapping) bool {
return ms.MemoryStart() == val.MemoryStart() &&
ms.MemoryLimit() == val.MemoryLimit() &&
ms.FileOffset() == val.FileOffset() &&
ms.FilenameStrindex() == val.FilenameStrindex() &&
ms.AttributeIndices().Equal(val.AttributeIndices())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyMappingTableEntries = errors.New("too many entries in MappingTable")
// SetMapping updates a MappingTable, adding ma if an equal Mapping is not
// already present, and returns its index.
func SetMapping(table MappingSlice, ma Mapping) (int32, error) {
for j, m := range table.All() {
if m.Equal(ma) {
if j > math.MaxInt32 {
return 0, errTooManyMappingTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyMappingTableEntries
}
ma.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
var _ MarshalSizer = (*ProtoMarshaler)(nil)
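// ProtoMarshaler marshals pprofile.Profiles to protobuf-encoded bytes and
// reports their encoded sizes.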
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
size := internal.SizeProtoExportProfilesServiceRequest(pd.getOrig())
buf := make([]byte, size)
_ = internal.MarshalProtoExportProfilesServiceRequest(pd.getOrig(), buf)
return buf, nil
}
func (e *ProtoMarshaler) ProfilesSize(pd Profiles) int {
return internal.SizeProtoExportProfilesServiceRequest(pd.getOrig())
}
func (e *ProtoMarshaler) ResourceProfilesSize(pd ResourceProfiles) int {
return internal.SizeProtoResourceProfiles(pd.orig)
}
func (e *ProtoMarshaler) ScopeProfilesSize(pd ScopeProfiles) int {
return internal.SizeProtoScopeProfiles(pd.orig)
}
func (e *ProtoMarshaler) ProfileSize(pd Profile) int {
return internal.SizeProtoProfile(pd.orig)
}
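// ProtoUnmarshaler unmarshals protobuf-encoded bytes to pprofile.Profiles.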
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
pd := NewProfiles()
err := internal.UnmarshalProtoExportProfilesServiceRequest(pd.getOrig(), buf)
if err != nil {
return Profiles{}, err
}
return pd, nil
}
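// Illustrative round trip (sketch; pd is a hypothetical pprofile.Profiles):
//
//	var m ProtoMarshaler
//	buf, _ := m.MarshalProfiles(pd)
//	var u ProtoUnmarshaler
//	pd2, err := u.UnmarshalProfiles(buf)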
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofileotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *otlpcollectorprofiles.ExportProfilesPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *otlpcollectorprofiles.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportProfilesPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportProfilesPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedProfiles returns the rejectedprofiles associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedProfiles() int64 {
return ms.orig.RejectedProfiles
}
// SetRejectedProfiles replaces the rejectedprofiles associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedProfiles(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedProfiles = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportProfilesPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofileotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *otlpcollectorprofiles.ExportProfilesServiceResponse
state *internal.State
}
func newExportResponse(orig *otlpcollectorprofiles.ExportProfilesServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportProfilesServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportProfilesServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportProfilesServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
_ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded.
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for the OTLP gRPC Profiles service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export pprofile.Profiles to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementations of GRPCClient outside this package.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otlpcollectorprofile.NewProfilesServiceClient(cc)}
}
type grpcClient struct {
rawClient otlpcollectorprofile.ProfilesServiceClient
}
// Export implements the Client interface.
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for the OTLP gRPC ProfilesService.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementations of GRPCServer outside this package.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otlpcollectorprofile.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv})
}
type rawProfilesServer struct {
srv GRPCServer
}
func (s rawProfilesServer) Export(ctx context.Context, request *otlpcollectorprofile.ExportProfilesServiceRequest) (*otlpcollectorprofile.ExportProfilesServiceResponse, error) {
otlp.MigrateProfiles(request.ResourceProfiles)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
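// Illustrative server wiring (sketch; exportServer and its Export body are
// hypothetical):
//
//	type exportServer struct {
//		UnimplementedGRPCServer
//	}
//
//	func (s *exportServer) Export(ctx context.Context, req ExportRequest) (ExportResponse, error) {
//		_ = req.Profiles() // process the received profiles
//		return NewExportResponse(), nil
//	}
//
//	// RegisterGRPCServer(grpcSrv, &exportServer{}) attaches it to a *grpc.Server.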
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/pprofile"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for pprofile.Profiles data.
type ExportRequest struct {
orig *otlpcollectorprofile.ExportProfilesServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &otlpcollectorprofile.ExportProfilesServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromProfiles returns an ExportRequest from pprofile.Profiles.
// Because ExportRequest is a wrapper for pprofile.Profiles,
// any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest {
return ExportRequest{
orig: internal.GetProfilesOrig(internal.ProfilesWrapper(td)),
state: internal.GetProfilesState(internal.ProfilesWrapper(td)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportProfilesServiceRequest(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportProfilesServiceRequest(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := internal.UnmarshalProtoExportProfilesServiceRequest(ms.orig, data)
if err != nil {
return err
}
otlp.MigrateProfiles(ms.orig.ResourceProfiles)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportProfilesServiceRequest(ms.orig, dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportProfilesServiceRequest(ms.orig, iter)
return iter.Error()
}
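// Profiles returns the pprofile.Profiles wrapped by this ExportRequest.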
func (ms ExportRequest) Profiles() pprofile.Profiles {
return pprofile.Profiles(internal.NewProfilesWrapper(ms.orig, ms.state))
}
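// Illustrative usage (sketch; pd is a hypothetical pprofile.Profiles):
//
//	req := NewExportRequestFromProfiles(pd)
//	buf, err := req.MarshalProto()
//	// Because req wraps pd, later changes to pd remain visible via req.Profiles().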
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportProfilesServiceResponse(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportProfilesServiceResponse(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return internal.UnmarshalProtoExportProfilesServiceResponse(ms.orig, data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportProfilesServiceResponse(ms.orig, dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportProfilesServiceResponse(ms.orig, iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal/data"
)
var emptyProfileID = ProfileID([16]byte{})
// ProfileID is a profile identifier.
type ProfileID [16]byte
// NewProfileIDEmpty returns a new empty (all zero bytes) ProfileID.
func NewProfileIDEmpty() ProfileID {
return emptyProfileID
}
// String returns string representation of the ProfileID.
//
// Important: Don't rely on this method to get a string identifier of ProfileID.
// Use hex.EncodeToString explicitly instead.
// This method is meant to implement Stringer interface for display purposes only.
func (ms ProfileID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if the ProfileID contains only zero bytes.
func (ms ProfileID) IsEmpty() bool {
return data.ProfileID(ms).IsEmpty()
}
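// Illustrative usage (sketch):
//
//	id := ProfileID([16]byte{1, 2, 3})
//	_ = id.IsEmpty()               // false
//	s := hex.EncodeToString(id[:]) // stable identifier form, as recommended above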
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// MarkReadOnly marks the Profiles as shared so that no further modifications can be made to it.
func (ms Profiles) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Profiles instance is read-only.
func (ms Profiles) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// SampleCount calculates the total number of samples.
func (ms Profiles) SampleCount() int {
sampleCount := 0
rps := ms.ResourceProfiles()
for i := 0; i < rps.Len(); i++ {
rp := rps.At(i)
sps := rp.ScopeProfiles()
for j := 0; j < sps.Len(); j++ {
pcs := sps.At(j).Profiles()
for k := 0; k < pcs.Len(); k++ {
sampleCount += pcs.At(k).Sample().Len()
}
}
}
return sampleCount
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Stack.
func (ms Stack) Equal(val Stack) bool {
if ms.LocationIndices().Len() != val.LocationIndices().Len() {
return false
}
for i := range ms.LocationIndices().Len() {
if ms.LocationIndices().At(i) != val.LocationIndices().At(i) {
return false
}
}
return true
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyStackTableEntries = errors.New("too many entries in StackTable")
// SetStack updates a StackTable, adding st if an equal Stack is not already
// present, and returns its index.
func SetStack(table StackSlice, st Stack) (int32, error) {
for j, l := range table.All() {
if l.Equal(st) {
if j > math.MaxInt32 {
return 0, errTooManyStackTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyStackTableEntries
}
st.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
"go.opentelemetry.io/collector/pdata/pcommon"
)
var errTooManyStringTableEntries = errors.New("too many entries in StringTable")
// SetString updates a StringTable, adding val if it is not already present, and returns its index.
func SetString(table pcommon.StringSlice, val string) (int32, error) {
for j, v := range table.All() {
if v == val {
if j > math.MaxInt32 {
return 0, errTooManyStringTableEntries
}
// Return the index of the existing value.
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyStringTableEntries
}
table.Append(val)
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
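// Illustrative usage (sketch): SetString deduplicates string-table entries.
//
//	table := pcommon.NewStringSlice()
//	i, _ := SetString(table, "samples") // appends "samples", returns 0
//	j, _ := SetString(table, "samples") // finds the existing entry, returns 0
//	// i == j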
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceSpans is a collection of spans from a Resource.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpans struct {
orig *otlptrace.ResourceSpans
state *internal.State
}
func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans {
return ResourceSpans{orig: orig, state: state}
}
// NewResourceSpans creates a new empty ResourceSpans.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceSpans() ResourceSpans {
return newResourceSpans(internal.NewResourceSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ResourceSpans) MoveTo(dest ResourceSpans) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceSpans.
func (ms ResourceSpans) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeSpans returns the ScopeSpans associated with this ResourceSpans.
func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice {
return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceSpans.
func (ms ResourceSpans) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceSpans.
func (ms ResourceSpans) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ResourceSpans) CopyTo(dest ResourceSpans) {
dest.state.AssertMutable()
internal.CopyResourceSpans(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ResourceSpansSlice logically represents a slice of ResourceSpans.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpansSlice struct {
orig *[]*otlptrace.ResourceSpans
state *internal.State
}
func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice {
return ResourceSpansSlice{orig: orig, state: state}
}
// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceSpansSlice() ResourceSpansSlice {
orig := []*otlptrace.ResourceSpans(nil)
return newResourceSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceSpansSlice()".
func (es ResourceSpansSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceSpansSlice) At(i int) ResourceSpans {
return newResourceSpans((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceSpansSlice) All() iter.Seq2[int, ResourceSpans] {
return func(yield func(int, ResourceSpans) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ResourceSpansSlice can be initialized:
//
// es := NewResourceSpansSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ResourceSpans to the end of the slice.
// It returns the newly added ResourceSpans.
func (es ResourceSpansSlice) AppendEmpty() ResourceSpans {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceSpans())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceSpansSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceSpans elements within ResourceSpansSlice given the
// provided less function so that two instances of ResourceSpansSlice
// can be compared.
func (es ResourceSpansSlice) Sort(less func(a, b ResourceSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeSpans is a collection of spans from an InstrumentationScope.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpans struct {
orig *otlptrace.ScopeSpans
state *internal.State
}
func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans {
return ScopeSpans{orig: orig, state: state}
}
// NewScopeSpans creates a new empty ScopeSpans.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeSpans() ScopeSpans {
return newScopeSpans(internal.NewScopeSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ScopeSpans) MoveTo(dest ScopeSpans) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeSpans.
func (ms ScopeSpans) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Spans returns the Spans associated with this ScopeSpans.
func (ms ScopeSpans) Spans() SpanSlice {
return newSpanSlice(&ms.orig.Spans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ScopeSpans) CopyTo(dest ScopeSpans) {
dest.state.AssertMutable()
internal.CopyScopeSpans(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// ScopeSpansSlice logically represents a slice of ScopeSpans.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpansSlice struct {
orig *[]*otlptrace.ScopeSpans
state *internal.State
}
func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice {
return ScopeSpansSlice{orig: orig, state: state}
}
// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeSpansSlice() ScopeSpansSlice {
orig := []*otlptrace.ScopeSpans(nil)
return newScopeSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeSpansSlice()".
func (es ScopeSpansSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeSpansSlice) At(i int) ScopeSpans {
return newScopeSpans((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeSpansSlice) All() iter.Seq2[int, ScopeSpans] {
return func(yield func(int, ScopeSpans) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ScopeSpansSlice can be initialized:
//
// es := NewScopeSpansSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ScopeSpans to the end of the slice.
// It returns the newly added ScopeSpans.
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeSpans())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeSpansSlice) MoveAndAppendTo(dest ScopeSpansSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeSpansSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the
// provided less function so that two instances of ScopeSpansSlice
// can be compared.
func (es ScopeSpansSlice) Sort(less func(a, b ScopeSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Span represents a single operation within a trace.
// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpan function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Span struct {
orig *otlptrace.Span
state *internal.State
}
func newSpan(orig *otlptrace.Span, state *internal.State) Span {
return Span{orig: orig, state: state}
}
// NewSpan creates a new empty Span.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpan() Span {
return newSpan(internal.NewSpan(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms Span) MoveTo(dest Span) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpan(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this Span.
func (ms Span) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Span.
func (ms Span) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = data.TraceID(v)
}
// SpanID returns the spanid associated with this Span.
func (ms Span) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this Span.
func (ms Span) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = data.SpanID(v)
}
// TraceState returns the tracestate associated with this Span.
func (ms Span) TraceState() pcommon.TraceState {
return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// ParentSpanID returns the parentspanid associated with this Span.
func (ms Span) ParentSpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.ParentSpanId)
}
// SetParentSpanID replaces the parentspanid associated with this Span.
func (ms Span) SetParentSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.ParentSpanId = data.SpanID(v)
}
// Flags returns the flags associated with this Span.
func (ms Span) Flags() uint32 {
return ms.orig.Flags
}
// SetFlags replaces the flags associated with this Span.
func (ms Span) SetFlags(v uint32) {
ms.state.AssertMutable()
ms.orig.Flags = v
}
// Name returns the name associated with this Span.
func (ms Span) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this Span.
func (ms Span) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Kind returns the kind associated with this Span.
func (ms Span) Kind() SpanKind {
return SpanKind(ms.orig.Kind)
}
// SetKind replaces the kind associated with this Span.
func (ms Span) SetKind(v SpanKind) {
ms.state.AssertMutable()
ms.orig.Kind = otlptrace.Span_SpanKind(v)
}
// StartTimestamp returns the starttimestamp associated with this Span.
func (ms Span) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this Span.
func (ms Span) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// EndTimestamp returns the endtimestamp associated with this Span.
func (ms Span) EndTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.EndTimeUnixNano)
}
// SetEndTimestamp replaces the endtimestamp associated with this Span.
func (ms Span) SetEndTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.EndTimeUnixNano = uint64(v)
}
// Attributes returns the Attributes associated with this Span.
func (ms Span) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Span.
func (ms Span) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Span.
func (ms Span) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Events returns the Events associated with this Span.
func (ms Span) Events() SpanEventSlice {
return newSpanEventSlice(&ms.orig.Events, ms.state)
}
// DroppedEventsCount returns the droppedeventscount associated with this Span.
func (ms Span) DroppedEventsCount() uint32 {
return ms.orig.DroppedEventsCount
}
// SetDroppedEventsCount replaces the droppedeventscount associated with this Span.
func (ms Span) SetDroppedEventsCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedEventsCount = v
}
// Links returns the Links associated with this Span.
func (ms Span) Links() SpanLinkSlice {
return newSpanLinkSlice(&ms.orig.Links, ms.state)
}
// DroppedLinksCount returns the droppedlinkscount associated with this Span.
func (ms Span) DroppedLinksCount() uint32 {
return ms.orig.DroppedLinksCount
}
// SetDroppedLinksCount replaces the droppedlinkscount associated with this Span.
func (ms Span) SetDroppedLinksCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedLinksCount = v
}
// Status returns the status associated with this Span.
func (ms Span) Status() Status {
return newStatus(&ms.orig.Status, ms.state)
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms Span) CopyTo(dest Span) {
dest.state.AssertMutable()
internal.CopySpan(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SpanEvent is a time-stamped annotation of the span, consisting of a user-supplied
// text description and key-value pairs. See OTLP for the event definition.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanEvent function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEvent struct {
orig *otlptrace.Span_Event
state *internal.State
}
func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent {
return SpanEvent{orig: orig, state: state}
}
// NewSpanEvent creates a new empty SpanEvent.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanEvent() SpanEvent {
return newSpanEvent(internal.NewSpan_Event(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms SpanEvent) MoveTo(dest SpanEvent) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpan_Event(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Timestamp returns the timestamp associated with this SpanEvent.
func (ms SpanEvent) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this SpanEvent.
func (ms SpanEvent) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Name returns the name associated with this SpanEvent.
func (ms SpanEvent) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this SpanEvent.
func (ms SpanEvent) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Attributes returns the Attributes associated with this SpanEvent.
func (ms SpanEvent) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent.
func (ms SpanEvent) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent.
func (ms SpanEvent) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms SpanEvent) CopyTo(dest SpanEvent) {
dest.state.AssertMutable()
internal.CopySpan_Event(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanEventSlice logically represents a slice of SpanEvent.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanEventSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEventSlice struct {
orig *[]*otlptrace.Span_Event
state *internal.State
}
func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice {
return SpanEventSlice{orig: orig, state: state}
}
// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanEventSlice() SpanEventSlice {
orig := []*otlptrace.Span_Event(nil)
return newSpanEventSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanEventSlice()".
func (es SpanEventSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanEventSlice) At(i int) SpanEvent {
return newSpanEvent((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanEventSlice) All() iter.Seq2[int, SpanEvent] {
return func(yield func(int, SpanEvent) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is left unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SpanEventSlice can be initialized:
//
// es := NewSpanEventSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanEventSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty SpanEvent to the end of the slice.
// It returns the newly added SpanEvent.
func (es SpanEventSlice) AppendEmpty() SpanEvent {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpan_Event())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpan_Event((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es SpanEventSlice) CopyTo(dest SpanEventSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpan_EventSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanEvent elements within SpanEventSlice given the
// provided less function so that two instances of SpanEventSlice
// can be compared.
func (es SpanEventSlice) Sort(less func(a, b SpanEvent) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/data"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SpanLink is a pointer from the current span to another span in the same trace or in a
// different trace.
// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLink struct {
orig *otlptrace.Span_Link
state *internal.State
}
func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink {
return SpanLink{orig: orig, state: state}
}
// NewSpanLink creates a new empty SpanLink.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
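//
// For example (an illustrative sketch; `links` is an assumed SpanLinkSlice and
// the ID bytes are placeholders), populating a link appended to a slice:
//
//	link := links.AppendEmpty()
//	link.SetTraceID(pcommon.TraceID([16]byte{0x01, 0x02}))
//	link.SetSpanID(pcommon.SpanID([8]byte{0x0a}))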
func NewSpanLink() SpanLink {
return newSpanLink(internal.NewSpan_Link(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms SpanLink) MoveTo(dest SpanLink) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpan_Link(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the trace ID associated with this SpanLink.
func (ms SpanLink) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the trace ID associated with this SpanLink.
func (ms SpanLink) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = data.TraceID(v)
}
// SpanID returns the span ID associated with this SpanLink.
func (ms SpanLink) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the span ID associated with this SpanLink.
func (ms SpanLink) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = data.SpanID(v)
}
// TraceState returns the trace state associated with this SpanLink.
func (ms SpanLink) TraceState() pcommon.TraceState {
return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// Attributes returns the Attributes associated with this SpanLink.
func (ms SpanLink) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the dropped attributes count associated with this SpanLink.
func (ms SpanLink) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the dropped attributes count associated with this SpanLink.
func (ms SpanLink) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Flags returns the flags associated with this SpanLink.
func (ms SpanLink) Flags() uint32 {
return ms.orig.Flags
}
// SetFlags replaces the flags associated with this SpanLink.
func (ms SpanLink) SetFlags(v uint32) {
ms.state.AssertMutable()
ms.orig.Flags = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms SpanLink) CopyTo(dest SpanLink) {
dest.state.AssertMutable()
internal.CopySpan_Link(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanLinkSlice logically represents a slice of SpanLink.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLinkSlice struct {
orig *[]*otlptrace.Span_Link
state *internal.State
}
func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice {
return SpanLinkSlice{orig: orig, state: state}
}
// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanLinkSlice() SpanLinkSlice {
orig := []*otlptrace.Span_Link(nil)
return newSpanLinkSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanLinkSlice()".
func (es SpanLinkSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanLinkSlice) At(i int) SpanLink {
return newSpanLink((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanLinkSlice) All() iter.Seq2[int, SpanLink] {
return func(yield func(int, SpanLink) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new SpanLinkSlice can be initialized:
//
// es := NewSpanLinkSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanLinkSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty SpanLink.
// It returns the newly added SpanLink.
func (es SpanLinkSlice) AppendEmpty() SpanLink {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpan_Link())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpan_Link((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpan_LinkSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanLink elements within SpanLinkSlice using the
// provided less function, so that two instances of SpanLinkSlice
// can be compared.
func (es SpanLinkSlice) Sort(less func(a, b SpanLink) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanSlice logically represents a slice of Span.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanSlice struct {
orig *[]*otlptrace.Span
state *internal.State
}
func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice {
return SpanSlice{orig: orig, state: state}
}
// NewSpanSlice creates a SpanSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanSlice() SpanSlice {
orig := []*otlptrace.Span(nil)
return newSpanSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanSlice()".
func (es SpanSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanSlice) At(i int) Span {
return newSpan((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanSlice) All() iter.Seq2[int, Span] {
return func(yield func(int, Span) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new SpanSlice can be initialized:
//
// es := NewSpanSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*otlptrace.Span, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Span.
// It returns the newly added Span.
func (es SpanSlice) AppendEmpty() Span {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpan())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanSlice) RemoveIf(f func(Span) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpan((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice, overwriting the destination.
func (es SpanSlice) CopyTo(dest SpanSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpanSlice(*dest.orig, *es.orig)
}
// Sort sorts the Span elements within SpanSlice using the
// provided less function, so that two instances of SpanSlice
// can be compared.
func (es SpanSlice) Sort(less func(a, b Span) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// Status is an optional final status for this span. Semantically, when Status is not
// set, it means the span ended without errors; assume Status.Ok (code = 0).
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewStatus function to create new instances.
// Important: zero-initialized instance is not valid for use.
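//
// For example (an illustrative sketch; assumes Span exposes Status(), as it
// does elsewhere in this package), marking a span as failed:
//
//	span.Status().SetCode(StatusCodeError)
//	span.Status().SetMessage("upstream timeout")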
type Status struct {
orig *otlptrace.Status
state *internal.State
}
func newStatus(orig *otlptrace.Status, state *internal.State) Status {
return Status{orig: orig, state: state}
}
// NewStatus creates a new empty Status.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStatus() Status {
return newStatus(internal.NewStatus(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms Status) MoveTo(dest Status) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteStatus(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Message returns the message associated with this Status.
func (ms Status) Message() string {
return ms.orig.Message
}
// SetMessage replaces the message associated with this Status.
func (ms Status) SetMessage(v string) {
ms.state.AssertMutable()
ms.orig.Message = v
}
// Code returns the code associated with this Status.
func (ms Status) Code() StatusCode {
return StatusCode(ms.orig.Code)
}
// SetCode replaces the code associated with this Status.
func (ms Status) SetCode(v StatusCode) {
ms.state.AssertMutable()
ms.orig.Code = otlptrace.Status_StatusCode(v)
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms Status) CopyTo(dest Status) {
dest.state.AssertMutable()
internal.CopyStatus(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)
// Traces is the top-level struct that is propagated through the traces pipeline.
// Use NewTraces to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewTraces function to create new instances.
// Important: zero-initialized instance is not valid for use.
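//
// For example (an illustrative sketch using accessors defined in this
// package; SetName is assumed, as elsewhere in ptrace), assembling a
// minimal Traces value:
//
//	td := NewTraces()
//	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
//	span.SetName("example-operation")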
type Traces internal.TracesWrapper
func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *internal.State) Traces {
return Traces(internal.NewTracesWrapper(orig, state))
}
// NewTraces creates a new empty Traces.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewTraces() Traces {
return newTraces(internal.NewExportTraceServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms Traces) MoveTo(dest Traces) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportTraceServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceSpans returns the ResourceSpans associated with this Traces.
func (ms Traces) ResourceSpans() ResourceSpansSlice {
return newResourceSpansSlice(&ms.getOrig().ResourceSpans, ms.getState())
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms Traces) CopyTo(dest Traces) {
dest.getState().AssertMutable()
internal.CopyExportTraceServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest {
return internal.GetTracesOrig(internal.TracesWrapper(ms))
}
func (ms Traces) getState() *internal.State {
return internal.GetTracesState(internal.TracesWrapper(ms))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals Traces to JSON bytes using the OTLP/JSON format.
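//
// For example (an illustrative round trip; `td` is an assumed ptrace.Traces
// value):
//
//	var m JSONMarshaler
//	data, err := m.MarshalTraces(td)
//	if err != nil {
//		return err
//	}
//	var u JSONUnmarshaler
//	td2, err := u.UnmarshalTraces(data)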
type JSONMarshaler struct{}
// MarshalTraces to the OTLP/JSON format.
func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportTraceServiceRequest(td.getOrig(), dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes to Traces.
type JSONUnmarshaler struct{}
// UnmarshalTraces from OTLP/JSON format into Traces.
func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
td := NewTraces()
internal.UnmarshalJSONExportTraceServiceRequest(td.getOrig(), iter)
if iter.Error() != nil {
return Traces{}, iter.Error()
}
otlp.MigrateTraces(td.getOrig().ResourceSpans)
return td, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) {
size := internal.SizeProtoExportTraceServiceRequest(td.getOrig())
buf := make([]byte, size)
_ = internal.MarshalProtoExportTraceServiceRequest(td.getOrig(), buf)
return buf, nil
}
func (e *ProtoMarshaler) TracesSize(td Traces) int {
return internal.SizeProtoExportTraceServiceRequest(td.getOrig())
}
func (e *ProtoMarshaler) ResourceSpansSize(td ResourceSpans) int {
return internal.SizeProtoResourceSpans(td.orig)
}
func (e *ProtoMarshaler) ScopeSpansSize(td ScopeSpans) int {
return internal.SizeProtoScopeSpans(td.orig)
}
func (e *ProtoMarshaler) SpanSize(td Span) int {
return internal.SizeProtoSpan(td.orig)
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
td := NewTraces()
err := internal.UnmarshalProtoExportTraceServiceRequest(td.getOrig(), buf)
if err != nil {
return Traces{}, err
}
return td, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptraceotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *otlpcollectortrace.ExportTracePartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportTracePartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportTracePartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedSpans returns the rejected spans count associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedSpans() int64 {
return ms.orig.RejectedSpans
}
// SetRejectedSpans replaces the rejected spans count associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedSpans(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedSpans = v
}
// ErrorMessage returns the error message associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the error message associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportTracePartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptraceotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *otlpcollectortrace.ExportTraceServiceResponse
state *internal.State
}
func newExportResponse(orig *otlpcollectortrace.ExportTraceServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportTraceServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct, overwriting the destination and
// resetting the current instance to its zero value.
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportTraceServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partial success details associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct, overwriting the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportTraceServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
_ "go.opentelemetry.io/collector/pdata/internal/grpcencoding" // enforces custom gRPC encoding to be loaded.
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for the OTLP gRPC Traces service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export ptrace.Traces to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementations of GRPCClient outside this package.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
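//
// For example (an illustrative sketch; the endpoint is a placeholder, and
// grpc.NewClient and the insecure credentials helper come from a recent
// google.golang.org/grpc and google.golang.org/grpc/credentials/insecure):
//
//	cc, err := grpc.NewClient("localhost:4317",
//		grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		return err
//	}
//	client := NewGRPCClient(cc)
//	resp, err := client.Export(ctx, NewExportRequestFromTraces(td))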
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otlpcollectortrace.NewTraceServiceClient(cc)}
}
type grpcClient struct {
rawClient otlpcollectortrace.TraceServiceClient
}
// Export implements the Client interface.
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for the OTLP gRPC Traces service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementations of GRPCServer outside this package.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward-compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otlpcollectortrace.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv})
}
type rawTracesServer struct {
srv GRPCServer
}
func (s rawTracesServer) Export(ctx context.Context, request *otlpcollectortrace.ExportTraceServiceRequest) (*otlpcollectortrace.ExportTraceServiceResponse, error) {
otlp.MigrateTraces(request.ResourceSpans)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for ptrace.Traces data.
type ExportRequest struct {
orig *otlpcollectortrace.ExportTraceServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &otlpcollectortrace.ExportTraceServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromTraces returns an ExportRequest from ptrace.Traces.
// Because ExportRequest is a wrapper for ptrace.Traces,
// any changes to the provided Traces struct will be reflected in the ExportRequest and vice versa.
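//
// For example (illustrative), wrapping traces to build an OTLP export body:
//
//	req := NewExportRequestFromTraces(td)
//	body, err := req.MarshalProto()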
func NewExportRequestFromTraces(td ptrace.Traces) ExportRequest {
return ExportRequest{
orig: internal.GetTracesOrig(internal.TracesWrapper(td)),
state: internal.GetTracesState(internal.TracesWrapper(td)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportTraceServiceRequest(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportTraceServiceRequest(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := internal.UnmarshalProtoExportTraceServiceRequest(ms.orig, data)
if err != nil {
return err
}
otlp.MigrateTraces(ms.orig.ResourceSpans)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportTraceServiceRequest(ms.orig, dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportTraceServiceRequest(ms.orig, iter)
return iter.Error()
}
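// Traces returns the ptrace.Traces wrapped by this ExportRequest. Because the
// request is a wrapper, changes to the returned Traces are reflected in the
// request and vice versa.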
func (ms ExportRequest) Traces() ptrace.Traces {
return ptrace.Traces(internal.NewTracesWrapper(ms.orig, ms.state))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := internal.SizeProtoExportTraceServiceResponse(ms.orig)
buf := make([]byte, size)
_ = internal.MarshalProtoExportTraceServiceResponse(ms.orig, buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return internal.UnmarshalProtoExportTraceServiceResponse(ms.orig, data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
internal.MarshalJSONExportTraceServiceResponse(ms.orig, dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
internal.UnmarshalJSONExportTraceServiceResponse(ms.orig, iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
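//
// For example (an illustrative sketch; assumes Span exposes SetKind, as it
// does elsewhere in this package):
//
//	span.SetKind(SpanKindServer)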
type SpanKind int32
const (
// SpanKindUnspecified represents that the SpanKind is unspecified, it MUST NOT be used.
SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED)
// SpanKindInternal indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL)
// SpanKindServer indicates that the span covers server-side handling of an RPC or other
// remote network request.
SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER)
// SpanKindClient indicates that the span describes a request to some remote service.
SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT)
// SpanKindProducer indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans.
// A PRODUCER span ends when the message is accepted by the broker, while the logical
// processing of the message may span a much longer time.
SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER)
// SpanKindConsumer indicates that the span describes a consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship between
// producer and consumer spans.
SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER)
)
// String returns the string representation of the SpanKind.
func (sk SpanKind) String() string {
switch sk {
case SpanKindUnspecified:
return "Unspecified"
case SpanKindInternal:
return "Internal"
case SpanKindServer:
return "Server"
case SpanKindClient:
return "Client"
case SpanKindProducer:
return "Producer"
case SpanKindConsumer:
return "Consumer"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)
// StatusCode mirrors the codes defined at
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET)
StatusCodeOk = StatusCode(otlptrace.Status_STATUS_CODE_OK)
StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR)
)
// String returns the string representation of the StatusCode.
func (sc StatusCode) String() string {
switch sc {
case StatusCodeUnset:
return "Unset"
case StatusCodeOk:
return "Ok"
case StatusCodeError:
return "Error"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
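//
// For example (an illustrative sketch), freezing a batch before sharing it
// with concurrent consumers:
//
//	td.MarkReadOnly()
//	// Subsequent mutations now fail the internal mutability assertion.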
func (ms Traces) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Traces instance is read-only.
func (ms Traces) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// SpanCount calculates the total number of spans.
func (ms Traces) SpanCount() int {
spanCount := 0
rss := ms.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ilss := rs.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
spanCount += ilss.At(j).Spans().Len()
}
}
return spanCount
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"encoding"
"errors"
"fmt"
"net/url"
"path"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configgrpc"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/config/configoptional"
)
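// SanitizedURLPath is a URL path that, when unmarshaled from text, is
// normalized to always begin with a leading slash. For example
// (illustrative), a configured value of "v1/custom" becomes "/v1/custom".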
type SanitizedURLPath string
var _ encoding.TextUnmarshaler = (*SanitizedURLPath)(nil)
func (s *SanitizedURLPath) UnmarshalText(text []byte) error {
u, err := url.Parse(string(text))
if err != nil {
return fmt.Errorf("invalid HTTP URL path set for signal: %w", err)
}
if !path.IsAbs(u.Path) {
u.Path = "/" + u.Path
}
*s = SanitizedURLPath(u.Path)
return nil
}
type HTTPConfig struct {
ServerConfig confighttp.ServerConfig `mapstructure:",squash"`
// The URL path to receive traces on. If omitted "/v1/traces" will be used.
TracesURLPath SanitizedURLPath `mapstructure:"traces_url_path,omitempty"`
// The URL path to receive metrics on. If omitted "/v1/metrics" will be used.
MetricsURLPath SanitizedURLPath `mapstructure:"metrics_url_path,omitempty"`
// The URL path to receive logs on. If omitted "/v1/logs" will be used.
LogsURLPath SanitizedURLPath `mapstructure:"logs_url_path,omitempty"`
// prevent unkeyed literal initialization
_ struct{}
}
// Protocols is the configuration for the supported protocols.
type Protocols struct {
GRPC configoptional.Optional[configgrpc.ServerConfig] `mapstructure:"grpc"`
HTTP configoptional.Optional[HTTPConfig] `mapstructure:"http"`
// prevent unkeyed literal initialization
_ struct{}
}
// Config defines configuration for OTLP receiver.
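//
// For example (an illustrative sketch; the endpoints are placeholders), the
// corresponding collector YAML:
//
//	receivers:
//	  otlp:
//	    protocols:
//	      grpc:
//	        endpoint: localhost:4317
//	      http:
//	        endpoint: localhost:4318
//	        traces_url_path: /v1/traces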
type Config struct {
// Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON).
Protocols `mapstructure:"protocols"`
// prevent unkeyed literal initialization
_ struct{}
}
var _ component.Config = (*Config)(nil)
// Validate checks that the receiver configuration is valid.
func (cfg *Config) Validate() error {
if !cfg.GRPC.HasValue() && !cfg.HTTP.HasValue() {
return errors.New("must specify at least one protocol when using the OTLP receiver")
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)
const (
pbContentType = "application/x-protobuf"
jsonContentType = "application/json"
)
var (
pbEncoder = &protoEncoder{}
jsEncoder = &jsonEncoder{}
)
type encoder interface {
unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error)
unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error)
unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error)
unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error)
marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error)
marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error)
marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error)
marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error)
marshalStatus(rsp *spb.Status) ([]byte, error)
contentType() string
}
type protoEncoder struct{}
func (protoEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
req := ptraceotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
req := plogotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
req := pprofileotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
return proto.Marshal(resp)
}
func (protoEncoder) contentType() string {
return pbContentType
}
type jsonEncoder struct{}
func (jsonEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
req := ptraceotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
req := plogotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
req := pprofileotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
return protojson.Marshal(resp)
}
func (jsonEncoder) contentType() string {
return jsonContentType
}
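// encoderForContentType is an illustrative sketch (not part of the receiver's
// actual request routing, which lives in the HTTP handler) showing how the
// two encoders map to the supported Content-Type values defined above.
func encoderForContentType(ct string) encoder {
switch ct {
case pbContentType:
return pbEncoder
case jsonContentType:
return jsEncoder
default:
// Unsupported content type; callers would reject the request.
return nil
}
}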
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configgrpc"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/config/configoptional"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/internal/sharedcomponent"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata"
"go.opentelemetry.io/collector/receiver/xreceiver"
)
const (
defaultTracesURLPath = "/v1/traces"
defaultMetricsURLPath = "/v1/metrics"
defaultLogsURLPath = "/v1/logs"
defaultProfilesURLPath = "/v1development/profiles"
)
// NewFactory creates a new OTLP receiver factory.
func NewFactory() receiver.Factory {
return xreceiver.NewFactory(
metadata.Type,
createDefaultConfig,
xreceiver.WithTraces(createTraces, metadata.TracesStability),
xreceiver.WithMetrics(createMetrics, metadata.MetricsStability),
xreceiver.WithLogs(createLog, metadata.LogsStability),
xreceiver.WithProfiles(createProfiles, metadata.ProfilesStability),
)
}
// createDefaultConfig creates the default configuration for the receiver.
func createDefaultConfig() component.Config {
grpcCfg := configgrpc.NewDefaultServerConfig()
grpcCfg.NetAddr.Endpoint = "localhost:4317"
// We write almost no bytes, so there is no need to tune WriteBufferSize.
grpcCfg.ReadBufferSize = 512 * 1024
httpCfg := confighttp.NewDefaultServerConfig()
httpCfg.Endpoint = "localhost:4318"
// For backward compatibility:
httpCfg.TLS = configoptional.None[configtls.ServerConfig]()
httpCfg.WriteTimeout = 0
httpCfg.ReadHeaderTimeout = 0
httpCfg.IdleTimeout = 0
return &Config{
Protocols: Protocols{
GRPC: configoptional.Default(grpcCfg),
HTTP: configoptional.Default(HTTPConfig{
ServerConfig: httpCfg,
TracesURLPath: defaultTracesURLPath,
MetricsURLPath: defaultMetricsURLPath,
LogsURLPath: defaultLogsURLPath,
}),
},
}
}
// createTraces creates a trace receiver based on the provided config.
func createTraces(
_ context.Context,
set receiver.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerTraceConsumer(nextConsumer)
return r, nil
}
// createMetrics creates a metrics receiver based on the provided config.
func createMetrics(
_ context.Context,
set receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerMetricsConsumer(consumer)
return r, nil
}
// createLog creates a log receiver based on the provided config.
func createLog(
_ context.Context,
set receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerLogsConsumer(consumer)
return r, nil
}
// createProfiles creates a profiles receiver based on the provided config.
func createProfiles(
_ context.Context,
set receiver.Settings,
cfg component.Config,
nextConsumer xconsumer.Profiles,
) (xreceiver.Profiles, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerProfilesConsumer(nextConsumer)
return r, nil
}
// This is the map of already created OTLP receivers for particular configurations.
// We maintain this map because the receiver.Factory is asked for trace and metric
// receivers separately, via CreateTraces() and CreateMetrics(), but they must not
// create separate objects: they must share one otlpReceiver object per configuration.
// When the receiver is shut down, it should be removed from this map so that the same
// configuration can be recreated successfully.
var receivers = sharedcomponent.NewMap[*Config, *otlpReceiver]()
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package errors // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
import (
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/consumer/consumererror"
)
func GetStatusFromError(err error) error {
s, ok := status.FromError(err)
if !ok {
// Default to a retryable error
// https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures
code := codes.Unavailable
if consumererror.IsPermanent(err) {
// If an error is permanent but doesn't have an attached gRPC status, assume it is server-side.
code = codes.Internal
}
s = status.New(code, err.Error())
}
return s.Err()
}
func GetHTTPStatusCodeFromStatus(s *status.Status) int {
// See https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures
// to see if a code is retryable.
// See https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures-1
// to see a list of retryable http status codes.
switch s.Code() {
// Retryable
case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss:
return http.StatusServiceUnavailable
// Retryable
case codes.ResourceExhausted:
return http.StatusTooManyRequests
// Not Retryable
case codes.InvalidArgument:
return http.StatusBadRequest
// Not Retryable
case codes.Unauthenticated:
return http.StatusUnauthorized
// Not Retryable
case codes.PermissionDenied:
return http.StatusForbidden
// Not Retryable
case codes.Unimplemented:
return http.StatusNotFound
// Not Retryable
default:
return http.StatusInternalServerError
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logs // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle logs from OpenTelemetry exporters.
type Receiver struct {
plogotlp.UnimplementedGRPCServer
nextConsumer consumer.Logs
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Logs, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the service Export logs func.
func (r *Receiver) Export(ctx context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) {
ld := req.Logs()
numRecords := ld.LogRecordCount()
if numRecords == 0 {
return plogotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartLogsOp(ctx)
err := r.nextConsumer.ConsumeLogs(ctx, ld)
r.obsreport.EndLogsOp(ctx, dataFormatProtobuf, numRecords, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to an appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503), and
// permanent errors are converted to codes.InvalidArgument (equivalent to HTTP 400).
if err != nil {
return plogotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return plogotlp.NewExportResponse(), nil
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/receiver"
)
// LogsBuilder provides an interface for scrapers to report logs while taking care of all the transformations
// required to produce the log representation defined in the metadata and user config.
type LogsBuilder struct {
logsBuffer plog.Logs
logRecordsBuffer plog.LogRecordSlice
buildInfo component.BuildInfo // contains version information.
}
// LogBuilderOption applies changes to default logs builder.
type LogBuilderOption interface {
apply(*LogsBuilder)
}
func NewLogsBuilder(settings receiver.Settings) *LogsBuilder {
lb := &LogsBuilder{
logsBuffer: plog.NewLogs(),
logRecordsBuffer: plog.NewLogRecordSlice(),
buildInfo: settings.BuildInfo,
}
return lb
}
// ResourceLogsOption applies changes to provided resource logs.
type ResourceLogsOption interface {
apply(plog.ResourceLogs)
}
type resourceLogsOptionFunc func(plog.ResourceLogs)
func (rlof resourceLogsOptionFunc) apply(rl plog.ResourceLogs) {
rlof(rl)
}
// WithLogsResource sets the provided resource on the emitted ResourceLogs.
// It's recommended to use ResourceBuilder to create the resource.
func WithLogsResource(res pcommon.Resource) ResourceLogsOption {
return resourceLogsOptionFunc(func(rl plog.ResourceLogs) {
res.CopyTo(rl.Resource())
})
}
// AppendLogRecord adds a log record to the logs builder.
func (lb *LogsBuilder) AppendLogRecord(lr plog.LogRecord) {
lr.MoveTo(lb.logRecordsBuffer.AppendEmpty())
}
// EmitForResource saves all the generated logs under a new resource and updates the internal state to be ready for
// recording another set of log records as part of another resource. This function can be helpful when one scraper
// needs to emit logs from several resources. Otherwise, calling this function is not required;
// the `Emit` function can be called instead.
// Resource attributes should be provided as ResourceLogsOption arguments.
func (lb *LogsBuilder) EmitForResource(options ...ResourceLogsOption) {
rl := plog.NewResourceLogs()
ils := rl.ScopeLogs().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(lb.buildInfo.Version)
for _, op := range options {
op.apply(rl)
}
if lb.logRecordsBuffer.Len() > 0 {
lb.logRecordsBuffer.MoveAndAppendTo(ils.LogRecords())
lb.logRecordsBuffer = plog.NewLogRecordSlice()
}
if ils.LogRecords().Len() > 0 {
rl.MoveTo(lb.logsBuffer.ResourceLogs().AppendEmpty())
}
}
// Emit returns all the logs accumulated by the logs builder and updates the internal state to be ready for
// recording another set of logs. This function is responsible for applying all the transformations required to
// produce the log representation defined in the metadata and user config.
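//
// For example (an illustrative sketch; `settings` is an assumed
// receiver.Settings and `res` an assumed pcommon.Resource):
//
//	lb := NewLogsBuilder(settings)
//	lr := plog.NewLogRecord()
//	lr.Body().SetStr("something happened")
//	lb.AppendLogRecord(lr)
//	logs := lb.Emit(WithLogsResource(res))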
func (lb *LogsBuilder) Emit(options ...ResourceLogsOption) plog.Logs {
lb.EmitForResource(options...)
logs := lb.logsBuffer
lb.logsBuffer = plog.NewLogs()
return logs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package metrics // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle metrics from OpenTelemetry exporters.
type Receiver struct {
pmetricotlp.UnimplementedGRPCServer
nextConsumer consumer.Metrics
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Metrics, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the service Export metrics func.
func (r *Receiver) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
md := req.Metrics()
dataPointCount := md.DataPointCount()
if dataPointCount == 0 {
return pmetricotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartMetricsOp(ctx)
err := r.nextConsumer.ConsumeMetrics(ctx, md)
r.obsreport.EndMetricsOp(ctx, dataFormatProtobuf, dataPointCount, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to an appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503), and
// permanent errors are converted to codes.InvalidArgument (equivalent to HTTP 400).
if err != nil {
return pmetricotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return pmetricotlp.NewExportResponse(), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package profiles // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
import (
"context"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
)
// Receiver is the type used to handle profiles from OpenTelemetry exporters.
type Receiver struct {
pprofileotlp.UnimplementedGRPCServer
nextConsumer xconsumer.Profiles
}
// New creates a new Receiver reference.
func New(nextConsumer xconsumer.Profiles) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
}
}
// Export implements the service Export profiles func.
func (r *Receiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) {
td := req.Profiles()
// We need to ensure that it propagates the receiver name as a tag
numProfiles := td.SampleCount()
if numProfiles == 0 {
return pprofileotlp.NewExportResponse(), nil
}
err := r.nextConsumer.ConsumeProfiles(ctx, td)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to an appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503), and
// permanent errors are converted to codes.InvalidArgument (equivalent to HTTP 400).
if err != nil {
return pprofileotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return pprofileotlp.NewExportResponse(), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle spans from OpenTelemetry exporters.
type Receiver struct {
ptraceotlp.UnimplementedGRPCServer
nextConsumer consumer.Traces
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Traces, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the trace service's Export method.
func (r *Receiver) Export(ctx context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
td := req.Traces()
// Count spans up front so empty requests can be acknowledged without starting an op.
numSpans := td.SpanCount()
if numSpans == 0 {
return ptraceotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartTracesOp(ctx)
err := r.nextConsumer.ConsumeTraces(ctx, td)
r.obsreport.EndTracesOp(ctx, dataFormatProtobuf, numSpans, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error as-is, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to the appropriate gRPC status before returning it.
// Non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503).
// Permanent errors are converted to codes.InvalidArgument (equivalent to HTTP 400).
if err != nil {
return ptraceotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return ptraceotlp.NewExportResponse(), nil
}
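// Hedged standalone sketch: the permanent vs. non-permanent classification the
// status-code comments above rely on, shown with the public consumererror API.
// The gRPC status mapping itself lives in the internal errors package, so only
// the classification side is demonstrated.
package main
import (
"errors"
"fmt"
"go.opentelemetry.io/collector/consumer/consumererror"
)
func main() {
transient := errors.New("queue full") // retryable -> codes.Unavailable (HTTP 503)
permanent := consumererror.NewPermanent(errors.New("bad payload")) // -> codes.InvalidArgument (HTTP 400)
fmt.Println(consumererror.IsPermanent(transient)) // false
fmt.Println(consumererror.IsPermanent(permanent)) // true
}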
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"context"
"errors"
"net"
"net/http"
"sync"
"go.uber.org/zap"
"google.golang.org/grpc"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/internal/telemetry"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
// otlpReceiver is the type that exposes Traces, Metrics, Logs and Profiles reception.
type otlpReceiver struct {
cfg *Config
serverGRPC *grpc.Server
serverHTTP *http.Server
nextTraces consumer.Traces
nextMetrics consumer.Metrics
nextLogs consumer.Logs
nextProfiles xconsumer.Profiles
shutdownWG sync.WaitGroup
obsrepGRPC *receiverhelper.ObsReport
obsrepHTTP *receiverhelper.ObsReport
settings *receiver.Settings
}
// newOtlpReceiver creates the OpenTelemetry receiver services. It is the
// caller's responsibility to invoke Start to begin reception and Shutdown
// to end it.
func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) {
set.TelemetrySettings = telemetry.DropInjectedAttributes(set.TelemetrySettings, telemetry.SignalKey)
set.Logger.Debug("created signal-agnostic logger")
r := &otlpReceiver{
cfg: cfg,
nextTraces: nil,
nextMetrics: nil,
nextLogs: nil,
nextProfiles: nil,
settings: set,
}
var err error
r.obsrepGRPC, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: set.ID,
Transport: "grpc",
ReceiverCreateSettings: *set,
})
if err != nil {
return nil, err
}
r.obsrepHTTP, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: set.ID,
Transport: "http",
ReceiverCreateSettings: *set,
})
if err != nil {
return nil, err
}
return r, nil
}
func (r *otlpReceiver) startGRPCServer(ctx context.Context, host component.Host) error {
// If GRPC is not enabled, nothing to start.
if !r.cfg.GRPC.HasValue() {
return nil
}
grpcCfg := r.cfg.GRPC.Get()
var err error
if r.serverGRPC, err = grpcCfg.ToServer(ctx, host, r.settings.TelemetrySettings); err != nil {
return err
}
if r.nextTraces != nil {
ptraceotlp.RegisterGRPCServer(r.serverGRPC, trace.New(r.nextTraces, r.obsrepGRPC))
}
if r.nextMetrics != nil {
pmetricotlp.RegisterGRPCServer(r.serverGRPC, metrics.New(r.nextMetrics, r.obsrepGRPC))
}
if r.nextLogs != nil {
plogotlp.RegisterGRPCServer(r.serverGRPC, logs.New(r.nextLogs, r.obsrepGRPC))
}
if r.nextProfiles != nil {
pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles))
}
var gln net.Listener
if gln, err = grpcCfg.NetAddr.Listen(ctx); err != nil {
return err
}
r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", gln.Addr().String()))
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc))
}
}()
return nil
}
func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) error {
// If HTTP is not enabled, nothing to start.
if !r.cfg.HTTP.HasValue() {
return nil
}
httpCfg := r.cfg.HTTP.Get()
httpMux := http.NewServeMux()
if r.nextTraces != nil {
httpTracesReceiver := trace.New(r.nextTraces, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.TracesURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleTraces(resp, req, httpTracesReceiver)
})
}
if r.nextMetrics != nil {
httpMetricsReceiver := metrics.New(r.nextMetrics, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.MetricsURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleMetrics(resp, req, httpMetricsReceiver)
})
}
if r.nextLogs != nil {
httpLogsReceiver := logs.New(r.nextLogs, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.LogsURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleLogs(resp, req, httpLogsReceiver)
})
}
if r.nextProfiles != nil {
httpProfilesReceiver := profiles.New(r.nextProfiles)
httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) {
handleProfiles(resp, req, httpProfilesReceiver)
})
}
var err error
if r.serverHTTP, err = httpCfg.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
return err
}
var hln net.Listener
if hln, err = httpCfg.ServerConfig.ToListener(ctx); err != nil {
return err
}
r.settings.Logger.Info("Starting HTTP server", zap.String("endpoint", hln.Addr().String()))
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
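// Hedged client-side sketch (comment only): the mux above serves OTLP/HTTP, so a
// plain POST with a JSON-encoded ExportRequest works. The endpoint and path are
// assumptions (4318 and /v1/logs are the collector defaults).
//
//	ld := plog.NewLogs()
//	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("hello")
//	body, _ := plogotlp.NewExportRequestFromLogs(ld).MarshalJSON()
//	resp, err := http.Post("http://localhost:4318/v1/logs", "application/json", bytes.NewReader(body))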
// Start starts the configured gRPC and HTTP servers for all registered
// signal consumers.
func (r *otlpReceiver) Start(ctx context.Context, host component.Host) error {
if err := r.startGRPCServer(ctx, host); err != nil {
return err
}
if err := r.startHTTPServer(ctx, host); err != nil {
// It's possible that a valid GRPC server configuration was specified,
// but an invalid HTTP configuration. If that's the case, the successfully
// started GRPC server must be shutdown to ensure no goroutines are leaked.
return errors.Join(err, r.Shutdown(ctx))
}
return nil
}
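// Sketch of the errors.Join semantics Start relies on: both the HTTP start failure
// and any Shutdown failure survive in the joined error and remain individually
// matchable.
//
//	errStart := errors.New("http start failed")
//	errStop := errors.New("grpc stop failed")
//	joined := errors.Join(errStart, errStop)
//	errors.Is(joined, errStart) // true
//	errors.Is(joined, errStop)  // true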
// Shutdown stops receiving: it shuts down the HTTP server, gracefully stops the gRPC server, and waits for the serving goroutines to exit.
func (r *otlpReceiver) Shutdown(ctx context.Context) error {
var err error
if r.serverHTTP != nil {
err = r.serverHTTP.Shutdown(ctx)
}
if r.serverGRPC != nil {
r.serverGRPC.GracefulStop()
}
r.shutdownWG.Wait()
return err
}
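// Sketch of the shutdown ordering above: http.Server.Shutdown honors the caller's
// context deadline, grpc.Server.GracefulStop blocks until in-flight RPCs finish,
// and the WaitGroup confirms both serve goroutines have returned.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	err := r.Shutdown(ctx) // may carry ctx.Err() if HTTP draining exceeds the deadline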
func (r *otlpReceiver) registerTraceConsumer(tc consumer.Traces) {
r.nextTraces = tc
}
func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) {
r.nextMetrics = mc
}
func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) {
r.nextLogs = lc
}
func (r *otlpReceiver) registerProfilesConsumer(tc xconsumer.Profiles) {
r.nextProfiles = tc
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"fmt"
"io"
"mime"
"net/http"
"strconv"
"time"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/internal/statusutil"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
)
// Pre-computed status with code=Internal to be used in case of a marshaling error.
var fallbackMsg = []byte(`{"code": 13, "message": "failed to marshal error message"}`)
const fallbackContentType = "application/json"
func handleTraces(resp http.ResponseWriter, req *http.Request, tracesReceiver *trace.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalTracesRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := tracesReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalTracesResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
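// Hedged decode sketch (comment only): the unmarshal step above turns an OTLP/HTTP
// JSON body into an ExportRequest before Export runs; malformed payloads surface
// as the HTTP 400 branch.
//
//	otlpReq := ptraceotlp.NewExportRequest()
//	err := otlpReq.UnmarshalJSON([]byte(`{"resourceSpans":[]}`)) // nil for this valid, empty body
//	otlpReq.Traces().SpanCount()                                 // 0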
func handleMetrics(resp http.ResponseWriter, req *http.Request, metricsReceiver *metrics.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalMetricsRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := metricsReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalMetricsResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func handleLogs(resp http.ResponseWriter, req *http.Request, logsReceiver *logs.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalLogsRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := logsReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalLogsResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceiver *profiles.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalProfilesRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := profilesReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalProfilesResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) {
if req.Method != http.MethodPost {
handleUnmatchedMethod(resp)
return nil, false
}
switch getMimeTypeFromContentType(req.Header.Get("Content-Type")) {
case pbContentType:
return pbEncoder, true
case jsonContentType:
return jsEncoder, true
default:
handleUnmatchedContentType(resp)
return nil, false
}
}
func readAndCloseBody(resp http.ResponseWriter, req *http.Request, enc encoder) ([]byte, bool) {
body, err := io.ReadAll(req.Body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return nil, false
}
if err = req.Body.Close(); err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return nil, false
}
return body, true
}
// writeError encodes the HTTP error inside an rpc.Status message as required by the OTLP protocol.
func writeError(w http.ResponseWriter, encoder encoder, err error, statusCode int) {
s, ok := status.FromError(err)
if ok {
statusCode = errors.GetHTTPStatusCodeFromStatus(s)
} else {
s = statusutil.NewStatusFromMsgAndHTTPCode(err.Error(), statusCode)
}
writeStatusResponse(w, encoder, statusCode, s)
}
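// Sketch of the branch above: status.FromError reports ok for gRPC status errors
// and leaves plain Go errors to the synthesized-status path.
//
//	_, ok := status.FromError(errors.New("plain error")) // ok == false
//	s, ok := status.FromError(status.Error(codes.ResourceExhausted, "throttled"))
//	// ok == true, s.Code() == codes.ResourceExhausted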
// errorHandler encodes the HTTP error message inside an rpc.Status message as
// required by the OTLP protocol.
func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) {
s := statusutil.NewStatusFromMsgAndHTTPCode(errMsg, statusCode)
contentType := r.Header.Get("Content-Type")
if contentType == "" {
contentType = fallbackContentType
}
switch getMimeTypeFromContentType(contentType) {
case pbContentType:
writeStatusResponse(w, pbEncoder, statusCode, s)
return
case jsonContentType:
writeStatusResponse(w, jsEncoder, statusCode, s)
return
}
writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
}
func writeStatusResponse(w http.ResponseWriter, enc encoder, statusCode int, st *status.Status) {
// https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#otlphttp-throttling
if statusCode == http.StatusTooManyRequests || statusCode == http.StatusServiceUnavailable {
retryInfo := statusutil.GetRetryInfo(st)
// Check if server returned throttling information.
if retryInfo != nil {
// We are throttled. Wait before retrying as requested by the server.
// The value of Retry-After field can be either an HTTP-date or a number of
// seconds to delay after the response is received. See https://datatracker.ietf.org/doc/html/rfc7231#section-7.1.3
//
// Retry-After = HTTP-date / delay-seconds
//
// Use delay-seconds since it is easier to format and does not require clock synchronization.
w.Header().Set("Retry-After", strconv.FormatInt(int64(retryInfo.GetRetryDelay().AsDuration()/time.Second), 10))
}
}
msg, err := enc.marshalStatus(st.Proto())
if err != nil {
writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
return
}
writeResponse(w, enc.contentType(), statusCode, msg)
}
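// Sketch of the Retry-After computation above: a RetryInfo detail carrying a
// protobuf Duration is truncated to whole seconds for the HTTP header.
//
//	retryInfo := &errdetails.RetryInfo{RetryDelay: durationpb.New(2500 * time.Millisecond)}
//	strconv.FormatInt(int64(retryInfo.GetRetryDelay().AsDuration()/time.Second), 10) // "2"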
func writeResponse(w http.ResponseWriter, contentType string, statusCode int, msg []byte) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(statusCode)
// Nothing we can do with the error if we cannot write to the response.
_, _ = w.Write(msg)
}
func getMimeTypeFromContentType(contentType string) string {
mediatype, _, err := mime.ParseMediaType(contentType)
if err != nil {
return ""
}
return mediatype
}
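// Sketch of the parsing above: mime.ParseMediaType strips parameters such as
// charset, so "application/json; charset=utf-8" still selects the JSON encoder
// in readContentType.
//
//	mediatype, params, err := mime.ParseMediaType("application/json; charset=utf-8")
//	// mediatype == "application/json", params == map[string]string{"charset": "utf-8"}, err == nil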
func handleUnmatchedMethod(resp http.ResponseWriter) {
hst := http.StatusMethodNotAllowed
writeResponse(resp, "text/plain", hst, fmt.Appendf(nil, "%v method not allowed, supported: [POST]", hst))
}
func handleUnmatchedContentType(resp http.ResponseWriter) {
hst := http.StatusUnsupportedMediaType
writeResponse(resp, "text/plain", hst, fmt.Appendf(nil, "%v unsupported media type, supported: [%s, %s]", hst, jsonContentType, pbContentType))
}