// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
)
type tracesGroup struct {
traces ptrace.Traces
resourceHashes [][16]byte
}
func newTracesGroup() *tracesGroup {
return &tracesGroup{traces: ptrace.NewTraces()}
}
// findOrCreateResourceSpans searches for a ResourceSpans with a matching Resource (by attributes) and returns it. If none is found, a new one is created
func (tg *tracesGroup) findOrCreateResourceSpans(originResource pcommon.Resource, requiredAttributes pcommon.Map) ptrace.ResourceSpans {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rss := tg.traces.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
if tg.resourceHashes[i] == referenceResourceHash {
return rss.At(i)
}
}
rs := tg.traces.ResourceSpans().AppendEmpty()
referenceResource.MoveTo(rs.Resource())
tg.resourceHashes = append(tg.resourceHashes, referenceResourceHash)
return rs
}
type metricsGroup struct {
metrics pmetric.Metrics
resourceHashes [][16]byte
}
func newMetricsGroup() *metricsGroup {
return &metricsGroup{metrics: pmetric.NewMetrics()}
}
// findOrCreateResourceMetrics searches for a ResourceMetrics with a matching Resource (by attributes) and returns it. If none is found, a new one is created
func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resource, requiredAttributes pcommon.Map) pmetric.ResourceMetrics {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rms := mg.metrics.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
if mg.resourceHashes[i] == referenceResourceHash {
return rms.At(i)
}
}
rm := mg.metrics.ResourceMetrics().AppendEmpty()
referenceResource.MoveTo(rm.Resource())
mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash)
return rm
}
type logsGroup struct {
logs plog.Logs
resourceHashes [][16]byte
}
// newLogsGroup returns a new logsGroup
func newLogsGroup() *logsGroup {
return &logsGroup{logs: plog.NewLogs()}
}
// findOrCreateResourceLogs searches for a ResourceLogs with a matching Resource (by attributes) and returns it. If none is found, a new one is created
func (lg *logsGroup) findOrCreateResourceLogs(originResource pcommon.Resource, requiredAttributes pcommon.Map) plog.ResourceLogs {
referenceResource := buildReferenceResource(originResource, requiredAttributes)
referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())
rls := lg.logs.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
if lg.resourceHashes[i] == referenceResourceHash {
return rls.At(i)
}
}
rl := lg.logs.ResourceLogs().AppendEmpty()
referenceResource.MoveTo(rl.Resource())
lg.resourceHashes = append(lg.resourceHashes, referenceResourceHash)
return rl
}
func instrumentationLibrariesEqual(il1, il2 pcommon.InstrumentationScope) bool {
return il1.Name() == il2.Name() && il1.Version() == il2.Version()
}
// matchingScopeSpans searches for a ptrace.ScopeSpans instance matching the
// given InstrumentationScope. If none is found, it creates a new one
func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.InstrumentationScope) ptrace.ScopeSpans {
ilss := rl.ScopeSpans()
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
if instrumentationLibrariesEqual(ils.Scope(), library) {
return ils
}
}
ils := ilss.AppendEmpty()
library.CopyTo(ils.Scope())
return ils
}
// matchingScopeLogs searches for a plog.ScopeLogs instance matching the
// given InstrumentationScope. If none is found, it creates a new one
func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScope) plog.ScopeLogs {
ills := rl.ScopeLogs()
for i := 0; i < ills.Len(); i++ {
sl := ills.At(i)
if instrumentationLibrariesEqual(sl.Scope(), library) {
return sl
}
}
sl := ills.AppendEmpty()
library.CopyTo(sl.Scope())
return sl
}
// matchingScopeMetrics searches for a pmetric.ScopeMetrics instance matching the
// given InstrumentationScope. If none is found, it creates a new one
func matchingScopeMetrics(rm pmetric.ResourceMetrics, library pcommon.InstrumentationScope) pmetric.ScopeMetrics {
ilms := rm.ScopeMetrics()
for i := 0; i < ilms.Len(); i++ {
ilm := ilms.At(i)
if instrumentationLibrariesEqual(ilm.Scope(), library) {
return ilm
}
}
ilm := ilms.AppendEmpty()
library.CopyTo(ilm.Scope())
return ilm
}
// buildReferenceResource returns a new Resource to be looked up among existing Resources:
// its attributes are the origin Resource's attributes merged with the required (grouping) attributes.
func buildReferenceResource(originResource pcommon.Resource, requiredAttributes pcommon.Map) pcommon.Resource {
referenceResource := pcommon.NewResource()
originResource.Attributes().CopyTo(referenceResource.Attributes())
requiredAttributes.Range(func(k string, v pcommon.Value) bool {
v.CopyTo(referenceResource.Attributes().PutEmpty(k))
return true
})
return referenceResource
}
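// exampleBuildReferenceResource is an editorial sketch (not part of the
// original source) illustrating the merge semantics above: the required
// (grouping) attributes are written last, so they overwrite any same-name
// attributes already present on the origin Resource.
func exampleBuildReferenceResource() {
	origin := pcommon.NewResource()
	origin.Attributes().PutStr("host.name", "host-a")
	origin.Attributes().PutStr("tenant", "default")
	required := pcommon.NewMap()
	required.PutStr("tenant", "acme")
	ref := buildReferenceResource(origin, required)
	// ref.Attributes() now holds host.name=host-a and tenant=acme.
	_ = ref
}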
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)
var consumerCapabilities = consumer.Capabilities{MutatesData: true}
// NewFactory returns a new factory for the Group by Attributes processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability))
}
// createDefaultConfig creates the default configuration for the processor.
func createDefaultConfig() component.Config {
return &Config{
GroupByKeys: []string{},
}
}
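// An illustrative configuration sketch (editorial, not part of the original
// source), assuming the Config's GroupByKeys field maps to the `keys` setting:
//
//	processors:
//	  groupbyattrs:
//	    keys:
//	      - host.name
//	      - tenant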
func createGroupByAttrsProcessor(set processor.Settings, attributes []string) (*groupByAttrsProcessor, error) {
var nonEmptyAttributes []string
presentAttributes := make(map[string]struct{})
for _, str := range attributes {
if str != "" {
_, isPresent := presentAttributes[str]
if isPresent {
set.Logger.Warn("A grouping key is already present", zap.String("key", str))
} else {
nonEmptyAttributes = append(nonEmptyAttributes, str)
presentAttributes[str] = struct{}{}
}
}
}
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
return &groupByAttrsProcessor{logger: set.Logger, groupByKeys: nonEmptyAttributes, telemetryBuilder: telemetryBuilder}, nil
}
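// Editorial sketch (not part of the original source): empty keys are dropped
// and duplicate keys are logged and ignored. Assuming set is a valid
// processor.Settings:
//
//	gap, _ := createGroupByAttrsProcessor(set, []string{"host.name", "", "host.name"})
//	// gap.groupByKeys == []string{"host.name"}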
// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
gap.processTraces,
processorhelper.WithCapabilities(consumerCapabilities))
}
// createLogsProcessor creates a logs processor based on this config.
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
gap.processLogs,
processorhelper.WithCapabilities(consumerCapabilities))
}
// createMetricsProcessor creates a metrics processor based on this config.
func createMetricsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Metrics,
) (processor.Metrics, error) {
oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
return nil, err
}
return processorhelper.NewMetrics(
ctx,
set,
cfg,
nextConsumer,
gap.processMetrics,
processorhelper.WithCapabilities(consumerCapabilities))
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorGroupbyattrsLogGroups metric.Int64Histogram
ProcessorGroupbyattrsMetricGroups metric.Int64Histogram
ProcessorGroupbyattrsNumGroupedLogs metric.Int64Counter
ProcessorGroupbyattrsNumGroupedMetrics metric.Int64Counter
ProcessorGroupbyattrsNumGroupedSpans metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedLogs metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedMetrics metric.Int64Counter
ProcessorGroupbyattrsNumNonGroupedSpans metric.Int64Counter
ProcessorGroupbyattrsSpanGroups metric.Int64Histogram
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorGroupbyattrsLogGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_log_groups",
metric.WithDescription("Distribution of groups extracted for logs"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsMetricGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_metric_groups",
metric.WithDescription("Distribution of groups extracted for metrics"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedLogs, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_logs",
metric.WithDescription("Number of logs that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedMetrics, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_metrics",
metric.WithDescription("Number of metrics that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumGroupedSpans, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_grouped_spans",
metric.WithDescription("Number of spans that had attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedLogs, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_logs",
metric.WithDescription("Number of logs that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedMetrics, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_metrics",
metric.WithDescription("Number of metrics that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsNumNonGroupedSpans, err = builder.meter.Int64Counter(
"otelcol_processor_groupbyattrs_num_non_grouped_spans",
metric.WithDescription("Number of spans that did not have attributes grouped"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorGroupbyattrsSpanGroups, err = builder.meter.Int64Histogram(
"otelcol_processor_groupbyattrs_span_groups",
metric.WithDescription("Distribution of groups extracted for spans"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Code generated by mdatagen. DO NOT EDIT.
package metadatatest
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processortest"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
)
// Deprecated: [v0.119.0] Use componenttest.Telemetry
type Telemetry struct {
*componenttest.Telemetry
}
// Deprecated: [v0.119.0] Use componenttest.NewTelemetry
func SetupTelemetry(opts ...componenttest.TelemetryOption) Telemetry {
return Telemetry{Telemetry: componenttest.NewTelemetry(opts...)}
}
// Deprecated: [v0.119.0] Use metadatatest.NewSettings
func (tt *Telemetry) NewSettings() processor.Settings {
return NewSettings(tt.Telemetry)
}
func NewSettings(tt *componenttest.Telemetry) processor.Settings {
set := processortest.NewNopSettings()
set.ID = component.NewID(component.MustNewType("groupbyattrs"))
set.TelemetrySettings = tt.NewTelemetrySettings()
return set
}
// Deprecated: [v0.119.0] Use metadatatest.AssertEqual*
func (tt *Telemetry) AssertMetrics(t *testing.T, expected []metricdata.Metrics, opts ...metricdatatest.Option) {
var md metricdata.ResourceMetrics
require.NoError(t, tt.Reader.Collect(context.Background(), &md))
// ensure all required metrics are present
for _, want := range expected {
got := getMetricFromResource(want.Name, md)
metricdatatest.AssertEqual(t, want, got, opts...)
}
// ensure no additional metrics are emitted
require.Equal(t, len(expected), lenMetrics(md))
}
func AssertEqualProcessorGroupbyattrsLogGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_log_groups",
Description: "Distribution of groups extracted for logs",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_log_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsMetricGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_metric_groups",
Description: "Distribution of groups extracted for metrics",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_metric_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedLogs(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_logs",
Description: "Number of logs that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_logs")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedMetrics(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_metrics",
Description: "Number of metrics that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_metrics")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumGroupedSpans(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_grouped_spans",
Description: "Number of spans that had attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_grouped_spans")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedLogs(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_logs",
Description: "Number of logs that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_logs")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedMetrics(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_metrics",
Description: "Number of metrics that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_metrics")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsNumNonGroupedSpans(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.DataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_num_non_grouped_spans",
Description: "Number of spans that did not have attributes grouped",
Unit: "1",
Data: metricdata.Sum[int64]{
Temporality: metricdata.CumulativeTemporality,
IsMonotonic: true,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_num_non_grouped_spans")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func AssertEqualProcessorGroupbyattrsSpanGroups(t *testing.T, tt *componenttest.Telemetry, dps []metricdata.HistogramDataPoint[int64], opts ...metricdatatest.Option) {
want := metricdata.Metrics{
Name: "otelcol_processor_groupbyattrs_span_groups",
Description: "Distribution of groups extracted for spans",
Unit: "1",
Data: metricdata.Histogram[int64]{
Temporality: metricdata.CumulativeTemporality,
DataPoints: dps,
},
}
got, err := tt.GetMetric("otelcol_processor_groupbyattrs_span_groups")
require.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, opts...)
}
func getMetricFromResource(name string, got metricdata.ResourceMetrics) metricdata.Metrics {
for _, sm := range got.ScopeMetrics {
for _, m := range sm.Metrics {
if m.Name == name {
return m
}
}
}
return metricdata.Metrics{}
}
func lenMetrics(got metricdata.ResourceMetrics) int {
metricsCount := 0
for _, sm := range got.ScopeMetrics {
metricsCount += len(sm.Metrics)
}
return metricsCount
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)
type groupByAttrsProcessor struct {
logger *zap.Logger
groupByKeys []string
telemetryBuilder *metadata.TelemetryBuilder
}
// processTraces processes traces, grouping spans by the configured attributes.
func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
rss := td.ResourceSpans()
tg := newTracesGroup()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ilss := rs.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
ils := ilss.At(j)
for k := 0; k < ils.Spans().Len(); k++ {
span := ils.Spans().At(k)
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(span.Attributes())
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedSpans.Add(ctx, 1)
// Some attributes are going to be moved from span to resource level,
// so we can delete those on the record level
deleteAttributes(requiredAttributes, span.Attributes())
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedSpans.Add(ctx, 1)
}
				// Let's combine the base resource attributes + the extracted (grouped) attributes
// and keep them in the grouping entry
groupedResourceSpans := tg.findOrCreateResourceSpans(rs.Resource(), requiredAttributes)
sp := matchingScopeSpans(groupedResourceSpans, ils.Scope()).Spans().AppendEmpty()
span.CopyTo(sp)
}
}
}
// Copy the grouped data into output
gap.telemetryBuilder.ProcessorGroupbyattrsSpanGroups.Record(ctx, int64(tg.traces.ResourceSpans().Len()))
return tg.traces, nil
}
func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
rl := ld.ResourceLogs()
lg := newLogsGroup()
for i := 0; i < rl.Len(); i++ {
ls := rl.At(i)
ills := ls.ScopeLogs()
for j := 0; j < ills.Len(); j++ {
sl := ills.At(j)
for k := 0; k < sl.LogRecords().Len(); k++ {
log := sl.LogRecords().At(k)
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(log.Attributes())
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedLogs.Add(ctx, 1)
// Some attributes are going to be moved from log record to resource level,
// so we can delete those on the record level
deleteAttributes(requiredAttributes, log.Attributes())
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedLogs.Add(ctx, 1)
}
				// Let's combine the base resource attributes + the extracted (grouped) attributes
// and keep them in the grouping entry
groupedResourceLogs := lg.findOrCreateResourceLogs(ls.Resource(), requiredAttributes)
lr := matchingScopeLogs(groupedResourceLogs, sl.Scope()).LogRecords().AppendEmpty()
log.CopyTo(lr)
}
}
}
// Copy the grouped data into output
gap.telemetryBuilder.ProcessorGroupbyattrsLogGroups.Record(ctx, int64(lg.logs.ResourceLogs().Len()))
return lg.logs, nil
}
func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
rms := md.ResourceMetrics()
mg := newMetricsGroup()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
for k := 0; k < ilm.Metrics().Len(); k++ {
metric := ilm.Metrics().At(k)
//exhaustive:enforce
switch metric.Type() {
case pmetric.MetricTypeGauge:
for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Gauge().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Gauge().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeSum:
for pointIndex := 0; pointIndex < metric.Sum().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Sum().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Sum().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeSummary:
for pointIndex := 0; pointIndex < metric.Summary().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Summary().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Summary().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeHistogram:
for pointIndex := 0; pointIndex < metric.Histogram().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Histogram().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.Histogram().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeExponentialHistogram:
for pointIndex := 0; pointIndex < metric.ExponentialHistogram().DataPoints().Len(); pointIndex++ {
dataPoint := metric.ExponentialHistogram().DataPoints().At(pointIndex)
groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
dataPoint.CopyTo(groupedMetric.ExponentialHistogram().DataPoints().AppendEmpty())
}
case pmetric.MetricTypeEmpty:
}
}
}
}
gap.telemetryBuilder.ProcessorGroupbyattrsMetricGroups.Record(ctx, int64(mg.metrics.ResourceMetrics().Len()))
return mg.metrics, nil
}
func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) {
attrsForRemoval.Range(func(key string, _ pcommon.Value) bool {
targetAttrs.Remove(key)
return true
})
}
// extractGroupingAttributes extracts the keys and values of the specified Attributes
// that match the attribute keys used for grouping
// Returns:
// - whether any attribute matched (true) or none (false)
// - the extracted AttributeMap of matching keys and their corresponding values
func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) {
groupingAttributes := pcommon.NewMap()
foundMatch := false
for _, attrKey := range gap.groupByKeys {
attrVal, found := attrMap.Get(attrKey)
if found {
attrVal.CopyTo(groupingAttributes.PutEmpty(attrKey))
foundMatch = true
}
}
return foundMatch, groupingAttributes
}
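// exampleExtractGroupingAttributes is an editorial sketch (not part of the
// original source). With groupByKeys = ["tenant"], only the matching key is
// copied into the returned map; the source attributes are not modified here
// (removal happens separately via deleteAttributes).
func exampleExtractGroupingAttributes() {
	gap := &groupByAttrsProcessor{groupByKeys: []string{"tenant"}}
	attrs := pcommon.NewMap()
	attrs.PutStr("tenant", "acme")
	attrs.PutStr("http.method", "GET")
	matched, grouped := gap.extractGroupingAttributes(attrs)
	// matched == true; grouped contains only tenant=acme.
	_, _ = matched, grouped
}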
// Searches for a metric with the same name and type in the specified ScopeMetrics and returns it. If none is found, it is created.
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric {
// Loop through all metrics and try to find the one that matches with the one we search for
// (name and type)
for i := 0; i < ilm.Metrics().Len(); i++ {
metric := ilm.Metrics().At(i)
if metric.Name() == searchedMetric.Name() && metric.Type() == searchedMetric.Type() {
return metric
}
}
// We're here, which means that we haven't found our metric, so we need to create a new one, with the same name and type
metric := ilm.Metrics().AppendEmpty()
metric.SetDescription(searchedMetric.Description())
metric.SetName(searchedMetric.Name())
metric.SetUnit(searchedMetric.Unit())
searchedMetric.Metadata().CopyTo(metric.Metadata())
// Move other special type specific values
//exhaustive:enforce
switch searchedMetric.Type() {
case pmetric.MetricTypeHistogram:
metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality())
case pmetric.MetricTypeExponentialHistogram:
metric.SetEmptyExponentialHistogram().SetAggregationTemporality(searchedMetric.ExponentialHistogram().AggregationTemporality())
case pmetric.MetricTypeSum:
metric.SetEmptySum().SetAggregationTemporality(searchedMetric.Sum().AggregationTemporality())
metric.Sum().SetIsMonotonic(searchedMetric.Sum().IsMonotonic())
case pmetric.MetricTypeGauge:
metric.SetEmptyGauge()
case pmetric.MetricTypeSummary:
metric.SetEmptySummary()
case pmetric.MetricTypeEmpty:
}
return metric
}
// Returns the Metric in the appropriate Resource matching with the specified Attributes
func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes(
ctx context.Context,
mg *metricsGroup,
originResourceMetrics pmetric.ResourceMetrics,
ilm pmetric.ScopeMetrics,
metric pmetric.Metric,
attributes pcommon.Map,
) pmetric.Metric {
toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes)
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1)
// These attributes are going to be moved from datapoint to resource level,
// so we can delete those on the datapoint
deleteAttributes(requiredAttributes, attributes)
} else {
gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedMetrics.Add(ctx, 1)
}
// Get the ResourceMetrics matching with these attributes
groupedResourceMetrics := mg.findOrCreateResourceMetrics(originResourceMetrics.Resource(), requiredAttributes)
// Get the corresponding instrumentation library
groupedInstrumentationLibrary := matchingScopeMetrics(groupedResourceMetrics, ilm.Scope())
// Return the metric in this resource
return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package logdedupprocessor provides a processor that deduplicates logs by aggregating duplicate records over an interval.
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"errors"
"fmt"
"strings"
"time"
"go.opentelemetry.io/collector/component"
)
// Config defaults
const (
// defaultInterval is the default export interval.
defaultInterval = 10 * time.Second
// defaultLogCountAttribute is the default log count attribute
defaultLogCountAttribute = "log_count"
// defaultTimezone is the default timezone
defaultTimezone = "UTC"
// bodyField is the name of the body field
bodyField = "body"
// attributeField is the name of the attribute field
attributeField = "attributes"
)
// Config errors
var (
errInvalidLogCountAttribute = errors.New("log_count_attribute must be set")
errInvalidInterval = errors.New("interval must be greater than 0")
errCannotExcludeBody = errors.New("cannot exclude the entire body")
errCannotIncludeBody = errors.New("cannot include the entire body")
)
// Config is the config of the processor.
type Config struct {
LogCountAttribute string `mapstructure:"log_count_attribute"`
Interval time.Duration `mapstructure:"interval"`
Timezone string `mapstructure:"timezone"`
ExcludeFields []string `mapstructure:"exclude_fields"`
IncludeFields []string `mapstructure:"include_fields"`
Conditions []string `mapstructure:"conditions"`
}
// createDefaultConfig returns the default config for the processor.
func createDefaultConfig() component.Config {
return &Config{
LogCountAttribute: defaultLogCountAttribute,
Interval: defaultInterval,
Timezone: defaultTimezone,
ExcludeFields: []string{},
IncludeFields: []string{},
Conditions: []string{},
}
}
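// An illustrative configuration sketch (editorial, not part of the original
// source), using the mapstructure keys declared on Config above; the OTTL
// condition shown is a hypothetical example:
//
//	processors:
//	  logdedup:
//	    interval: 60s
//	    log_count_attribute: dedup_count
//	    timezone: America/New_York
//	    exclude_fields:
//	      - attributes.timestamp
//	    conditions:
//	      - attributes["dedup"] == true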
// Validate validates the configuration
func (c Config) Validate() error {
if c.Interval <= 0 {
return errInvalidInterval
}
if c.LogCountAttribute == "" {
return errInvalidLogCountAttribute
}
_, err := time.LoadLocation(c.Timezone)
if err != nil {
return fmt.Errorf("timezone is invalid: %w", err)
}
if len(c.ExcludeFields) > 0 && len(c.IncludeFields) > 0 {
return errors.New("cannot define both exclude_fields and include_fields")
}
if err = c.validateExcludeFields(); err != nil {
return err
}
if err = c.validateIncludeFields(); err != nil {
return err
}
return nil
}
// validateExcludeFields validates that all exclude fields are well-formed and unique
func (c Config) validateExcludeFields() error {
knownExcludeFields := make(map[string]struct{})
for _, field := range c.ExcludeFields {
// Special check to make sure the entire body is not excluded
if field == bodyField {
return errCannotExcludeBody
}
// Split and ensure the field starts with `body` or `attributes`
parts := strings.Split(field, fieldDelimiter)
if parts[0] != bodyField && parts[0] != attributeField {
return fmt.Errorf("an excludefield must start with %s or %s", bodyField, attributeField)
}
// If a field is valid make sure we haven't already seen it
if _, ok := knownExcludeFields[field]; ok {
return fmt.Errorf("duplicate exclude_field %s", field)
}
knownExcludeFields[field] = struct{}{}
}
return nil
}
// validateIncludeFields validates that all include fields are well-formed and unique
func (c Config) validateIncludeFields() error {
knownFields := make(map[string]struct{})
for _, field := range c.IncludeFields {
// Special check to make sure the entire body is not included
if field == bodyField {
return errCannotIncludeBody
}
// Split and ensure the field starts with `body` or `attributes`
parts := strings.Split(field, fieldDelimiter)
if parts[0] != bodyField && parts[0] != attributeField {
return fmt.Errorf("an include_fields must start with %s or %s", bodyField, attributeField)
}
// If a field is valid make sure we haven't already seen it
if _, ok := knownFields[field]; ok {
return fmt.Errorf("duplicate include_fields %s", field)
}
knownFields[field] = struct{}{}
}
return nil
}
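// Editorial note (not part of the original source): field keys must be rooted
// at `body` or `attributes` and use `.` as a path delimiter; a literal dot in
// a key can be backslash-escaped (see splitField). For example:
//
//	body.message          valid
//	attributes.host\.name valid (matches the single attribute key "host.name")
//	body                  rejected: the entire body cannot be excluded or included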
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// Attribute names for first and last observed timestamps
const (
firstObservedTSAttr = "first_observed_timestamp"
lastObservedTSAttr = "last_observed_timestamp"
)
// timeNow can be reassigned for testing
var timeNow = time.Now
// logAggregator tracks the number of times a specific logRecord has been seen.
type logAggregator struct {
resources map[uint64]*resourceAggregator
logCountAttribute string
timezone *time.Location
telemetryBuilder *metadata.TelemetryBuilder
dedupFields []string
}
// newLogAggregator creates a new logAggregator.
func newLogAggregator(logCountAttribute string, timezone *time.Location, telemetryBuilder *metadata.TelemetryBuilder, dedupFields []string) *logAggregator {
return &logAggregator{
resources: make(map[uint64]*resourceAggregator),
logCountAttribute: logCountAttribute,
timezone: timezone,
telemetryBuilder: telemetryBuilder,
dedupFields: dedupFields,
}
}
// Export exports the aggregated log records as plog.Logs
func (l *logAggregator) Export(ctx context.Context) plog.Logs {
logs := plog.NewLogs()
for _, resourceAggregator := range l.resources {
rl := logs.ResourceLogs().AppendEmpty()
resourceAggregator.resource.CopyTo(rl.Resource())
for _, scopeAggregator := range resourceAggregator.scopeCounters {
sl := rl.ScopeLogs().AppendEmpty()
scopeAggregator.scope.CopyTo(sl.Scope())
for _, logAggregator := range scopeAggregator.logCounters {
// Record aggregated logs records
l.telemetryBuilder.DedupProcessorAggregatedLogs.Record(ctx, logAggregator.count)
lr := sl.LogRecords().AppendEmpty()
logAggregator.logRecord.CopyTo(lr)
// Set log record timestamps
lr.SetTimestamp(pcommon.NewTimestampFromTime(timeNow()))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(logAggregator.firstObservedTimestamp))
// Add attributes for log count and first/last observed timestamps
lr.Attributes().EnsureCapacity(lr.Attributes().Len() + 3)
lr.Attributes().PutInt(l.logCountAttribute, logAggregator.count)
firstTimestampStr := logAggregator.firstObservedTimestamp.In(l.timezone).Format(time.RFC3339)
lr.Attributes().PutStr(firstObservedTSAttr, firstTimestampStr)
lastTimestampStr := logAggregator.lastObservedTimestamp.In(l.timezone).Format(time.RFC3339)
lr.Attributes().PutStr(lastObservedTSAttr, lastTimestampStr)
}
}
}
return logs
}
// Add adds the logRecord to the resource aggregator that is identified by the resource attributes
func (l *logAggregator) Add(resource pcommon.Resource, scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
key := getResourceKey(resource)
resourceAggregator, ok := l.resources[key]
if !ok {
resourceAggregator = newResourceAggregator(resource, l.dedupFields)
l.resources[key] = resourceAggregator
}
resourceAggregator.Add(scope, logRecord)
}
// Reset resets the counter.
func (l *logAggregator) Reset() {
l.resources = make(map[uint64]*resourceAggregator)
}
// resourceAggregator dimensions the counter by resource.
type resourceAggregator struct {
resource pcommon.Resource
scopeCounters map[uint64]*scopeAggregator
dedupFields []string
}
// newResourceAggregator creates a new resourceAggregator.
func newResourceAggregator(resource pcommon.Resource, dedupFields []string) *resourceAggregator {
return &resourceAggregator{
resource: resource,
scopeCounters: make(map[uint64]*scopeAggregator),
dedupFields: dedupFields,
}
}
// Add increments the counter that the logRecord matches.
func (r *resourceAggregator) Add(scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
key := getScopeKey(scope)
scopeAggregator, ok := r.scopeCounters[key]
if !ok {
scopeAggregator = newScopeAggregator(scope, r.dedupFields)
r.scopeCounters[key] = scopeAggregator
}
scopeAggregator.Add(logRecord)
}
// scopeAggregator dimensions the counter by scope.
type scopeAggregator struct {
scope pcommon.InstrumentationScope
logCounters map[uint64]*logCounter
dedupFields []string
}
// newScopeAggregator creates a new scopeAggregator.
func newScopeAggregator(scope pcommon.InstrumentationScope, dedupFields []string) *scopeAggregator {
return &scopeAggregator{
scope: scope,
logCounters: make(map[uint64]*logCounter),
dedupFields: dedupFields,
}
}
// Add increments the counter that the logRecord matches.
func (s *scopeAggregator) Add(logRecord plog.LogRecord) {
key := getLogKey(logRecord, s.dedupFields)
lc, ok := s.logCounters[key]
if !ok {
lc = newLogCounter(logRecord)
s.logCounters[key] = lc
}
lc.Increment()
}
// logCounter is a counter for a log record.
type logCounter struct {
logRecord plog.LogRecord
firstObservedTimestamp time.Time
lastObservedTimestamp time.Time
count int64
}
// newLogCounter creates a new logCounter.
func newLogCounter(logRecord plog.LogRecord) *logCounter {
return &logCounter{
logRecord: logRecord,
count: 0,
firstObservedTimestamp: timeNow().UTC(),
lastObservedTimestamp: timeNow().UTC(),
}
}
// Increment increments the counter.
func (a *logCounter) Increment() {
a.lastObservedTimestamp = timeNow().UTC()
a.count++
}
// getResourceKey creates a unique hash for the resource to use as a map key
func getResourceKey(resource pcommon.Resource) uint64 {
return pdatautil.Hash64(
pdatautil.WithMap(resource.Attributes()),
)
}
// getScopeKey creates a unique hash for the scope to use as a map key
func getScopeKey(scope pcommon.InstrumentationScope) uint64 {
return pdatautil.Hash64(
pdatautil.WithMap(scope.Attributes()),
pdatautil.WithString(scope.Name()),
pdatautil.WithString(scope.Version()),
)
}
// getLogKey creates a unique hash for the log record to use as a map key.
// If dedupFields is non-empty, it is used to determine the fields whose values are hashed.
func getLogKey(logRecord plog.LogRecord, dedupFields []string) uint64 {
if len(dedupFields) > 0 {
var opts []pdatautil.HashOption
for _, field := range dedupFields {
parts := splitField(field)
var m pcommon.Map
switch parts[0] {
case bodyField:
if logRecord.Body().Type() == pcommon.ValueTypeMap {
m = logRecord.Body().Map()
}
case attributeField:
m = logRecord.Attributes()
}
value, ok := getKeyValue(m, parts[1:])
if ok {
opts = append(opts, pdatautil.WithString(value.AsString()))
}
}
if len(opts) > 0 {
return pdatautil.Hash64(opts...)
}
}
return pdatautil.Hash64(
pdatautil.WithMap(logRecord.Attributes()),
pdatautil.WithValue(logRecord.Body()),
pdatautil.WithString(logRecord.SeverityNumber().String()),
pdatautil.WithString(logRecord.SeverityText()),
)
}
func getKeyValue(valueMap pcommon.Map, keyParts []string) (pcommon.Value, bool) {
nextKeyPart, remainingParts := keyParts[0], keyParts[1:]
// Look for the value associated with the next key part.
// If we don't find it then return
value, ok := valueMap.Get(nextKeyPart)
if !ok {
return pcommon.NewValueEmpty(), false
}
	// No more key parts means we have found the value
	if len(remainingParts) == 0 {
		return value, true
}
// If the value is a map then recurse through with the remaining parts
if value.Type() == pcommon.ValueTypeMap {
return getKeyValue(value.Map(), remainingParts)
}
return pcommon.NewValueEmpty(), false
}
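// exampleGetLogKey is an editorial sketch (not part of the original source).
// With no dedup fields the key covers attributes, body, and severity; passing
// dedupFields narrows the comparison to the listed paths only.
func exampleGetLogKey() {
	lr := plog.NewLogRecord()
	lr.Body().SetStr("connection refused")
	lr.Attributes().PutStr("service", "api")
	fullKey := getLogKey(lr, nil)
	byService := getLogKey(lr, []string{"attributes.service"})
	_, _ = fullKey, byService
}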
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// NewFactory creates a new factory for the processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
)
}
// createLogsProcessor creates a log processor.
func createLogsProcessor(_ context.Context, settings processor.Settings, cfg component.Config, consumer consumer.Logs) (processor.Logs, error) {
processorCfg, ok := cfg.(*Config)
if !ok {
return nil, fmt.Errorf("invalid config type: %+v", cfg)
}
if err := processorCfg.Validate(); err != nil {
return nil, err
}
	// proc avoids shadowing the imported processor package.
	proc, err := newProcessor(processorCfg, consumer, settings)
	if err != nil {
		return nil, fmt.Errorf("error creating processor: %w", err)
	}
	if len(processorCfg.Conditions) == 0 {
		proc.conditions = nil
	} else {
		conditions, err := filterottl.NewBoolExprForLog(
			processorCfg.Conditions,
			filterottl.StandardLogFuncs(),
			ottl.PropagateError,
			settings.TelemetrySettings,
		)
		if err != nil {
			return nil, fmt.Errorf("invalid condition: %w", err)
		}
		proc.conditions = conditions
	}
	return proc, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"fmt"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
)
const (
// fieldDelimiter is the delimiter used to split a field key into its parts.
fieldDelimiter = "."
// fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key.
fieldEscapeKeyReplacement = "{TEMP_REPLACE}"
)
// fieldRemover handles removing excluded fields from log records
type fieldRemover struct {
fields []*field
}
// field represents a field and its compound key to match on
type field struct {
keyParts []string
}
// newFieldRemover creates a new field remover based on the passed in field keys
func newFieldRemover(fieldKeys []string) *fieldRemover {
fe := &fieldRemover{
fields: make([]*field, 0, len(fieldKeys)),
}
for _, f := range fieldKeys {
fe.fields = append(fe.fields, &field{
keyParts: splitField(f),
})
}
return fe
}
// RemoveFields removes any body or attribute fields that match in the log record
func (fe *fieldRemover) RemoveFields(logRecord plog.LogRecord) {
for _, field := range fe.fields {
field.removeField(logRecord)
}
}
// removeField removes the field from the log record if it exists
func (f *field) removeField(logRecord plog.LogRecord) {
firstPart, remainingParts := f.keyParts[0], f.keyParts[1:]
switch firstPart {
case bodyField:
// If body is a map then recurse through to remove the field
if logRecord.Body().Type() == pcommon.ValueTypeMap {
removeFieldFromMap(logRecord.Body().Map(), remainingParts)
}
case attributeField:
// Remove all attributes
if len(remainingParts) == 0 {
logRecord.Attributes().Clear()
return
}
// Recurse through map and remove fields
removeFieldFromMap(logRecord.Attributes(), remainingParts)
}
}
// removeFieldFromMap recurses through the map and removes the field if it's found.
func removeFieldFromMap(valueMap pcommon.Map, keyParts []string) {
nextKeyPart, remainingParts := keyParts[0], keyParts[1:]
// Look for the value associated with the next key part.
// If we don't find it then return
value, ok := valueMap.Get(nextKeyPart)
if !ok {
return
}
	// No more key parts means we have found the value, so remove it
if len(remainingParts) == 0 {
valueMap.Remove(nextKeyPart)
return
}
// If the value is a map then recurse through with the remaining parts
if value.Type() == pcommon.ValueTypeMap {
removeFieldFromMap(value.Map(), remainingParts)
}
}
// splitField splits a field key into its parts.
// It replaces escaped delimiters with the full delimiter after splitting.
func splitField(fieldKey string) []string {
escapedKey := strings.ReplaceAll(fieldKey, fmt.Sprintf("\\%s", fieldDelimiter), fieldEscapeKeyReplacement)
keyParts := strings.Split(escapedKey, fieldDelimiter)
// Replace the temporarily escaped delimiters with the actual delimiter.
for i := range keyParts {
keyParts[i] = strings.ReplaceAll(keyParts[i], fieldEscapeKeyReplacement, fieldDelimiter)
}
return keyParts
}
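// exampleSplitField is an editorial sketch (not part of the original source)
// showing delimiter escaping: a backslash-escaped dot survives the split as a
// literal dot inside a single key part.
func exampleSplitField() {
	parts := splitField(`attributes.host\.name`)
	// parts == []string{"attributes", "host.name"}
	_ = parts
}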
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configtelemetry"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/noop"
"go.opentelemetry.io/otel/trace"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
DedupProcessorAggregatedLogs metric.Int64Histogram
level configtelemetry.Level
}
// telemetryBuilderOption applies changes to default builder.
type telemetryBuilderOption func(*TelemetryBuilder)
// WithLevel sets the current telemetry level for the component.
func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption {
return func(builder *TelemetryBuilder) {
builder.level = lvl
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{level: configtelemetry.LevelBasic}
for _, op := range options {
op(&builder)
}
var err, errs error
if builder.level >= configtelemetry.LevelBasic {
builder.meter = Meter(settings)
} else {
builder.meter = noop.Meter{}
}
builder.DedupProcessorAggregatedLogs, err = builder.meter.Int64Histogram(
"otelcol_dedup_processor_aggregated_logs",
metric.WithDescription("Number of log records that were aggregated together."),
metric.WithUnit("{records}"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"
import (
"context"
"fmt"
"sync"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/processor"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)
// logDedupProcessor is a processor that counts duplicate instances of logs and periodically emits one aggregated record per group.
type logDedupProcessor struct {
emitInterval time.Duration
conditions *ottl.ConditionSequence[ottllog.TransformContext]
aggregator *logAggregator
remover *fieldRemover
nextConsumer consumer.Logs
logger *zap.Logger
cancel context.CancelFunc
wg sync.WaitGroup
mux sync.Mutex
}
func newProcessor(cfg *Config, nextConsumer consumer.Logs, settings processor.Settings) (*logDedupProcessor, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(settings.TelemetrySettings)
if err != nil {
return nil, fmt.Errorf("failed to create telemetry builder: %w", err)
}
	// This should not happen due to config validation but we check anyway.
timezone, err := time.LoadLocation(cfg.Timezone)
if err != nil {
return nil, fmt.Errorf("invalid timezone: %w", err)
}
return &logDedupProcessor{
emitInterval: cfg.Interval,
aggregator: newLogAggregator(cfg.LogCountAttribute, timezone, telemetryBuilder, cfg.IncludeFields),
remover: newFieldRemover(cfg.ExcludeFields),
nextConsumer: nextConsumer,
logger: settings.Logger,
}, nil
}
// Start starts the processor.
func (p *logDedupProcessor) Start(ctx context.Context, _ component.Host) error {
ctx, cancel := context.WithCancel(ctx)
p.cancel = cancel
p.wg.Add(1)
go p.handleExportInterval(ctx)
return nil
}
// Capabilities returns the consumer's capabilities.
func (p *logDedupProcessor) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: true}
}
// Shutdown stops the processor.
func (p *logDedupProcessor) Shutdown(_ context.Context) error {
if p.cancel != nil {
// Call cancel to stop the export interval goroutine and wait for it to finish.
p.cancel()
p.wg.Wait()
}
return nil
}
// ConsumeLogs processes the logs.
func (p *logDedupProcessor) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
p.mux.Lock()
defer p.mux.Unlock()
for i := 0; i < pl.ResourceLogs().Len(); i++ {
rl := pl.ResourceLogs().At(i)
resource := rl.Resource()
for j := 0; j < rl.ScopeLogs().Len(); j++ {
sl := rl.ScopeLogs().At(j)
scope := sl.Scope()
logs := sl.LogRecords()
logs.RemoveIf(func(logRecord plog.LogRecord) bool {
if p.conditions == nil {
p.aggregateLog(logRecord, scope, resource)
return true
}
logCtx := ottllog.NewTransformContext(logRecord, scope, resource, sl, rl)
logMatch, err := p.conditions.Eval(ctx, logCtx)
if err != nil {
p.logger.Error("error matching conditions", zap.Error(err))
return false
}
if logMatch {
p.aggregateLog(logRecord, scope, resource)
}
return logMatch
})
}
}
// immediately consume any logs that didn't match any conditions
if pl.LogRecordCount() > 0 {
err := p.nextConsumer.ConsumeLogs(ctx, pl)
if err != nil {
p.logger.Error("failed to consume logs", zap.Error(err))
}
}
return nil
}
func (p *logDedupProcessor) aggregateLog(logRecord plog.LogRecord, scope pcommon.InstrumentationScope, resource pcommon.Resource) {
p.remover.RemoveFields(logRecord)
p.aggregator.Add(resource, scope, logRecord)
}
// handleExportInterval exports aggregated logs at the configured interval.
func (p *logDedupProcessor) handleExportInterval(ctx context.Context) {
defer p.wg.Done()
ticker := time.NewTicker(p.emitInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
// Export any remaining logs
p.exportLogs(ctx)
if err := ctx.Err(); err != context.Canceled {
p.logger.Error("context error", zap.Error(err))
}
return
case <-ticker.C:
p.exportLogs(ctx)
}
}
}
// exportLogs exports the logs to the next consumer.
func (p *logDedupProcessor) exportLogs(ctx context.Context) {
p.mux.Lock()
defer p.mux.Unlock()
logs := p.aggregator.Export(ctx)
// Only send logs if we have some
if logs.LogRecordCount() > 0 {
err := p.nextConsumer.ConsumeLogs(ctx, logs)
if err != nil {
p.logger.Error("failed to consume logs", zap.Error(err))
}
}
p.aggregator.Reset()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"fmt"
"math"
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)
type AttributeSource string
const (
traceIDAttributeSource = AttributeSource("traceID")
recordAttributeSource = AttributeSource("record")
defaultAttributeSource = traceIDAttributeSource
)
var validAttributeSource = map[AttributeSource]bool{
traceIDAttributeSource: true,
recordAttributeSource: true,
}
// Config has the configuration guiding the sampler processor.
type Config struct {
	// SamplingPercentage is the percentage rate at which traces or logs are sampled. Defaults to zero, i.e., no sampling.
	// Values greater than or equal to 100 are treated as "sample all traces/logs".
SamplingPercentage float32 `mapstructure:"sampling_percentage"`
	// HashSeed allows one to configure the hashing seed. This is important in scenarios where multiple layers of collectors
	// have different sampling rates: if they use the same seed, items that pass one layer may all pass the other even though
	// the sampling rates differ. Configuring different seeds avoids that.
HashSeed uint32 `mapstructure:"hash_seed"`
// Mode selects the sampling behavior. Supported values:
//
// - "hash_seed": the legacy behavior of this processor.
// Using an FNV hash combined with the HashSeed value, this
// sampler performs a non-consistent probabilistic
// downsampling. The number of spans output is expected to
// equal SamplingPercentage (as a ratio) times the number of
	// spans input, assuming good behavior from FNV and good
// entropy in the hashed attributes or TraceID.
//
// - "equalizing": Using an OTel-specified consistent sampling
// mechanism, this sampler selectively reduces the effective
// sampling probability of arriving spans. This can be
// useful to select a small fraction of complete traces from
// a stream with mixed sampling rates. The rate of spans
// passing through depends on how much sampling has already
// been applied. If an arriving span was head sampled at
	// the same probability, it passes through. If the span
// arrives with lower probability, a warning is logged
// because it means this sampler is configured with too
// large a sampling probability to ensure complete traces.
//
// - "proportional": Using an OTel-specified consistent sampling
// mechanism, this sampler reduces the effective sampling
// probability of each span by `SamplingProbability`.
Mode SamplerMode `mapstructure:"mode"`
// FailClosed indicates to not sample data (the processor will
// fail "closed") in case of error, such as failure to parse
	// the tracestate field or a missing randomness attribute.
//
	// By default, failure cases are sampled (the processor
	// fails "open"). Sampling priority-based decisions are made after
// FailClosed is processed, making it possible to sample
// despite errors using priority.
FailClosed bool `mapstructure:"fail_closed"`
// SamplingPrecision is how many hex digits of sampling
// threshold will be encoded, from 1 up to 14. Default is 4.
// 0 is treated as full precision.
SamplingPrecision int `mapstructure:"sampling_precision"`
///////
// Logs only fields below.
	// AttributeSource (logs only) defines where to look for the attribute specified in from_attribute. The allowed values are
	// `traceID` or `record`. Default is `traceID`.
AttributeSource `mapstructure:"attribute_source"`
// FromAttribute (logs only) The optional name of a log record attribute used for sampling purposes, such as a
// unique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`.
FromAttribute string `mapstructure:"from_attribute"`
// SamplingPriority (logs only) enables using a log record attribute as the sampling priority of the log record.
SamplingPriority string `mapstructure:"sampling_priority"`
}
var _ component.Config = (*Config)(nil)
// Validate checks if the processor configuration is valid
func (cfg *Config) Validate() error {
pct := float64(cfg.SamplingPercentage)
if math.IsInf(pct, 0) || math.IsNaN(pct) {
return fmt.Errorf("sampling rate is invalid: %f%%", cfg.SamplingPercentage)
}
ratio := pct / 100.0
switch {
case ratio < 0:
return fmt.Errorf("sampling rate is negative: %f%%", cfg.SamplingPercentage)
case ratio == 0:
// Special case
case ratio < sampling.MinSamplingProbability:
// Too-small case
return fmt.Errorf("sampling rate is too small: %g%%", cfg.SamplingPercentage)
default:
// Note that ratio > 1 is specifically allowed by the README, taken to mean 100%
}
if cfg.AttributeSource != "" && !validAttributeSource[cfg.AttributeSource] {
return fmt.Errorf("invalid attribute source: %v. Expected: %v or %v", cfg.AttributeSource, traceIDAttributeSource, recordAttributeSource)
}
if cfg.SamplingPrecision == 0 {
return fmt.Errorf("invalid sampling precision: 0")
} else if cfg.SamplingPrecision > sampling.NumHexDigits {
return fmt.Errorf("sampling precision is too great, should be <= 14: %d", cfg.SamplingPrecision)
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate mdatagen metadata.yaml
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
// The default precision is 4 hex digits, slightly more than the
// original component logic's 14 bits of precision.
const defaultPrecision = 4
// NewFactory returns a new factory for the Probabilistic sampler processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability))
}
func createDefaultConfig() component.Config {
return &Config{
AttributeSource: defaultAttributeSource,
FailClosed: true,
Mode: modeUnset,
SamplingPrecision: defaultPrecision,
}
}
// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
return newTracesProcessor(ctx, set, cfg.(*Config), nextConsumer)
}
// createLogsProcessor creates a log processor based on this config.
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
return newLogsProcessor(ctx, set, nextConsumer, cfg.(*Config))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"encoding/binary"
"hash/fnv"
)
// computeHash creates a hash using the FNV-1a algorithm
func computeHash(b []byte, seed uint32) uint32 {
hash := fnv.New32a()
// the fnv.Write() implementation does not return an error; see hash/fnv/fnv.go
_, _ = hash.Write(i32tob(seed))
_, _ = hash.Write(b)
return hash.Sum32()
}
// i32tob converts a seed to a byte slice to be used as input to fnv.Write()
func i32tob(val uint32) []byte {
r := make([]byte, 4)
binary.LittleEndian.PutUint32(r, val)
return r
}
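// Illustrative sketch: how the legacy hash-seed mode combines
// computeHash with the bucket constants defined alongside the sampler
// modes. The names percent, traceID, and seed below are hypothetical
// inputs, not package identifiers.
//
//	scaled := uint32(percent * percentageScaleFactor)            // accept threshold in [0, 2^14]
//	hashed := computeHash(traceID[:], seed) & bitMaskHashBuckets // 14-bit bucket
//	sampled := hashed < scaled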
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorProbabilisticSamplerCountLogsSampled metric.Int64Counter
ProcessorProbabilisticSamplerCountTracesSampled metric.Int64Counter
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorProbabilisticSamplerCountLogsSampled, err = builder.meter.Int64Counter(
"otelcol_processor_probabilistic_sampler_count_logs_sampled",
metric.WithDescription("Count of logs that were sampled or not"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
builder.ProcessorProbabilisticSamplerCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_probabilistic_sampler_count_traces_sampled",
metric.WithDescription("Count of traces that were sampled or not"),
metric.WithUnit("1"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"errors"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
type logsProcessor struct {
sampler dataSampler
samplingPriority string
precision int
failClosed bool
logger *zap.Logger
telemetryBuilder *metadata.TelemetryBuilder
}
type recordCarrier struct {
record plog.LogRecord
parsed struct {
tvalue string
threshold sampling.Threshold
rvalue string
randomness sampling.Randomness
}
}
var _ samplingCarrier = &recordCarrier{}
func (rc *recordCarrier) get(key string) string {
val, ok := rc.record.Attributes().Get(key)
if !ok || val.Type() != pcommon.ValueTypeStr {
return ""
}
return val.Str()
}
func newLogRecordCarrier(l plog.LogRecord) (samplingCarrier, error) {
var ret error
carrier := &recordCarrier{
record: l,
}
if tvalue := carrier.get("sampling.threshold"); len(tvalue) != 0 {
th, err := sampling.TValueToThreshold(tvalue)
if err != nil {
ret = errors.Join(err, ret)
} else {
carrier.parsed.tvalue = tvalue
carrier.parsed.threshold = th
}
}
if rvalue := carrier.get("sampling.randomness"); len(rvalue) != 0 {
rnd, err := sampling.RValueToRandomness(rvalue)
if err != nil {
ret = errors.Join(err, ret)
} else {
carrier.parsed.rvalue = rvalue
carrier.parsed.randomness = rnd
}
}
return carrier, ret
}
func (rc *recordCarrier) threshold() (sampling.Threshold, bool) {
return rc.parsed.threshold, len(rc.parsed.tvalue) != 0
}
func (rc *recordCarrier) explicitRandomness() (randomnessNamer, bool) {
if len(rc.parsed.rvalue) == 0 {
return newMissingRandomnessMethod(), false
}
return newSamplingRandomnessMethod(rc.parsed.randomness), true
}
func (rc *recordCarrier) updateThreshold(th sampling.Threshold) error {
exist, has := rc.threshold()
if has && sampling.ThresholdLessThan(th, exist) {
return sampling.ErrInconsistentSampling
}
rc.record.Attributes().PutStr("sampling.threshold", th.TValue())
return nil
}
func (rc *recordCarrier) setExplicitRandomness(rnd randomnessNamer) {
rc.parsed.randomness = rnd.randomness()
rc.parsed.rvalue = rnd.randomness().RValue()
rc.record.Attributes().PutStr("sampling.randomness", rnd.randomness().RValue())
}
func (rc *recordCarrier) clearThreshold() {
rc.parsed.threshold = sampling.NeverSampleThreshold
rc.parsed.tvalue = ""
rc.record.Attributes().Remove("sampling.threshold")
}
func (rc *recordCarrier) reserialize() error {
return nil
}
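// Illustrative sketch of the attributes this carrier understands. The
// values below are hypothetical: under OTEP 235, T-value "c" encodes a
// 25% sampling threshold, and the R-value is 14 hex digits of
// explicit randomness.
//
//	rec.Attributes().PutStr("sampling.threshold", "c")
//	rec.Attributes().PutStr("sampling.randomness", "d29d6a7215ced0")
//	carrier, err := newLogRecordCarrier(rec)
//	// On success, carrier.threshold() and carrier.explicitRandomness()
//	// report the parsed values.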
func (*neverSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
// We return a fake randomness value, since it will not be used.
// This avoids a consistency check error for missing randomness.
lrc, err := newLogRecordCarrier(logRec)
return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), lrc, err
}
// randomnessFromLogRecord (hashingSampler) uses a hash function over
// the TraceID or logs attribute source.
func (th *hashingSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
rnd := newMissingRandomnessMethod()
lrc, err := newLogRecordCarrier(logRec)
if th.logsTraceIDEnabled {
value := logRec.TraceID()
if !value.IsEmpty() {
rnd = newTraceIDHashingMethod(randomnessFromBytes(value[:], th.hashSeed))
}
}
if isMissing(rnd) && th.logsRandomnessSourceAttribute != "" {
if value, ok := logRec.Attributes().Get(th.logsRandomnessSourceAttribute); ok {
by := getBytesFromValue(value)
if len(by) > 0 {
rnd = newAttributeHashingMethod(
th.logsRandomnessSourceAttribute,
randomnessFromBytes(by, th.hashSeed),
)
}
}
}
if err != nil {
// In this case, the sampling.randomness or sampling.threshold
// attribute had a parse error.
lrc = nil
} else if _, hasRnd := lrc.explicitRandomness(); hasRnd {
// If the log record contains a randomness value, do not update.
err = ErrRandomnessInUse
lrc = nil
} else if _, hasTh := lrc.threshold(); hasTh {
// If the log record contains a threshold value, do not update.
err = ErrThresholdInUse
lrc = nil
} else if !isMissing(rnd) {
// When no sampling information is already present and we have
// calculated new randomness, add it to the record.
lrc.setExplicitRandomness(rnd)
}
return rnd, lrc, err
}
// randomnessFromLogRecord (consistentTracestateCommon) uses OTEP 235
// semantic conventions, basing its decision only on the TraceID.
func (ctc *consistentTracestateCommon) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
lrc, err := newLogRecordCarrier(logRec)
rnd := newMissingRandomnessMethod()
if err != nil {
// Parse error in sampling.randomness or sampling.threshold
lrc = nil
} else if rv, hasRnd := lrc.explicitRandomness(); hasRnd {
rnd = rv
} else if tid := logRec.TraceID(); !tid.IsEmpty() {
rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(tid))
}
return rnd, lrc, err
}
// newLogsProcessor returns a processor.Logs that will perform head sampling according to the given
// configuration.
func newLogsProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Logs, cfg *Config) (processor.Logs, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
lsp := &logsProcessor{
sampler: makeSampler(cfg, true),
samplingPriority: cfg.SamplingPriority,
precision: cfg.SamplingPrecision,
failClosed: cfg.FailClosed,
logger: set.Logger,
telemetryBuilder: telemetryBuilder,
}
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
lsp.processLogs,
processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}
func (lsp *logsProcessor) processLogs(ctx context.Context, logsData plog.Logs) (plog.Logs, error) {
logsData.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool {
rl.ScopeLogs().RemoveIf(func(ill plog.ScopeLogs) bool {
ill.LogRecords().RemoveIf(func(l plog.LogRecord) bool {
return !commonShouldSampleLogic(
ctx,
l,
lsp.sampler,
lsp.failClosed,
lsp.sampler.randomnessFromLogRecord,
lsp.priorityFunc,
"logs sampler",
lsp.logger,
lsp.telemetryBuilder.ProcessorProbabilisticSamplerCountLogsSampled,
)
})
// Filter out empty ScopeLogs
return ill.LogRecords().Len() == 0
})
// Filter out empty ResourceLogs
return rl.ScopeLogs().Len() == 0
})
if logsData.ResourceLogs().Len() == 0 {
return logsData, processorhelper.ErrSkipProcessingData
}
return logsData, nil
}
func (lsp *logsProcessor) priorityFunc(logRec plog.LogRecord, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) {
// Note: in logs, unlike traces, the sampling priority
// attribute is interpreted as a request to be sampled.
if lsp.samplingPriority != "" {
priorityThreshold := lsp.logRecordToPriorityThreshold(logRec)
if priorityThreshold == sampling.NeverSampleThreshold {
threshold = priorityThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
} else if sampling.ThresholdLessThan(priorityThreshold, threshold) {
threshold = priorityThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
}
}
return rnd, threshold
}
func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sampling.Threshold {
if localPriority, ok := logRec.Attributes().Get(lsp.samplingPriority); ok {
// Potentially raise the sampling probability to minProb
minProb := 0.0
switch localPriority.Type() {
case pcommon.ValueTypeDouble:
minProb = localPriority.Double() / 100.0
case pcommon.ValueTypeInt:
minProb = float64(localPriority.Int()) / 100.0
}
if minProb != 0 {
if th, err := sampling.ProbabilityToThresholdWithPrecision(minProb, lsp.precision); err == nil {
// The record has supplied a valid alternative sampling probability
return th
}
}
}
return sampling.NeverSampleThreshold
}
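// Illustrative sketch: with SamplingPriority configured as the
// hypothetical attribute name "prio", a record attribute prio=25 (int
// or double) maps to minProb 0.25 and hence to a threshold sampling at
// least 25% of records; priorityFunc adopts that threshold only when
// it samples more than the configured one. A missing or zero-valued
// attribute yields NeverSampleThreshold, so such records are dropped.
//
//	rec.Attributes().PutInt("prio", 25) // request >= 25% sampling for this record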
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"errors"
"fmt"
"strconv"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)
const (
// These four can happen at runtime and be returned by
// randomnessFromXXX()
ErrInconsistentArrivingTValue samplerError = "inconsistent arriving threshold: item should not have been sampled"
ErrMissingRandomness samplerError = "missing randomness"
ErrRandomnessInUse samplerError = "item has sampling randomness, equalizing or proportional mode recommended"
ErrThresholdInUse samplerError = "item has sampling threshold, equalizing or proportional mode recommended"
)
const (
// Hashing method: the constants below help translate user-friendly percentages
// to numbers used directly in sampling.
numHashBucketsLg2 = 14
numHashBuckets = 0x4000 // Using a power of 2 to avoid division.
bitMaskHashBuckets = numHashBuckets - 1
percentageScaleFactor = numHashBuckets / 100.0
)
// samplerError values are conditions reported by the sampler that are
// somewhat ordinary and are logged at debug level rather than as errors.
type samplerError string
var _ error = samplerError("")
func (s samplerError) Error() string {
return string(s)
}
// SamplerMode determines which of several modes is used for the
// sampling decision.
type SamplerMode string
const (
// HashSeed applies the hash/fnv hash function originally used in this component.
HashSeed SamplerMode = "hash_seed"
// Equalizing uses OpenTelemetry consistent probability
// sampling information (OTEP 235) and applies an absolute
// threshold to equalize incoming sampling probabilities.
Equalizing SamplerMode = "equalizing"
// Proportional uses OpenTelemetry consistent probability
// sampling information (OTEP 235) and multiplies incoming
// sampling probabilities.
Proportional SamplerMode = "proportional"
// defaultMode is applied when the mode is unset.
defaultMode SamplerMode = HashSeed
// modeUnset indicates the user has not configured the mode.
modeUnset SamplerMode = ""
)
type randomnessNamer interface {
randomness() sampling.Randomness
policyName() string
}
type randomnessMethod sampling.Randomness
func (rm randomnessMethod) randomness() sampling.Randomness {
return sampling.Randomness(rm)
}
type (
traceIDHashingMethod struct{ randomnessMethod }
traceIDW3CSpecMethod struct{ randomnessMethod }
samplingRandomnessMethod struct{ randomnessMethod }
samplingPriorityMethod struct{ randomnessMethod }
)
type missingRandomnessMethod struct{}
func (rm missingRandomnessMethod) randomness() sampling.Randomness {
return sampling.AllProbabilitiesRandomness
}
func (missingRandomnessMethod) policyName() string {
return "missing_randomness"
}
type attributeHashingMethod struct {
randomnessMethod
attribute string
}
func (am attributeHashingMethod) policyName() string {
return am.attribute
}
func (traceIDHashingMethod) policyName() string {
return "trace_id_hash"
}
func (samplingRandomnessMethod) policyName() string {
return "sampling_randomness"
}
func (traceIDW3CSpecMethod) policyName() string {
return "trace_id_w3c"
}
func (samplingPriorityMethod) policyName() string {
return "sampling_priority"
}
var (
_ randomnessNamer = missingRandomnessMethod{}
_ randomnessNamer = traceIDHashingMethod{}
_ randomnessNamer = traceIDW3CSpecMethod{}
_ randomnessNamer = samplingRandomnessMethod{}
_ randomnessNamer = samplingPriorityMethod{}
)
func newMissingRandomnessMethod() randomnessNamer {
return missingRandomnessMethod{}
}
func isMissing(rnd randomnessNamer) bool {
_, ok := rnd.(missingRandomnessMethod)
return ok
}
func newSamplingRandomnessMethod(rnd sampling.Randomness) randomnessNamer {
return samplingRandomnessMethod{randomnessMethod(rnd)}
}
func newTraceIDW3CSpecMethod(rnd sampling.Randomness) randomnessNamer {
return traceIDW3CSpecMethod{randomnessMethod(rnd)}
}
func newTraceIDHashingMethod(rnd sampling.Randomness) randomnessNamer {
return traceIDHashingMethod{randomnessMethod(rnd)}
}
func newSamplingPriorityMethod(rnd sampling.Randomness) randomnessNamer {
return samplingPriorityMethod{randomnessMethod(rnd)}
}
func newAttributeHashingMethod(attribute string, rnd sampling.Randomness) randomnessNamer {
return attributeHashingMethod{
randomnessMethod: randomnessMethod(rnd),
attribute: attribute,
}
}
// samplingCarrier conveys information about the underlying data item
// (whether span or log record) through the sampling decision.
type samplingCarrier interface {
// explicitRandomness returns a randomness value and a boolean
// indicating whether the item had sampling randomness
// explicitly set.
explicitRandomness() (randomnessNamer, bool)
// setExplicitRandomness updates the item with the signal-specific
// encoding for an explicit randomness value.
setExplicitRandomness(randomnessNamer)
// clearThreshold unsets a sampling threshold, which is used to
// clear information that breaks the expected sampling invariants
// described in OTEP 235.
clearThreshold()
// threshold returns a sampling threshold and a boolean
// indicating whether the item had sampling threshold
// explicitly set.
threshold() (sampling.Threshold, bool)
// updateThreshold modifies the sampling threshold. This
// returns an error if the updated sampling threshold would
// lower the adjusted count; the only permissible updates raise
// the adjusted count (i.e., reduce sampling probability).
updateThreshold(sampling.Threshold) error
// reserialize re-encodes the updated sampling information
// into the item, if necessary. For Spans, this re-encodes
// the tracestate. This is a no-op for logs records.
reserialize() error
}
// dataSampler implements the logic of a sampling mode.
type dataSampler interface {
// decide reports the result based on a probabilistic decision.
decide(carrier samplingCarrier) sampling.Threshold
// randomnessFromSpan extracts randomness and returns a carrier specific to traces data.
randomnessFromSpan(s ptrace.Span) (randomness randomnessNamer, carrier samplingCarrier, err error)
// randomnessFromLogRecord extracts randomness and returns a carrier specific to logs data.
randomnessFromLogRecord(s plog.LogRecord) (randomness randomnessNamer, carrier samplingCarrier, err error)
}
func (sm *SamplerMode) UnmarshalText(in []byte) error {
switch mode := SamplerMode(in); mode {
case HashSeed,
Equalizing,
Proportional,
modeUnset:
*sm = mode
return nil
default:
return fmt.Errorf("unsupported sampler mode %q", mode)
}
}
// hashingSampler is the original hash-based calculation. It is an
// equalizing sampler with randomness calculation that matches the
// original implementation. This hash-based implementation is limited
// to 14 bits of precision.
type hashingSampler struct {
hashSeed uint32
tvalueThreshold sampling.Threshold
// Logs only: the name of the attribute used to obtain randomness
logsRandomnessSourceAttribute string
// Logs only: whether the TraceID is used to obtain randomness
logsTraceIDEnabled bool
}
func (th *hashingSampler) decide(_ samplingCarrier) sampling.Threshold {
return th.tvalueThreshold
}
// consistentTracestateCommon contains the common aspects of the
// Proportional and Equalizing sampler modes. These samplers sample
// using the TraceID and do not support use of the logs source attribute.
type consistentTracestateCommon struct{}
// neverSampler always decides not to sample.
type neverSampler struct{}
func (*neverSampler) decide(_ samplingCarrier) sampling.Threshold {
return sampling.NeverSampleThreshold
}
// equalizingSampler raises thresholds up to a fixed value.
type equalizingSampler struct {
// TraceID-randomness-based calculation
tvalueThreshold sampling.Threshold
consistentTracestateCommon
}
func (te *equalizingSampler) decide(carrier samplingCarrier) sampling.Threshold {
if tv, has := carrier.threshold(); has && sampling.ThresholdLessThan(te.tvalueThreshold, tv) {
return tv
}
return te.tvalueThreshold
}
// proportionalSampler raises thresholds relative to incoming value.
type proportionalSampler struct {
// ratio in the range [2**-56, 1]
ratio float64
// precision is the precision in number of hex digits
precision int
consistentTracestateCommon
}
func (tp *proportionalSampler) decide(carrier samplingCarrier) sampling.Threshold {
incoming := 1.0
if tv, has := carrier.threshold(); has {
incoming = tv.Probability()
}
// The product probability can underflow; that case is
// checked below.
threshold, err := sampling.ProbabilityToThresholdWithPrecision(incoming*tp.ratio, tp.precision)
// Check the only known error condition.
if errors.Is(err, sampling.ErrProbabilityRange) {
// Considered valid, a case where the sampling probability
// has fallen below the minimum supported value and simply
// becomes unsampled.
return sampling.NeverSampleThreshold
}
return threshold
}
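// Illustrative sketch of the proportional composition, as a worked
// example rather than output of this code: an arriving span with
// threshold th:8 has incoming probability 0.5; with ratio 0.25 the
// outgoing probability is 0.5 * 0.25 = 0.125, i.e., a rejection
// threshold of 0.875, encoded at precision 1 as th:e.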
func getBytesFromValue(value pcommon.Value) []byte {
if value.Type() == pcommon.ValueTypeBytes {
return value.Bytes().AsRaw()
}
return []byte(value.AsString())
}
func randomnessFromBytes(b []byte, hashSeed uint32) sampling.Randomness {
hashed32 := computeHash(b, hashSeed)
hashed := uint64(hashed32 & bitMaskHashBuckets)
// Ordinarily, hashed is compared against an acceptance
// threshold i.e., sampled when hashed < scaledSamplerate,
// which has the form R < T with T in [1, 2^14] and
// R in [0, 2^14-1].
//
// Here, modify R to R' and T to T', so that the sampling
// equation has identical form to the specification, i.e., T'
// <= R', using:
//
// T' = numHashBuckets-T
// R' = numHashBuckets-1-R
//
// As a result, R' has the correct most-significant 14 bits to
// use in an R-value.
rprime14 := numHashBuckets - 1 - hashed
// There are 18 unused bits from the FNV hash function.
unused18 := uint64(hashed32 >> (32 - numHashBucketsLg2))
mixed28 := unused18 ^ (unused18 << 10)
// The 56 bit quantity here consists of, most- to least-significant:
// - 14 bits: R' = numHashBuckets - 1 - hashed
// - 28 bits: mixture of unused 18 bits
// - 14 bits: original `hashed`.
rnd56 := (rprime14 << 42) | (mixed28 << 14) | hashed
// Note: by construction:
// - OTel samplers make the same probabilistic decision with this r-value,
// - only 14 out of 56 bits are used in the sampling decision,
// - there are only 32 actual random bits.
rnd, _ := sampling.UnsignedToRandomness(rnd56)
return rnd
}
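// Worked example with a hypothetical hash value: if computeHash
// returns hashed32 = 5, then hashed = 5, rprime14 = 0x3FFA, and
// unused18 and mixed28 are 0, so rnd56 = 0x3FFA<<42 | 5. A
// hashingSampler threshold T' = (numHashBuckets - scaled) << 42 then
// satisfies T' <= rnd56 exactly when hashed < scaled, reproducing the
// legacy decision.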
func consistencyCheck(rnd randomnessNamer, carrier samplingCarrier) error {
// Without randomness, do not check the threshold.
if isMissing(rnd) {
return ErrMissingRandomness
}
// When the carrier is nil, it means there was trouble parsing the
// tracestate or trace-related attributes. In this case, skip the
// consistency check.
if carrier == nil {
return nil
}
// Consistency check: if the TraceID is out of range, the
// TValue is a lie. If inconsistent, clear it and return an error.
if tv, has := carrier.threshold(); has {
if !tv.ShouldSample(rnd.randomness()) {
// In case we fail open, the threshold is cleared as
// recommended in the OTel spec.
carrier.clearThreshold()
return ErrInconsistentArrivingTValue
}
}
return nil
}
// makeSampler constructs a sampler. There are no errors, as the only
// potential error, an out-of-range probability, is corrected automatically
// according to the README, which allows percents >100 to equal 100%.
//
// Extending this logic, we round very small probabilities up to the
// minimum supported value(s) which varies according to sampler mode.
func makeSampler(cfg *Config, isLogs bool) dataSampler {
// README allows percents >100 to equal 100%.
pct := cfg.SamplingPercentage
if pct > 100 {
pct = 100
}
mode := cfg.Mode
if mode == modeUnset {
// Reasons to choose the legacy behavior include:
// (a) having set the hash seed
// (b) logs signal w/o trace ID source
if cfg.HashSeed != 0 || (isLogs && cfg.AttributeSource != traceIDAttributeSource) {
mode = HashSeed
} else {
mode = defaultMode
}
}
if pct == 0 {
return &neverSampler{}
}
// Note: convert to float64 before dividing by 100; otherwise precision is lost.
// If the probability is too small, round it up to the minimum.
ratio := float64(pct) / 100
// Like the pct > 100 test above, but for values too small to
// express in 14 bits of precision.
if ratio < sampling.MinSamplingProbability {
ratio = sampling.MinSamplingProbability
}
switch mode {
case Equalizing:
// The error case below is ignored; we have rounded the probability
// so that it is in range.
threshold, _ := sampling.ProbabilityToThresholdWithPrecision(ratio, cfg.SamplingPrecision)
return &equalizingSampler{
tvalueThreshold: threshold,
}
case Proportional:
return &proportionalSampler{
ratio: ratio,
precision: cfg.SamplingPrecision,
}
default: // i.e., HashSeed
// Note: the original hash function used in this code
// is preserved to ensure consistency across updates.
//
// uint32(pct * percentageScaleFactor)
//
// (a) carried out the multiplication in 32-bit precision
// (b) rounded to zero instead of nearest.
scaledSamplerate := uint32(pct * percentageScaleFactor)
if scaledSamplerate == 0 {
return &neverSampler{}
}
// Convert the accept threshold to a reject threshold,
// then shift it into 56-bit value.
reject := numHashBuckets - scaledSamplerate
reject56 := uint64(reject) << 42
threshold, _ := sampling.UnsignedToThreshold(reject56)
return &hashingSampler{
tvalueThreshold: threshold,
hashSeed: cfg.HashSeed,
// Logs specific:
logsTraceIDEnabled: cfg.AttributeSource == traceIDAttributeSource,
logsRandomnessSourceAttribute: cfg.FromAttribute,
}
}
}
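// Illustrative sketch: configurations and the sampler makeSampler
// returns (field values hypothetical).
//
//	makeSampler(&Config{SamplingPercentage: 0}, false)                                            // → neverSampler
//	makeSampler(&Config{SamplingPercentage: 10, HashSeed: 22}, false)                             // → hashingSampler (legacy mode inferred)
//	makeSampler(&Config{SamplingPercentage: 10, Mode: Equalizing, SamplingPrecision: 4}, false)   // → equalizingSampler
//	makeSampler(&Config{SamplingPercentage: 10, Mode: Proportional, SamplingPrecision: 4}, false) // → proportionalSampler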
// randFunc returns randomness (w/ named policy), a carrier, and the error.
type randFunc[T any] func(T) (randomnessNamer, samplingCarrier, error)
// priorityFunc makes changes resulting from sampling priority.
type priorityFunc[T any] func(T, randomnessNamer, sampling.Threshold) (randomnessNamer, sampling.Threshold)
// commonShouldSampleLogic implements sampling on a per-item basis
// independent of the signal type, as embodied in the functional
// parameters.
func commonShouldSampleLogic[T any](
ctx context.Context,
item T,
sampler dataSampler,
failClosed bool,
randFunc randFunc[T],
priorityFunc priorityFunc[T],
description string,
logger *zap.Logger,
counter metric.Int64Counter,
) bool {
rnd, carrier, err := randFunc(item)
if err == nil {
err = consistencyCheck(rnd, carrier)
}
var threshold sampling.Threshold
if err != nil {
var se samplerError
if errors.As(err, &se) {
logger.Debug(description, zap.Error(err))
} else {
logger.Info(description, zap.Error(err))
}
if failClosed {
threshold = sampling.NeverSampleThreshold
} else {
threshold = sampling.AlwaysSampleThreshold
}
} else {
threshold = sampler.decide(carrier)
}
rnd, threshold = priorityFunc(item, rnd, threshold)
sampled := threshold.ShouldSample(rnd.randomness())
if sampled && carrier != nil {
// Note: updateThreshold limits loss of adjusted count, by
// preventing the threshold from being lowered, only allowing
// probability to fall and never to rise.
if err := carrier.updateThreshold(threshold); err != nil {
if errors.Is(err, sampling.ErrInconsistentSampling) {
// This is working-as-intended. You can't lower
// the threshold, it's illogical.
logger.Debug(description, zap.Error(err))
} else {
logger.Info(description, zap.Error(err))
}
}
if err := carrier.reserialize(); err != nil {
logger.Info(description, zap.Error(err))
}
}
counter.Add(ctx, 1, metric.WithAttributes(attribute.String("policy", rnd.policyName()), attribute.String("sampled", strconv.FormatBool(sampled))))
return sampled
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"
import (
"context"
"strconv"
"strings"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)
// samplingPriority is the semantic result of parsing the "sampling.priority"
// attribute per OpenTracing semantic conventions.
type samplingPriority int
const (
// deferDecision means that the decision whether a span will be "sampled" (i.e.,
// forwarded by the collector) is made by hashing the trace ID according
// to the configured sampling rate.
deferDecision samplingPriority = iota
// mustSampleSpan indicates that the span had a "sampling.priority" attribute
// greater than zero and it is going to be sampled, i.e., forwarded by the
// collector.
mustSampleSpan
// doNotSampleSpan indicates that the span had a "sampling.priority" attribute
// equal to zero and it is NOT going to be sampled, i.e., it won't be forwarded
// by the collector.
doNotSampleSpan
)
type traceProcessor struct {
sampler dataSampler
failClosed bool
logger *zap.Logger
telemetryBuilder *metadata.TelemetryBuilder
}
// tracestateCarrier conveys information about sampled spans between
// the call to parse incoming randomness/threshold and the call to
// decide.
type tracestateCarrier struct {
span ptrace.Span
sampling.W3CTraceState
}
var _ samplingCarrier = &tracestateCarrier{}
func newTracestateCarrier(s ptrace.Span) (samplingCarrier, error) {
var err error
tsc := &tracestateCarrier{
span: s,
}
tsc.W3CTraceState, err = sampling.NewW3CTraceState(s.TraceState().AsRaw())
return tsc, err
}
func (tc *tracestateCarrier) threshold() (sampling.Threshold, bool) {
return tc.W3CTraceState.OTelValue().TValueThreshold()
}
func (tc *tracestateCarrier) explicitRandomness() (randomnessNamer, bool) {
rnd, ok := tc.W3CTraceState.OTelValue().RValueRandomness()
if !ok {
return newMissingRandomnessMethod(), false
}
return newSamplingRandomnessMethod(rnd), true
}
func (tc *tracestateCarrier) updateThreshold(th sampling.Threshold) error {
return tc.W3CTraceState.OTelValue().UpdateTValueWithSampling(th)
}
func (tc *tracestateCarrier) setExplicitRandomness(rnd randomnessNamer) {
tc.W3CTraceState.OTelValue().SetRValue(rnd.randomness())
}
func (tc *tracestateCarrier) clearThreshold() {
tc.W3CTraceState.OTelValue().ClearTValue()
}
func (tc *tracestateCarrier) reserialize() error {
var w strings.Builder
err := tc.W3CTraceState.Serialize(&w)
if err == nil {
tc.span.TraceState().FromRaw(w.String())
}
return err
}
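// Illustrative sketch of the tracestate form this carrier parses and
// re-serializes, with hypothetical values. Per OTEP 235, the OTel
// sub-key carries th (threshold) and rv (explicit randomness):
//
//	tracestate: ot=th:8;rv:d29d6a7215ced0
//
// updateThreshold followed by reserialize rewrites the th value in
// place via span.TraceState().FromRaw.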
// newTracesProcessor returns a processor.Traces that will
// perform intermediate span sampling according to the given
// configuration.
func newTracesProcessor(ctx context.Context, set processor.Settings, cfg *Config, nextConsumer consumer.Traces) (processor.Traces, error) {
telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
if err != nil {
return nil, err
}
tp := &traceProcessor{
sampler: makeSampler(cfg, false),
failClosed: cfg.FailClosed,
logger: set.Logger,
telemetryBuilder: telemetryBuilder,
}
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
tp.processTraces,
processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}
func (th *hashingSampler) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) {
tid := s.TraceID()
tsc, err := newTracestateCarrier(s)
rnd := newMissingRandomnessMethod()
if !tid.IsEmpty() {
rnd = newTraceIDHashingMethod(randomnessFromBytes(tid[:], th.hashSeed))
}
// If the tracestate contains a proper R-value or T-value, we
// have to leave it alone. The user should not be using this
// sampler mode if they are using specified forms of consistent
// sampling in OTel.
if err != nil {
return rnd, nil, err
} else if _, has := tsc.explicitRandomness(); has {
err = ErrRandomnessInUse
tsc = nil
} else if _, has := tsc.threshold(); has {
err = ErrThresholdInUse
tsc = nil
} else {
// When no sampling information is present, add a
// Randomness value.
tsc.setExplicitRandomness(rnd)
}
return rnd, tsc, err
}
func (ctc *consistentTracestateCommon) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) {
rnd := newMissingRandomnessMethod()
tsc, err := newTracestateCarrier(s)
if err != nil {
tsc = nil
} else if rv, has := tsc.explicitRandomness(); has {
// When the tracestate is OK and has r-value, use it.
rnd = rv
} else if !s.TraceID().IsEmpty() {
rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(s.TraceID()))
}
return rnd, tsc, err
}
func (th *neverSampler) randomnessFromSpan(span ptrace.Span) (randomnessNamer, samplingCarrier, error) {
// We return a fake randomness value, since it will not be used.
// This avoids a consistency check error for missing randomness.
tsc, err := newTracestateCarrier(span)
return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), tsc, err
}
func (tp *traceProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool {
rs.ScopeSpans().RemoveIf(func(ils ptrace.ScopeSpans) bool {
ils.Spans().RemoveIf(func(s ptrace.Span) bool {
return !commonShouldSampleLogic(
ctx,
s,
tp.sampler,
tp.failClosed,
tp.sampler.randomnessFromSpan,
tp.priorityFunc,
"traces sampler",
tp.logger,
tp.telemetryBuilder.ProcessorProbabilisticSamplerCountTracesSampled,
)
})
// Filter out empty ScopeSpans
return ils.Spans().Len() == 0
})
// Filter out empty ResourceSpans
return rs.ScopeSpans().Len() == 0
})
if td.ResourceSpans().Len() == 0 {
return td, processorhelper.ErrSkipProcessingData
}
return td, nil
}
func (tp *traceProcessor) priorityFunc(s ptrace.Span, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) {
switch parseSpanSamplingPriority(s) {
case doNotSampleSpan:
// OpenTracing mentions this as a "hint". We take a stronger
// approach and do not sample the span since some may use it to
// remove specific spans from traces.
threshold = sampling.NeverSampleThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
case mustSampleSpan:
threshold = sampling.AlwaysSampleThreshold
rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
case deferDecision:
// Note that the logs processor has very different logic here,
// but that in tracing the priority can only force to never or
// always.
}
return rnd, threshold
}
// parseSpanSamplingPriority checks if the span has the "sampling.priority" tag to
// decide if the span should be sampled or not. The usage of the tag follows the
// OpenTracing semantic tags:
// https://github.com/opentracing/specification/blob/main/semantic_conventions.md#span-tags-table
func parseSpanSamplingPriority(span ptrace.Span) samplingPriority {
attribMap := span.Attributes()
if attribMap.Len() <= 0 {
return deferDecision
}
samplingPriorityAttrib, ok := attribMap.Get("sampling.priority")
if !ok {
return deferDecision
}
// By default defer the decision.
decision := deferDecision
// Check for different types, since various client libraries
// use different conventions regarding "sampling.priority". Besides the
// client libraries, it is also possible that the type was lost in translation
// between different formats.
switch samplingPriorityAttrib.Type() {
case pcommon.ValueTypeInt:
value := samplingPriorityAttrib.Int()
if value == 0 {
decision = doNotSampleSpan
} else if value > 0 {
decision = mustSampleSpan
}
case pcommon.ValueTypeDouble:
value := samplingPriorityAttrib.Double()
if value == 0.0 {
decision = doNotSampleSpan
} else if value > 0.0 {
decision = mustSampleSpan
}
case pcommon.ValueTypeStr:
attribVal := samplingPriorityAttrib.Str()
if value, err := strconv.ParseFloat(attribVal, 64); err == nil {
if value == 0.0 {
decision = doNotSampleSpan
} else if value > 0.0 {
decision = mustSampleSpan
}
}
}
return decision
}
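// Illustrative sketch: attribute values and the resulting decision,
// following the OpenTracing convention referenced above.
//
//	sampling.priority = 1     → mustSampleSpan
//	sampling.priority = 0     → doNotSampleSpan
//	sampling.priority = "2.5" → mustSampleSpan (string parsed as float)
//	attribute absent          → deferDecision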
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"fmt"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// aggregateAttributesProcessor
type aggregateAttributesProcessor struct {
aggregations []*aggregation
}
type aggregation struct {
attribute string
prefixes []string
}
func newAggregateAttributesProcessor(config []aggregationPair) *aggregateAttributesProcessor {
aggregations := []*aggregation{}
for i := 0; i < len(config); i++ {
pair := &aggregation{
attribute: config[i].Attribute,
prefixes: config[i].Prefixes,
}
aggregations = append(aggregations, pair)
}
return &aggregateAttributesProcessor{aggregations: aggregations}
}
func (proc *aggregateAttributesProcessor) processLogs(logs plog.Logs) error {
for i := 0; i < logs.ResourceLogs().Len(); i++ {
resourceLogs := logs.ResourceLogs().At(i)
err := proc.processAttributes(resourceLogs.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ {
scopeLogs := resourceLogs.ScopeLogs().At(j)
for k := 0; k < scopeLogs.LogRecords().Len(); k++ {
err := proc.processAttributes(scopeLogs.LogRecords().At(k).Attributes())
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error {
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
resourceMetrics := metrics.ResourceMetrics().At(i)
err := proc.processAttributes(resourceMetrics.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ {
scopeMetrics := resourceMetrics.ScopeMetrics().At(j)
for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
err := processMetricLevelAttributes(proc, scopeMetrics.Metrics().At(k))
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) processTraces(traces ptrace.Traces) error {
for i := 0; i < traces.ResourceSpans().Len(); i++ {
resourceSpans := traces.ResourceSpans().At(i)
err := proc.processAttributes(resourceSpans.Resource().Attributes())
if err != nil {
return err
}
for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ {
scopeSpans := resourceSpans.ScopeSpans().At(j)
for k := 0; k < scopeSpans.Spans().Len(); k++ {
err := proc.processAttributes(scopeSpans.Spans().At(k).Attributes())
if err != nil {
return err
}
}
}
}
return nil
}
func (proc *aggregateAttributesProcessor) isEnabled() bool {
return len(proc.aggregations) != 0
}
func (*aggregateAttributesProcessor) ConfigPropertyName() string {
return "aggregate_attributes"
}
func (proc *aggregateAttributesProcessor) processAttributes(attributes pcommon.Map) error {
for i := 0; i < len(proc.aggregations); i++ {
curr := proc.aggregations[i]
names := []string{}
attrs := []pcommon.Value{}
for j := 0; j < len(curr.prefixes); j++ {
prefix := curr.prefixes[j]
// Create a new map. Keys that do not match the prefix are added
// here, so they can be checked against the remaining prefixes.
newMap := pcommon.NewMap()
newMap.EnsureCapacity(attributes.Len())
attributes.Range(func(key string, value pcommon.Value) bool {
ok, trimmedKey := getNewKey(key, prefix)
if ok {
// TODO: Potential name conflict to resolve, eg.:
// pod_* matches pod_foo
// pod2_* matches pod2_foo
// both will be renamed to foo
// ref: https://github.com/SumoLogic/sumologic-otel-collector/issues/1263
names = append(names, trimmedKey)
val := pcommon.NewValueEmpty()
value.CopyTo(val)
attrs = append(attrs, val)
} else {
value.CopyTo(newMap.PutEmpty(key))
}
return true
})
newMap.CopyTo(attributes)
}
if len(names) != len(attrs) {
return fmt.Errorf(
"internal error: number of values does not equal the number of keys; len(keys) = %d, len(values) = %d",
len(names),
len(attrs),
)
}
// Add a new attribute only if there's anything that should be put under it.
if len(names) > 0 {
aggregated := attributes.PutEmptyMap(curr.attribute)
for j := 0; j < len(names); j++ {
attrs[j].CopyTo(aggregated.PutEmpty(names[j]))
}
}
}
return nil
}
// getNewKey checks whether the key has the given prefix and trims it if so.
func getNewKey(key string, prefix string) (bool, string) {
if strings.HasPrefix(key, prefix) {
return true, strings.TrimPrefix(key, prefix)
}
return false, ""
}
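// Illustrative sketch: given the hypothetical configuration
//
//	aggregate_attributes:
//	  - attribute: pod
//	    prefixes: [pod_]
//
// processAttributes turns {pod_name: "x", pod_ip: "10.0.0.1", node: "n1"}
// into {pod: {name: "x", ip: "10.0.0.1"}, node: "n1"}.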
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"fmt"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
)
// This file contains common functionality for subprocessors that modify attributes (represented by pcommon.Map).
type attributesProcessor interface {
processAttributes(pcommon.Map) error
}
func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metric) error {
switch metric.Type() {
case pmetric.MetricTypeEmpty:
return nil
case pmetric.MetricTypeSum:
dp := metric.Sum().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeGauge:
dp := metric.Gauge().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeHistogram:
dp := metric.Histogram().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeExponentialHistogram:
dp := metric.ExponentialHistogram().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
case pmetric.MetricTypeSummary:
dp := metric.Summary().DataPoints()
for i := 0; i < dp.Len(); i++ {
err := proc.processAttributes(dp.At(i).Attributes())
if err != nil {
return err
}
}
return nil
}
return fmt.Errorf("unknown metric type: %s", metric.Type().String())
}
func mapToPcommonMap(m map[string]pcommon.Value) pcommon.Map {
attrs := pcommon.NewMap()
for k, v := range m {
v.CopyTo(attrs.PutEmpty(k))
}
return attrs
}
func mapToPcommonValue(m map[string]pcommon.Value) pcommon.Value {
attrs := pcommon.NewValueMap()
for k, v := range m {
v.CopyTo(attrs.Map().PutEmpty(k))
}
return attrs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
)
// cloudNamespaceProcessor adds the `cloud.namespace` resource attribute to logs, metrics and traces.
type cloudNamespaceProcessor struct {
addCloudNamespace bool
}
const (
cloudNamespaceAttributeName = "cloud.namespace"
cloudNamespaceAwsEc2 = "aws/ec2"
cloudNamespaceAwsEcs = "ecs"
cloudNamespaceAwsBeanstalk = "ElasticBeanstalk"
)
func newCloudNamespaceProcessor(addCloudNamespace bool) *cloudNamespaceProcessor {
return &cloudNamespaceProcessor{
addCloudNamespace: addCloudNamespace,
}
}
func (*cloudNamespaceProcessor) processLogs(logs plog.Logs) error {
for i := 0; i < logs.ResourceLogs().Len(); i++ {
addCloudNamespaceAttribute(logs.ResourceLogs().At(i).Resource().Attributes())
}
return nil
}
func (*cloudNamespaceProcessor) processMetrics(metrics pmetric.Metrics) error {
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
addCloudNamespaceAttribute(metrics.ResourceMetrics().At(i).Resource().Attributes())
}
return nil
}
func (*cloudNamespaceProcessor) processTraces(traces ptrace.Traces) error {
for i := 0; i < traces.ResourceSpans().Len(); i++ {
addCloudNamespaceAttribute(traces.ResourceSpans().At(i).Resource().Attributes())
}
return nil
}
func (proc *cloudNamespaceProcessor) isEnabled() bool {
return proc.addCloudNamespace
}
func (*cloudNamespaceProcessor) ConfigPropertyName() string {
return "add_cloud_namespace"
}
// addCloudNamespaceAttribute adds the `cloud.namespace` attribute
// to a collection of attributes that already contains a `cloud.platform` attribute.
// It does not add the `cloud.namespace` attribute for all `cloud.platform` values,
// but only for a few specific ones - namely AWS EC2, AWS ECS, and AWS Elastic Beanstalk.
func addCloudNamespaceAttribute(attributes pcommon.Map) {
cloudPlatformAttributeValue, found := attributes.Get(conventions.AttributeCloudPlatform)
if !found {
return
}
switch cloudPlatformAttributeValue.Str() {
case conventions.AttributeCloudPlatformAWSEC2:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEc2)
case conventions.AttributeCloudPlatformAWSECS:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEcs)
case conventions.AttributeCloudPlatformAWSElasticBeanstalk:
attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsBeanstalk)
}
}
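// Illustrative sketch of the mapping (cloud.platform values per the
// imported semconv v1.6.1 constants):
//
//	cloud.platform = "aws_ec2"               → cloud.namespace = "aws/ec2"
//	cloud.platform = "aws_ecs"               → cloud.namespace = "ecs"
//	cloud.platform = "aws_elastic_beanstalk" → cloud.namespace = "ElasticBeanstalk"
//	any other value                          → no attribute added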
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/component"
)
type Config struct {
AddCloudNamespace bool `mapstructure:"add_cloud_namespace"`
TranslateAttributes bool `mapstructure:"translate_attributes"`
TranslateTelegrafAttributes bool `mapstructure:"translate_telegraf_attributes"`
NestAttributes *NestingProcessorConfig `mapstructure:"nest_attributes"`
AggregateAttributes []aggregationPair `mapstructure:"aggregate_attributes"`
LogFieldsAttributes *logFieldAttributesConfig `mapstructure:"field_attributes"`
TranslateDockerMetrics bool `mapstructure:"translate_docker_metrics"`
}
type aggregationPair struct {
Attribute string `mapstructure:"attribute"`
Prefixes []string `mapstructure:"prefixes"`
}
const (
defaultAddCloudNamespace = true
defaultTranslateAttributes = true
defaultTranslateTelegrafAttributes = true
defaultTranslateDockerMetrics = false
// Nesting processor default config
defaultNestingEnabled = false
defaultNestingSeparator = "."
defaultNestingSquashSingleValues = false
defaultAddSeverityNumberAttribute = false
defaultAddSeverityTextAttribute = false
defaultAddSpanIDAttribute = false
defaultAddTraceIDAttribute = false
)
var _ component.Config = (*Config)(nil)
func defaultNestingInclude() []string {
return []string{}
}
func defaultNestingExclude() []string {
return []string{}
}
func defaultAggregateAttributes() []aggregationPair {
return []aggregationPair{}
}
func createDefaultConfig() component.Config {
return &Config{
AddCloudNamespace: defaultAddCloudNamespace,
TranslateAttributes: defaultTranslateAttributes,
TranslateTelegrafAttributes: defaultTranslateTelegrafAttributes,
NestAttributes: &NestingProcessorConfig{
Separator: defaultNestingSeparator,
Enabled: defaultNestingEnabled,
Include: defaultNestingInclude(),
Exclude: defaultNestingExclude(),
SquashSingleValues: defaultNestingSquashSingleValues,
},
AggregateAttributes: defaultAggregateAttributes(),
LogFieldsAttributes: &logFieldAttributesConfig{
SeverityNumberAttribute: &logFieldAttribute{defaultAddSeverityNumberAttribute, SeverityNumberAttributeName},
SeverityTextAttribute: &logFieldAttribute{defaultAddSeverityTextAttribute, SeverityTextAttributeName},
SpanIDAttribute: &logFieldAttribute{defaultAddSpanIDAttribute, SpanIDAttributeName},
TraceIDAttribute: &logFieldAttribute{defaultAddTraceIDAttribute, TraceIDAttributeName},
},
TranslateDockerMetrics: defaultTranslateDockerMetrics,
}
}
// Validate config
func (cfg *Config) Validate() error {
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//
//go:generate mdatagen metadata.yaml
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/collector/processor/processorhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor/internal/metadata"
)
var processorCapabilities = consumer.Capabilities{MutatesData: true}
// NewFactory returns a new factory for the processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability),
processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability),
processor.WithLogs(createLogsProcessor, metadata.LogsStability),
)
}
func createLogsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs,
) (processor.Logs, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewLogs(
ctx,
set,
cfg,
nextConsumer,
processor.processLogs,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
func createMetricsProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Metrics,
) (processor.Metrics, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewMetrics(
ctx,
set,
cfg,
nextConsumer,
processor.processMetrics,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
func createTracesProcessor(
ctx context.Context,
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
processor := newsumologicProcessor(set, cfg.(*Config))
return processorhelper.NewTraces(
ctx,
set,
cfg,
nextConsumer,
processor.processTraces,
processorhelper.WithCapabilities(processorCapabilities),
processorhelper.WithStart(processor.start),
processorhelper.WithShutdown(processor.shutdown))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
const (
SeverityNumberAttributeName = "loglevel"
SeverityTextAttributeName = "severitytext"
SpanIDAttributeName = "spanid"
TraceIDAttributeName = "traceid"
)
type logFieldAttribute struct {
Enabled bool `mapstructure:"enabled"`
Name string `mapstructure:"name"`
}
type logFieldAttributesConfig struct {
SeverityNumberAttribute *logFieldAttribute `mapstructure:"severity_number"`
SeverityTextAttribute *logFieldAttribute `mapstructure:"severity_text"`
SpanIDAttribute *logFieldAttribute `mapstructure:"span_id"`
TraceIDAttribute *logFieldAttribute `mapstructure:"trace_id"`
}
// spanIDToHexOrEmptyString returns a hex string from the SpanID.
// An empty string is returned if the SpanID is empty.
func spanIDToHexOrEmptyString(id pcommon.SpanID) string {
if id.IsEmpty() {
return ""
}
return hex.EncodeToString(id[:])
}
// traceIDToHexOrEmptyString returns a hex string from the TraceID.
// An empty string is returned if the TraceID is empty.
func traceIDToHexOrEmptyString(id pcommon.TraceID) string {
if id.IsEmpty() {
return ""
}
return hex.EncodeToString(id[:])
}
var severityNumberToLevel = map[string]string{
plog.SeverityNumberUnspecified.String(): "UNSPECIFIED",
plog.SeverityNumberTrace.String(): "TRACE",
plog.SeverityNumberTrace2.String(): "TRACE2",
plog.SeverityNumberTrace3.String(): "TRACE3",
plog.SeverityNumberTrace4.String(): "TRACE4",
plog.SeverityNumberDebug.String(): "DEBUG",
plog.SeverityNumberDebug2.String(): "DEBUG2",
plog.SeverityNumberDebug3.String(): "DEBUG3",
plog.SeverityNumberDebug4.String(): "DEBUG4",
plog.SeverityNumberInfo.String(): "INFO",
plog.SeverityNumberInfo2.String(): "INFO2",
plog.SeverityNumberInfo3.String(): "INFO3",
plog.SeverityNumberInfo4.String(): "INFO4",
plog.SeverityNumberWarn.String(): "WARN",
plog.SeverityNumberWarn2.String(): "WARN2",
plog.SeverityNumberWarn3.String(): "WARN3",
plog.SeverityNumberWarn4.String(): "WARN4",
plog.SeverityNumberError.String(): "ERROR",
plog.SeverityNumberError2.String(): "ERROR2",
plog.SeverityNumberError3.String(): "ERROR3",
plog.SeverityNumberError4.String(): "ERROR4",
plog.SeverityNumberFatal.String(): "FATAL",
plog.SeverityNumberFatal2.String(): "FATAL2",
plog.SeverityNumberFatal3.String(): "FATAL3",
plog.SeverityNumberFatal4.String(): "FATAL4",
}
// logFieldsConversionProcessor converts specific log record fields to attributes,
// which causes them to be presented as fields in the backend.
type logFieldsConversionProcessor struct {
LogFieldsAttributes *logFieldAttributesConfig
}
func newLogFieldConversionProcessor(logFieldsAttributes *logFieldAttributesConfig) *logFieldsConversionProcessor {
return &logFieldsConversionProcessor{
logFieldsAttributes,
}
}
func (proc *logFieldsConversionProcessor) addAttributes(log plog.LogRecord) {
if log.SeverityNumber() != plog.SeverityNumberUnspecified {
if _, found := log.Attributes().Get(SeverityNumberAttributeName); !found &&
proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled {
level := severityNumberToLevel[log.SeverityNumber().String()]
log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityNumberAttribute.Name, level)
}
}
if _, found := log.Attributes().Get(SeverityTextAttributeName); !found &&
proc.LogFieldsAttributes.SeverityTextAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityTextAttribute.Name, log.SeverityText())
}
if _, found := log.Attributes().Get(SpanIDAttributeName); !found &&
proc.LogFieldsAttributes.SpanIDAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.SpanIDAttribute.Name, spanIDToHexOrEmptyString(log.SpanID()))
}
if _, found := log.Attributes().Get(TraceIDAttributeName); !found &&
proc.LogFieldsAttributes.TraceIDAttribute.Enabled {
log.Attributes().PutStr(proc.LogFieldsAttributes.TraceIDAttribute.Name, traceIDToHexOrEmptyString(log.TraceID()))
}
}
func (proc *logFieldsConversionProcessor) processLogs(logs plog.Logs) error {
if !proc.isEnabled() {
return nil
}
rls := logs.ResourceLogs()
for i := 0; i < rls.Len(); i++ {
ills := rls.At(i).ScopeLogs()
for j := 0; j < ills.Len(); j++ {
logs := ills.At(j).LogRecords()
for k := 0; k < logs.Len(); k++ {
proc.addAttributes(logs.At(k))
}
}
}
return nil
}
func (proc *logFieldsConversionProcessor) processMetrics(_ pmetric.Metrics) error {
// No-op. Metrics should not be translated.
return nil
}
func (proc *logFieldsConversionProcessor) processTraces(_ ptrace.Traces) error {
// No-op. Log fields conversion does not apply to traces.
return nil
}
func (proc *logFieldsConversionProcessor) isEnabled() bool {
return proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled ||
proc.LogFieldsAttributes.SeverityTextAttribute.Enabled ||
proc.LogFieldsAttributes.SpanIDAttribute.Enabled ||
proc.LogFieldsAttributes.TraceIDAttribute.Enabled
}
func (*logFieldsConversionProcessor) ConfigPropertyName() string {
return "field_attributes"
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
type NestingProcessorConfig struct {
Separator string `mapstructure:"separator"`
Enabled bool `mapstructure:"enabled"`
Include []string `mapstructure:"include"`
Exclude []string `mapstructure:"exclude"`
SquashSingleValues bool `mapstructure:"squash_single_values"`
}
type NestingProcessor struct {
separator string
enabled bool
allowlist []string
denylist []string
squashSingleValues bool
}
func newNestingProcessor(config *NestingProcessorConfig) *NestingProcessor {
proc := &NestingProcessor{
separator: config.Separator,
enabled: config.Enabled,
allowlist: config.Include,
denylist: config.Exclude,
squashSingleValues: config.SquashSingleValues,
}
return proc
}
func (proc *NestingProcessor) processLogs(logs plog.Logs) error {
if !proc.enabled {
return nil
}
for i := 0; i < logs.ResourceLogs().Len(); i++ {
rl := logs.ResourceLogs().At(i)
if err := proc.processAttributes(rl.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rl.ScopeLogs().Len(); j++ {
logsRecord := rl.ScopeLogs().At(j).LogRecords()
for k := 0; k < logsRecord.Len(); k++ {
if err := proc.processAttributes(logsRecord.At(k).Attributes()); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.enabled {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
if err := proc.processAttributes(rm.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
if err := processMetricLevelAttributes(proc, metricsSlice.At(k)); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processTraces(traces ptrace.Traces) error {
if !proc.enabled {
return nil
}
for i := 0; i < traces.ResourceSpans().Len(); i++ {
rs := traces.ResourceSpans().At(i)
if err := proc.processAttributes(rs.Resource().Attributes()); err != nil {
return err
}
for j := 0; j < rs.ScopeSpans().Len(); j++ {
spans := rs.ScopeSpans().At(j).Spans()
for k := 0; k < spans.Len(); k++ {
if err := proc.processAttributes(spans.At(k).Attributes()); err != nil {
return err
}
}
}
}
return nil
}
func (proc *NestingProcessor) processAttributes(attributes pcommon.Map) error {
newMap := pcommon.NewMap()
attributes.Range(func(k string, v pcommon.Value) bool {
// If key is not on allow list or is on deny list, skip translating it.
if !proc.shouldTranslateKey(k) {
v.CopyTo(newMap.PutEmpty(k))
return true
}
keys := strings.Split(k, proc.separator)
if len(keys) == 0 {
// Split returns an empty slice only if both the string and the separator are empty;
// in that case, set map[""] = v and return.
newVal := newMap.PutEmpty(k)
v.CopyTo(newVal)
return true
}
prevValue := pcommon.NewValueMap()
nextMap := prevValue.Map()
newMap.CopyTo(nextMap)
for i := 0; i < len(keys); i++ {
if prevValue.Type() != pcommon.ValueTypeMap {
// If previous value was not a map, change it into a map.
// The former value will be set under the key "".
tempMap := pcommon.NewValueMap()
prevValue.CopyTo(tempMap.Map().PutEmpty(""))
tempMap.CopyTo(prevValue)
}
newValue, ok := prevValue.Map().Get(keys[i])
if ok {
prevValue = newValue
} else {
if i == len(keys)-1 {
// If we're checking the last key, insert empty value, to which v will be copied.
prevValue = prevValue.Map().PutEmpty(keys[i])
} else {
// If we're not checking the last key, put a map.
prevValue = prevValue.Map().PutEmpty(keys[i])
prevValue.SetEmptyMap()
}
}
}
if prevValue.Type() == pcommon.ValueTypeMap {
// Now check the value we want to copy. If it is a map, we should merge both maps.
// Else, just place the value under the key "".
if v.Type() == pcommon.ValueTypeMap {
v.Map().Range(func(k string, val pcommon.Value) bool {
val.CopyTo(prevValue.Map().PutEmpty(k))
return true
})
} else {
v.CopyTo(prevValue.Map().PutEmpty(""))
}
} else {
v.CopyTo(prevValue)
}
nextMap.CopyTo(newMap)
return true
})
if proc.squashSingleValues {
newMap = proc.squash(newMap)
}
newMap.CopyTo(attributes)
return nil
}
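// A minimal sketch of the nesting behavior, assuming separator "." and empty
// include/exclude lists:
//
//	attrs := pcommon.NewMap()
//	attrs.PutStr("kubernetes.container.name", "xyz")
//	attrs.PutStr("kubernetes.host.name", "the-host")
//	proc := newNestingProcessor(&NestingProcessorConfig{Enabled: true, Separator: "."})
//	_ = proc.processAttributes(attrs)
//	// attrs is now:
//	//   {"kubernetes": {"container": {"name": "xyz"}, "host": {"name": "the-host"}}}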
// shouldTranslateKey checks whether a given key fulfills the following conditions:
// - has a prefix that exists in the allowlist (if it's not empty)
// - does not have a prefix that exists in the denylist
func (proc *NestingProcessor) shouldTranslateKey(k string) bool {
if len(proc.allowlist) > 0 {
isOk := false
for i := 0; i < len(proc.allowlist); i++ {
if strings.HasPrefix(k, proc.allowlist[i]) {
isOk = true
break
}
}
if !isOk {
return false
}
}
if len(proc.denylist) > 0 {
for i := 0; i < len(proc.denylist); i++ {
if strings.HasPrefix(k, proc.denylist[i]) {
return false
}
}
}
return true
}
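// For illustration, with an allowlist and a denylist configured:
//
//	proc := newNestingProcessor(&NestingProcessorConfig{
//		Enabled:   true,
//		Separator: ".",
//		Include:   []string{"kubernetes."},
//		Exclude:   []string{"kubernetes.annotations."},
//	})
//	proc.shouldTranslateKey("kubernetes.host.name")       // true
//	proc.shouldTranslateKey("kubernetes.annotations.foo") // false: denylist prefix
//	proc.shouldTranslateKey("custom.key")                 // false: not on the allowlist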
// squash squashes maps that have single values, e.g. {"a": {"b": {"c": "C", "d": "D"}}}
// gets squashed into {"a.b": {"c": "C", "d": "D"}}
func (proc *NestingProcessor) squash(attributes pcommon.Map) pcommon.Map {
newMap := pcommon.NewValueMap()
attributes.CopyTo(newMap.Map())
key := proc.squashAttribute(newMap)
if key != "" {
retMap := pcommon.NewMap()
newMap.Map().CopyTo(retMap.PutEmptyMap(key))
return retMap
}
return newMap.Map()
}
// squashAttribute squashes the keys in a value.
// If the value contains a map with exactly one element, that map gets squashed
// and the resulting combined key is returned.
//
// If the value contains a map with many elements, this function is called on each
// of them, their keys get replaced where needed, and "" is returned.
//
// Otherwise, nothing happens and "" is returned.
func (proc *NestingProcessor) squashAttribute(value pcommon.Value) string {
if value.Type() != pcommon.ValueTypeMap {
return ""
}
m := value.Map()
if m.Len() == 1 {
// If the map contains only one key-value pair, squash it.
key := ""
val := pcommon.NewValueEmpty()
// This will iterate only over one value (the only one)
m.Range(func(k string, v pcommon.Value) bool {
keySuffix := proc.squashAttribute(v)
key = proc.squashKey(k, keySuffix)
val = v
return false
})
val.CopyTo(value)
return key
}
// This map doesn't get squashed, but its content might have keys replaced.
newMap := pcommon.NewMap()
m.Range(func(k string, v pcommon.Value) bool {
keySuffix := proc.squashAttribute(v)
// If "" was returned, the value was not a one-element map and did not get squashed.
if keySuffix == "" {
v.CopyTo(newMap.PutEmpty(k))
} else {
v.CopyTo(newMap.PutEmpty(proc.squashKey(k, keySuffix)))
}
return true
})
newMap.CopyTo(value.Map())
return ""
}
func (proc *NestingProcessor) squashKey(key string, keySuffix string) string {
if keySuffix == "" {
return key
}
return key + proc.separator + keySuffix
}
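// For illustration, how squashing behaves with squash_single_values enabled
// (separator "."):
//
//	// {"a": {"b": {"c": "C", "d": "D"}}}  becomes  {"a.b": {"c": "C", "d": "D"}}
//	// "a" and "b" form a single-value chain and collapse into one key, while
//	// "c" and "d" are siblings, so the map at that level stays intact.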
func (proc *NestingProcessor) isEnabled() bool {
return proc.enabled
}
func (*NestingProcessor) ConfigPropertyName() string {
return "nest_attributes"
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type sumologicSubprocessor interface {
processLogs(plog.Logs) error
processMetrics(pmetric.Metrics) error
processTraces(ptrace.Traces) error
isEnabled() bool
ConfigPropertyName() string
}
type sumologicProcessor struct {
logger *zap.Logger
subprocessors []sumologicSubprocessor
}
func newsumologicProcessor(set processor.Settings, config *Config) *sumologicProcessor {
cloudNamespaceProcessor := newCloudNamespaceProcessor(config.AddCloudNamespace)
translateAttributesProcessor := newTranslateAttributesProcessor(config.TranslateAttributes)
translateTelegrafMetricsProcessor := newTranslateTelegrafMetricsProcessor(config.TranslateTelegrafAttributes)
nestingProcessor := newNestingProcessor(config.NestAttributes)
aggregateAttributesProcessor := newAggregateAttributesProcessor(config.AggregateAttributes)
logFieldsConversionProcessor := newLogFieldConversionProcessor(config.LogFieldsAttributes)
translateDockerMetricsProcessor := newTranslateDockerMetricsProcessor(config.TranslateDockerMetrics)
processors := []sumologicSubprocessor{
cloudNamespaceProcessor,
translateAttributesProcessor,
translateTelegrafMetricsProcessor,
nestingProcessor,
aggregateAttributesProcessor,
logFieldsConversionProcessor,
translateDockerMetricsProcessor,
}
processor := &sumologicProcessor{
logger: set.Logger,
subprocessors: processors,
}
return processor
}
func (processor *sumologicProcessor) start(_ context.Context, _ component.Host) error {
enabledSubprocessors := []zapcore.Field{}
for _, proc := range processor.subprocessors {
enabledSubprocessors = append(enabledSubprocessors, zap.Bool(proc.ConfigPropertyName(), proc.isEnabled()))
}
processor.logger.Info("Sumo Logic Processor has started.", enabledSubprocessors...)
return nil
}
func (processor *sumologicProcessor) shutdown(_ context.Context) error {
processor.logger.Info("Sumo Logic Processor has shut down.")
return nil
}
func (processor *sumologicProcessor) processLogs(_ context.Context, logs plog.Logs) (plog.Logs, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processLogs(logs); err != nil {
return logs, fmt.Errorf("failed to process logs for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return logs, nil
}
func (processor *sumologicProcessor) processMetrics(_ context.Context, metrics pmetric.Metrics) (pmetric.Metrics, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processMetrics(metrics); err != nil {
return metrics, fmt.Errorf("failed to process metrics for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return metrics, nil
}
func (processor *sumologicProcessor) processTraces(_ context.Context, traces ptrace.Traces) (ptrace.Traces, error) {
for _, subprocessor := range processor.subprocessors {
if err := subprocessor.processTraces(traces); err != nil {
return traces, fmt.Errorf("failed to process traces for property %s: %w", subprocessor.ConfigPropertyName(), err)
}
}
return traces, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateAttributesProcessor translates attribute names from OpenTelemetry to Sumo Logic convention
type translateAttributesProcessor struct {
shouldTranslate bool
}
// attributeTranslations maps OpenTelemetry attribute names to Sumo Logic attribute names
var attributeTranslations = map[string]string{
"cloud.account.id": "AccountId",
"cloud.availability_zone": "AvailabilityZone",
"cloud.platform": "aws_service",
"cloud.region": "Region",
"host.id": "InstanceId",
"host.name": "host",
"host.type": "InstanceType",
"k8s.cluster.name": "Cluster",
"k8s.container.name": "container",
"k8s.daemonset.name": "daemonset",
"k8s.deployment.name": "deployment",
"k8s.namespace.name": "namespace",
"k8s.node.name": "node",
"k8s.service.name": "service",
"k8s.pod.hostname": "host",
"k8s.pod.name": "pod",
"k8s.pod.uid": "pod_id",
"k8s.replicaset.name": "replicaset",
"k8s.statefulset.name": "statefulset",
"service.name": "service",
"log.file.path_resolved": "_sourceName",
}
func newTranslateAttributesProcessor(shouldTranslate bool) *translateAttributesProcessor {
return &translateAttributesProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateAttributesProcessor) processLogs(logs plog.Logs) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < logs.ResourceLogs().Len(); i++ {
translateAttributes(logs.ResourceLogs().At(i).Resource().Attributes())
}
return nil
}
func (proc *translateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
translateAttributes(metrics.ResourceMetrics().At(i).Resource().Attributes())
}
return nil
}
func (proc *translateAttributesProcessor) processTraces(_ ptrace.Traces) error {
// No-op. Traces should not be translated.
return nil
}
func (proc *translateAttributesProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateAttributesProcessor) ConfigPropertyName() string {
return "translate_attributes"
}
func translateAttributes(attributes pcommon.Map) {
result := pcommon.NewMap()
result.EnsureCapacity(attributes.Len())
attributes.Range(func(otKey string, value pcommon.Value) bool {
if sumoKey, ok := attributeTranslations[otKey]; ok {
// Only insert the translated key if it doesn't exist yet, to prevent overwriting.
// We have to check the original map as well, since the result map is still being
// built, so we cannot rely on .Insert() not overwriting there.
if _, exists := attributes.Get(sumoKey); !exists {
if _, ok := result.Get(sumoKey); !ok {
value.CopyTo(result.PutEmpty(sumoKey))
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
return true
})
result.CopyTo(attributes)
}
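// For illustration, the no-overwrite rule in practice:
//
//	attrs := pcommon.NewMap()
//	attrs.PutStr("cloud.region", "us-east-1")
//	translateAttributes(attrs)
//	// attrs: {"Region": "us-east-1"}
//
//	attrs = pcommon.NewMap()
//	attrs.PutStr("cloud.region", "us-east-1")
//	attrs.PutStr("Region", "eu-west-1")
//	translateAttributes(attrs)
//	// attrs is unchanged: "Region" already exists, so "cloud.region" keeps its key.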
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateDockerMetricsProcessor translates Docker metric names and resource attributes from OpenTelemetry to Sumo Logic convention
type translateDockerMetricsProcessor struct {
shouldTranslate bool
}
// dockerMetricsTranslations maps OpenTelemetry Docker metric names to corresponding names in Sumo Logic convention
var dockerMetricsTranslations = map[string]string{
"container.cpu.percent": "cpu_percentage",
"container.cpu.usage.system": "system_cpu_usage",
"container.cpu.usage.percpu": "cpu_usage.percpu_usage",
"container.cpu.usage.total": "cpu_usage.total_usage",
"container.cpu.usage.kernelmode": "cpu_usage.usage_in_kernelmode",
"container.cpu.usage.usermode": "cpu_usage.usage_in_usermode",
"container.cpu.throttling_data.periods": "throttling_data.periods",
"container.cpu.throttling_data.throttled_periods": "throttling_data.throttled_periods",
"container.cpu.throttling_data.throttled_time": "throttling_data.throttled_time",
"container.memory.usage.limit": "limit",
"container.memory.usage.max": "max_usage",
"container.memory.percent": "memory_percentage",
"container.memory.usage.total": "usage",
"container.memory.active_anon": "stats.active_anon",
"container.memory.active_file": "stats.active_file",
"container.memory.cache": "stats.cache",
"container.memory.hierarchical_memory_limit": "stats.hierarchical_memory_limit",
"container.memory.inactive_anon": "stats.inactive_anon",
"container.memory.inactive_file": "stats.inactive_file",
"container.memory.mapped_file": "stats.mapped_file",
"container.memory.pgfault": "stats.pgfault",
"container.memory.pgmajfault": "stats.pgmajfault",
"container.memory.pgpgin": "stats.pgpgin",
"container.memory.pgpgout": "stats.pgpgout",
"container.memory.rss": "stats.rss",
"container.memory.rss_huge": "stats.rss_huge",
"container.memory.unevictable": "stats.unevictable",
"container.memory.writeback": "stats.writeback",
"container.memory.total_active_anon": "stats.total_active_anon",
"container.memory.total_active_file": "stats.total_active_file",
"container.memory.total_cache": "stats.total_cache",
"container.memory.total_inactive_anon": "stats.total_inactive_anon",
"container.memory.total_mapped_file": "stats.total_mapped_file",
"container.memory.total_pgfault": "stats.total_pgfault",
"container.memory.total_pgmajfault": "stats.total_pgmajfault",
"container.memory.total_pgpgin": "stats.total_pgpgin",
"container.memory.total_pgpgout": "stats.total_pgpgout",
"container.memory.total_rss": "stats.total_rss",
"container.memory.total_rss_huge": "stats.total_rss_huge",
"container.memory.total_unevictable": "stats.total_unevictable",
"container.memory.total_writeback": "stats.total_writeback",
"container.blockio.io_merged_recursive": "io_merged_recursive",
"container.blockio.io_queued_recursive": "io_queue_recursive",
"container.blockio.io_service_bytes_recursive": "io_service_bytes_recursive",
"container.blockio.io_service_time_recursive": "io_service_time_recursive",
"container.blockio.io_serviced_recursive": "io_serviced_recursive",
"container.blockio.io_time_recursive": "io_time_recursive",
"container.blockio.io_wait_time_recursive": "io_wait_time_recursive",
"container.blockio.sectors_recursive": "sectors_recursive",
}
var dockerResourceAttributeTranslations = map[string]string{
"container.id": "container.FullID",
"container.image.name": "container.ImageName",
"container.name": "container.Name",
}
func newTranslateDockerMetricsProcessor(shouldTranslate bool) *translateDockerMetricsProcessor {
return &translateDockerMetricsProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateDockerMetricsProcessor) processLogs(_ plog.Logs) error {
// No-op, this subprocessor doesn't process logs.
return nil
}
func (proc *translateDockerMetricsProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
translateDockerResourceAttributes(rm.Resource().Attributes())
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
translateDockerMetric(metricsSlice.At(k))
}
}
}
return nil
}
func (proc *translateDockerMetricsProcessor) processTraces(_ ptrace.Traces) error {
// No-op, this subprocessor doesn't process traces.
return nil
}
func (proc *translateDockerMetricsProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateDockerMetricsProcessor) ConfigPropertyName() string {
return "translate_docker_metrics"
}
func translateDockerMetric(m pmetric.Metric) {
name, exists := dockerMetricsTranslations[m.Name()]
if exists {
m.SetName(name)
}
}
func translateDockerResourceAttributes(attributes pcommon.Map) {
result := pcommon.NewMap()
result.EnsureCapacity(attributes.Len())
attributes.Range(func(otKey string, value pcommon.Value) bool {
if sumoKey, ok := dockerResourceAttributeTranslations[otKey]; ok {
// Only insert the translated key if it doesn't exist yet, to prevent overwriting.
// We have to check the original map as well, since the result map is still being
// built, so we cannot rely on .Insert() not overwriting there.
if _, exists := attributes.Get(sumoKey); !exists {
if _, ok := result.Get(sumoKey); !ok {
value.CopyTo(result.PutEmpty(sumoKey))
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
} else {
if _, ok := result.Get(otKey); !ok {
value.CopyTo(result.PutEmpty(otKey))
}
}
return true
})
result.CopyTo(attributes)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor"
import (
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// translateTelegrafMetricsProcessor translates Telegraf metric names to the Sumo Logic convention
type translateTelegrafMetricsProcessor struct {
shouldTranslate bool
}
// metricsTranslations maps Telegraf metric names to corresponding names in Sumo Logic convention
var metricsTranslations = map[string]string{
// CPU metrics
"cpu_usage_active": "CPU_Total",
"cpu_usage_idle": "CPU_Idle",
"cpu_usage_iowait": "CPU_IOWait",
"cpu_usage_irq": "CPU_Irq",
"cpu_usage_nice": "CPU_Nice",
"cpu_usage_softirq": "CPU_SoftIrq",
"cpu_usage_steal": "CPU_Stolen",
"cpu_usage_System": "CPU_Sys",
"cpu_usage_user": "CPU_User",
"system_load1": "CPU_LoadAvg_1min",
"system_load5": "CPU_LoadAvg_5min",
"system_load15": "CPU_LoadAvg_15min",
// Disk metrics
"disk_used": "Disk_Used",
"disk_used_percent": "Disk_UsedPercent",
"disk_inodes_free": "Disk_InodesAvailable",
// Disk IO metrics
"diskio_reads": "Disk_Reads",
"diskio_read_bytes": "Disk_ReadBytes",
"diskio_writes": "Disk_Writes",
"diskio_write_bytes": "Disk_WriteBytes",
// Memory metrics
"mem_total": "Mem_Total",
"mem_free": "Mem_free",
"mem_available": "Mem_ActualFree",
"mem_used": "Mem_ActualUsed",
"mem_used_percent": "Mem_UsedPercent",
"mem_available_percent": "Mem_FreePercent",
// Procstat metrics
"procstat_num_threads": "Proc_Threads",
"procstat_memory_vms": "Proc_VMSize",
"procstat_memory_rss": "Proc_RSSize",
"procstat_cpu_usage": "Proc_CPU",
"procstat_major_faults": "Proc_MajorFaults",
"procstat_minor_faults": "Proc_MinorFaults",
// Net metrics
"net_bytes_sent": "Net_OutBytes",
"net_bytes_recv": "Net_InBytes",
"net_packets_sent": "Net_OutPackets",
"net_packets_recv": "Net_InPackets",
// Netstat metrics
"netstat_tcp_close": "TCP_Close",
"netstat_tcp_close_wait": "TCP_CloseWait",
"netstat_tcp_closing": "TCP_Closing",
"netstat_tcp_established": "TCP_Established",
"netstat_tcp_listen": "TCP_Listen",
"netstat_tcp_time_wait": "TCP_TimeWait",
}
func newTranslateTelegrafMetricsProcessor(shouldTranslate bool) *translateTelegrafMetricsProcessor {
return &translateTelegrafMetricsProcessor{
shouldTranslate: shouldTranslate,
}
}
func (proc *translateTelegrafMetricsProcessor) processLogs(_ plog.Logs) error {
// No-op, this subprocessor doesn't process logs.
return nil
}
func (proc *translateTelegrafMetricsProcessor) processMetrics(metrics pmetric.Metrics) error {
if !proc.shouldTranslate {
return nil
}
for i := 0; i < metrics.ResourceMetrics().Len(); i++ {
rm := metrics.ResourceMetrics().At(i)
for j := 0; j < rm.ScopeMetrics().Len(); j++ {
metricsSlice := rm.ScopeMetrics().At(j).Metrics()
for k := 0; k < metricsSlice.Len(); k++ {
translateTelegrafMetric(metricsSlice.At(k))
}
}
}
return nil
}
func (proc *translateTelegrafMetricsProcessor) processTraces(_ ptrace.Traces) error {
// No-op, this subprocessor doesn't process traces.
return nil
}
func (proc *translateTelegrafMetricsProcessor) isEnabled() bool {
return proc.shouldTranslate
}
func (*translateTelegrafMetricsProcessor) ConfigPropertyName() string {
return "translate_telegraf_attributes"
}
func translateTelegrafMetric(m pmetric.Metric) {
name, exists := metricsTranslations[m.Name()]
if exists {
m.SetName(name)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
)
func getNewAndPolicy(settings component.TelemetrySettings, config *AndCfg) (sampling.PolicyEvaluator, error) {
subPolicyEvaluators := make([]sampling.PolicyEvaluator, len(config.SubPolicyCfg))
for i := range config.SubPolicyCfg {
policyCfg := &config.SubPolicyCfg[i]
policy, err := getAndSubPolicyEvaluator(settings, policyCfg)
if err != nil {
return nil, err
}
subPolicyEvaluators[i] = policy
}
return sampling.NewAnd(settings.Logger, subPolicyEvaluators), nil
}
// getAndSubPolicyEvaluator returns the policy evaluator for an 'and' sub-policy
func getAndSubPolicyEvaluator(settings component.TelemetrySettings, cfg *AndSubPolicyCfg) (sampling.PolicyEvaluator, error) {
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"go.opentelemetry.io/collector/component"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
)
func getNewCompositePolicy(settings component.TelemetrySettings, config *CompositeCfg) (sampling.PolicyEvaluator, error) {
subPolicyEvalParams := make([]sampling.SubPolicyEvalParams, len(config.SubPolicyCfg))
rateAllocationsMap := getRateAllocationMap(config)
for i := range config.SubPolicyCfg {
policyCfg := &config.SubPolicyCfg[i]
policy, err := getCompositeSubPolicyEvaluator(settings, policyCfg)
if err != nil {
return nil, err
}
evalParams := sampling.SubPolicyEvalParams{
Evaluator: policy,
MaxSpansPerSecond: int64(rateAllocationsMap[policyCfg.Name]),
}
subPolicyEvalParams[i] = evalParams
}
return sampling.NewComposite(settings.Logger, config.MaxTotalSpansPerSecond, subPolicyEvalParams, sampling.MonotonicClock{}), nil
}
// getRateAllocationMap returns the rate allocations (in spans per second) for the sub-policies
func getRateAllocationMap(config *CompositeCfg) map[string]float64 {
rateAllocationsMap := make(map[string]float64)
maxTotalSPS := float64(config.MaxTotalSpansPerSecond)
// Default SPS is determined by dividing the max total SPS equally among the sub-policies
defaultSPS := maxTotalSPS / float64(len(config.SubPolicyCfg))
for _, rAlloc := range config.RateAllocation {
if rAlloc.Percent > 0 {
rateAllocationsMap[rAlloc.Policy] = (float64(rAlloc.Percent) / 100) * maxTotalSPS
} else {
rateAllocationsMap[rAlloc.Policy] = defaultSPS
}
}
return rateAllocationsMap
}
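// Worked example (illustrative): with max_total_spans_per_second = 1000 and two
// sub-policies, a rate_allocation entry of {policy: "p1", percent: 25} yields
// 250 SPS for "p1", while an entry with percent <= 0 falls back to the default
// of 1000 / 2 = 500 SPS. Sub-policies with no rate_allocation entry at all are
// absent from the map, so the lookup in getNewCompositePolicy yields 0 for them.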
// getCompositeSubPolicyEvaluator returns the policy evaluator for a composite sub-policy
func getCompositeSubPolicyEvaluator(settings component.TelemetrySettings, cfg *CompositeSubPolicyCfg) (sampling.PolicyEvaluator, error) {
switch cfg.Type {
case And:
return getNewAndPolicy(settings, &cfg.AndCfg)
default:
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:generate mdatagen metadata.yaml
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"context"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
)
// NewFactory returns a new factory for the Tail Sampling processor.
func NewFactory() processor.Factory {
return processor.NewFactory(
metadata.Type,
createDefaultConfig,
processor.WithTraces(createTracesProcessor, metadata.TracesStability))
}
func createDefaultConfig() component.Config {
return &Config{
DecisionWait: 30 * time.Second,
NumTraces: 50000,
}
}
func createTracesProcessor(
ctx context.Context,
params processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (processor.Traces, error) {
tCfg := cfg.(*Config)
return newTracesProcessor(ctx, params, nextConsumer, *tCfg)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cache // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
import (
"encoding/binary"
lru "github.com/hashicorp/golang-lru/v2"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// lruDecisionCache implements Cache as a simple LRU cache.
// It holds trace IDs that had sampling decisions made on them.
// It does not specify the type of sampling decision that was made, only that
// a decision was made for an ID. You need separate DecisionCaches for caching
// sampled and not sampled trace IDs.
type lruDecisionCache[V any] struct {
cache *lru.Cache[uint64, V]
}
var _ Cache[any] = (*lruDecisionCache[any])(nil)
// NewLRUDecisionCache returns a new lruDecisionCache.
// The size parameter indicates the number of keys the cache will hold before it
// starts evicting the least recently used key.
func NewLRUDecisionCache[V any](size int) (Cache[V], error) {
c, err := lru.New[uint64, V](size)
if err != nil {
return nil, err
}
return &lruDecisionCache[V]{cache: c}, nil
}
func (c *lruDecisionCache[V]) Get(id pcommon.TraceID) (V, bool) {
return c.cache.Get(rightHalfTraceID(id))
}
func (c *lruDecisionCache[V]) Put(id pcommon.TraceID, v V) {
_ = c.cache.Add(rightHalfTraceID(id), v)
}
// Delete is a no-op, since the LRU cache relies on the least recently used key being evicted automatically
func (c *lruDecisionCache[V]) Delete(_ pcommon.TraceID) {}
func rightHalfTraceID(id pcommon.TraceID) uint64 {
return binary.LittleEndian.Uint64(id[8:])
}
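// For illustration, only the low 8 bytes of the 16-byte trace ID form the
// cache key, decoded little-endian:
//
//	id := pcommon.TraceID([16]byte{
//		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // ignored half
//		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, // used as the key
//	})
//	rightHalfTraceID(id) // == 0x0f0e0d0c0b0a0908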
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cache // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
import "go.opentelemetry.io/collector/pdata/pcommon"
type nopDecisionCache[V any] struct{}
var _ Cache[any] = (*nopDecisionCache[any])(nil)
func NewNopDecisionCache[V any]() Cache[V] {
return &nopDecisionCache[V]{}
}
func (n *nopDecisionCache[V]) Get(_ pcommon.TraceID) (V, bool) {
var v V
return v, false
}
func (n *nopDecisionCache[V]) Put(_ pcommon.TraceID, _ V) {
}
func (n *nopDecisionCache[V]) Delete(_ pcommon.TraceID) {}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package idbatcher defines a pipeline of fixed size in which the
// elements are batches of ids.
package idbatcher // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher"
import (
"errors"
"sync"
"go.opentelemetry.io/collector/pdata/pcommon"
)
var (
// ErrInvalidNumBatches occurs when an invalid number of batches is specified.
ErrInvalidNumBatches = errors.New("invalid number of batches, it must be greater than zero")
// ErrInvalidBatchChannelSize occurs when an invalid batch channel size is specified.
ErrInvalidBatchChannelSize = errors.New("invalid batch channel size, it must be greater than zero")
)
// Batch is the type of batches held by the Batcher.
type Batch []pcommon.TraceID
// Batcher behaves like a pipeline of batches that has a fixed number of batches in the pipe
// and a new batch being built outside of the pipe. Items can be concurrently added to the batch
// currently being built. When the batch being built is closed, the oldest batch in the pipe
// is pushed out so the one just closed can be put on the end of the pipe (this is done as an
// atomic operation). The caller is in control of when a batch is completed and a new one should
// be started.
type Batcher interface {
// AddToCurrentBatch puts the given id on the batch being currently built. The client is in charge
// of limiting the growth of the current batch if appropriate for its scenario. It can
// either call CloseCurrentAndTakeFirstBatch earlier or stop adding new items depending on what is
// required by the scenario.
AddToCurrentBatch(id pcommon.TraceID)
// CloseCurrentAndTakeFirstBatch takes the batch at the front of the pipe, and moves the current
// batch to the end of the pipe, creating a new batch to receive new items. This operation should
// be atomic.
// It returns the batch that was in front of the pipe and a boolean that, when true, indicates that
// there are more batches to be retrieved; false means the Batcher was stopped and the pipe is drained.
CloseCurrentAndTakeFirstBatch() (Batch, bool)
// Stop informs the Batcher that no more items are going to be batched and that the pipeline can be
// read until it is empty. After this method is called, attempts to enqueue new items will panic.
Stop()
}
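// A minimal usage sketch, assuming a caller that drives batching on a timer
// (process is a hypothetical consumer):
//
//	b, _ := New(10, 64, 128)
//	go func() {
//		for range time.Tick(time.Second) {
//			batch, ok := b.CloseCurrentAndTakeFirstBatch()
//			process(batch)
//			if !ok {
//				return // Stop was called and the pipe is drained
//			}
//		}
//	}()
//	b.AddToCurrentBatch(traceID)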
var _ Batcher = (*batcher)(nil)
type batcher struct {
pendingIDs chan pcommon.TraceID // Channel for the ids to be added to the next batch.
batches chan Batch // Channel with already captured batches.
// cbMutex protects the currentBatch storing ids.
cbMutex sync.Mutex
currentBatch Batch
newBatchesInitialCapacity uint64
stopchan chan bool
stopped bool
stopLock sync.RWMutex
}
// New creates a Batcher that will hold numBatches in its pipeline, having a channel with
// batchChannelSize to receive new items. New batches will be created with capacity set to
// newBatchesInitialCapacity.
func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batcher, error) {
if numBatches < 1 {
return nil, ErrInvalidNumBatches
}
if batchChannelSize < 1 {
return nil, ErrInvalidBatchChannelSize
}
batches := make(chan Batch, numBatches)
// First numBatches batches will be empty in order to simplify clients that are running
// CloseCurrentAndTakeFirstBatch on a timer and want to delay the processing of the first
// batch with actual data. This way there is no need for accounting on the client side and
// a single timer can be started immediately.
for i := uint64(0); i < numBatches; i++ {
batches <- nil
}
batcher := &batcher{
pendingIDs: make(chan pcommon.TraceID, batchChannelSize),
batches: batches,
currentBatch: make(Batch, 0, newBatchesInitialCapacity),
newBatchesInitialCapacity: newBatchesInitialCapacity,
stopchan: make(chan bool),
}
// Single goroutine that keeps filling the current batch, contention is expected only
// when the current batch is being switched.
go func() {
for id := range batcher.pendingIDs {
batcher.cbMutex.Lock()
batcher.currentBatch = append(batcher.currentBatch, id)
batcher.cbMutex.Unlock()
}
batcher.stopchan <- true
}()
return batcher, nil
}
func (b *batcher) AddToCurrentBatch(id pcommon.TraceID) {
b.pendingIDs <- id
}
func (b *batcher) CloseCurrentAndTakeFirstBatch() (Batch, bool) {
if readBatch, ok := <-b.batches; ok {
b.stopLock.RLock()
if !b.stopped {
nextBatch := make(Batch, 0, b.newBatchesInitialCapacity)
b.cbMutex.Lock()
b.batches <- b.currentBatch
b.currentBatch = nextBatch
b.cbMutex.Unlock()
}
b.stopLock.RUnlock()
return readBatch, true
}
readBatch := b.currentBatch
b.currentBatch = nil
return readBatch, false
}
func (b *batcher) Stop() {
close(b.pendingIDs)
b.stopLock.Lock()
b.stopped = <-b.stopchan
b.stopLock.Unlock()
close(b.batches)
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"errors"
"sync"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/collector/component"
)
func Meter(settings component.TelemetrySettings) metric.Meter {
return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor")
}
func Tracer(settings component.TelemetrySettings) trace.Tracer {
return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor")
}
// TelemetryBuilder provides an interface for components to report telemetry
// as defined in metadata and user config.
type TelemetryBuilder struct {
meter metric.Meter
mu sync.Mutex
registrations []metric.Registration
ProcessorTailSamplingCountSpansSampled metric.Int64Counter
ProcessorTailSamplingCountTracesSampled metric.Int64Counter
ProcessorTailSamplingEarlyReleasesFromCacheDecision metric.Int64Counter
ProcessorTailSamplingGlobalCountTracesSampled metric.Int64Counter
ProcessorTailSamplingNewTraceIDReceived metric.Int64Counter
ProcessorTailSamplingSamplingDecisionLatency metric.Int64Histogram
ProcessorTailSamplingSamplingDecisionTimerLatency metric.Int64Histogram
ProcessorTailSamplingSamplingLateSpanAge metric.Int64Histogram
ProcessorTailSamplingSamplingPolicyEvaluationError metric.Int64Counter
ProcessorTailSamplingSamplingTraceDroppedTooEarly metric.Int64Counter
ProcessorTailSamplingSamplingTraceRemovalAge metric.Int64Histogram
ProcessorTailSamplingSamplingTracesOnMemory metric.Int64Gauge
}
// TelemetryBuilderOption applies changes to default builder.
type TelemetryBuilderOption interface {
apply(*TelemetryBuilder)
}
type telemetryBuilderOptionFunc func(mb *TelemetryBuilder)
func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) {
tbof(mb)
}
// Shutdown unregisters all registered callbacks for async instruments.
func (builder *TelemetryBuilder) Shutdown() {
builder.mu.Lock()
defer builder.mu.Unlock()
for _, reg := range builder.registrations {
reg.Unregister()
}
}
// NewTelemetryBuilder provides a struct with methods to update all internal telemetry
// for a component
func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) {
builder := TelemetryBuilder{}
for _, op := range options {
op.apply(&builder)
}
builder.meter = Meter(settings)
var err, errs error
builder.ProcessorTailSamplingCountSpansSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_count_spans_sampled",
metric.WithDescription("Count of spans that were sampled or not per sampling policy"),
metric.WithUnit("{spans}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_count_traces_sampled",
metric.WithDescription("Count of traces that were sampled or not per sampling policy"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingEarlyReleasesFromCacheDecision, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_early_releases_from_cache_decision",
metric.WithDescription("Number of spans that were able to be immediately released due to a decision cache hit."),
metric.WithUnit("{spans}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingGlobalCountTracesSampled, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_global_count_traces_sampled",
metric.WithDescription("Global count of traces that were sampled or not by at least one policy"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingNewTraceIDReceived, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_new_trace_id_received",
metric.WithDescription("Counts the arrival of new traces"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingDecisionLatency, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_decision_latency",
metric.WithDescription("Latency (in microseconds) of a given sampling policy"),
metric.WithUnit("µs"),
metric.WithExplicitBucketBoundaries([]float64{1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000}...),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingDecisionTimerLatency, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_decision_timer_latency",
metric.WithDescription("Latency (in microseconds) of each run of the sampling decision timer"),
metric.WithUnit("µs"),
metric.WithExplicitBucketBoundaries([]float64{1, 2, 5, 10, 25, 50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 50000}...),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingLateSpanAge, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_late_span_age",
metric.WithDescription("Time (in seconds) from the sampling decision was taken and the arrival of a late span"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingPolicyEvaluationError, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_sampling_policy_evaluation_error",
metric.WithDescription("Count of sampling policy evaluation errors"),
metric.WithUnit("{errors}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTraceDroppedTooEarly, err = builder.meter.Int64Counter(
"otelcol_processor_tail_sampling_sampling_trace_dropped_too_early",
metric.WithDescription("Count of traces that needed to be dropped before the configured wait time"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTraceRemovalAge, err = builder.meter.Int64Histogram(
"otelcol_processor_tail_sampling_sampling_trace_removal_age",
metric.WithDescription("Time (in seconds) from arrival of a new trace until its removal from memory"),
metric.WithUnit("s"),
)
errs = errors.Join(errs, err)
builder.ProcessorTailSamplingSamplingTracesOnMemory, err = builder.meter.Int64Gauge(
"otelcol_processor_tail_sampling_sampling_traces_on_memory",
metric.WithDescription("Tracks the number of traces current on memory"),
metric.WithUnit("{traces}"),
)
errs = errors.Join(errs, err)
return &builder, errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type alwaysSample struct {
logger *zap.Logger
}
var _ PolicyEvaluator = (*alwaysSample)(nil)
// NewAlwaysSample creates a policy evaluator that samples all traces.
func NewAlwaysSample(settings component.TelemetrySettings) PolicyEvaluator {
return &alwaysSample{
logger: settings.Logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (as *alwaysSample) Evaluate(context.Context, pcommon.TraceID, *TraceData) (Decision, error) {
as.logger.Debug("Evaluating spans in always-sample filter")
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type And struct {
// the subpolicy evaluators
subpolicies []PolicyEvaluator
logger *zap.Logger
}
func NewAnd(
logger *zap.Logger,
subpolicies []PolicyEvaluator,
) PolicyEvaluator {
return &And{
subpolicies: subpolicies,
logger: logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *And) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
// The policy iterates over all sub-policies and returns Sampled if all sub-policies returned a Sampled Decision.
// If any subpolicy returns NotSampled or InvertNotSampled, it returns NotSampled Decision.
for _, sub := range c.subpolicies {
decision, err := sub.Evaluate(ctx, traceID, trace)
if err != nil {
return Unspecified, err
}
if decision == NotSampled || decision == InvertNotSampled {
return NotSampled, nil
}
}
return Sampled, nil
}
// OnDroppedSpans is called when the trace needs to be dropped, due to memory
// pressure, before the decision_wait time has been reached.
func (c *And) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) {
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type booleanAttributeFilter struct {
key string
value bool
logger *zap.Logger
invertMatch bool
}
var _ PolicyEvaluator = (*booleanAttributeFilter)(nil)
// NewBooleanAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute that match the supplied boolean value.
func NewBooleanAttributeFilter(settings component.TelemetrySettings, key string, value bool, invertMatch bool) PolicyEvaluator {
return &booleanAttributeFilter{
key: key,
value: value,
logger: settings.Logger,
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (baf *booleanAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if baf.invertMatch {
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(baf.key); ok {
value := v.Bool()
return value != baf.value
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(baf.key); ok {
value := v.Bool()
return value != baf.value
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(baf.key); ok {
value := v.Bool()
return value == baf.value
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(baf.key); ok {
value := v.Bool()
return value == baf.value
}
return false
}), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type subpolicy struct {
// the subpolicy evaluator
evaluator PolicyEvaluator
// spans per second allocated to each subpolicy
allocatedSPS int64
// spans per second that each subpolicy sampled in this period
sampledSPS int64
}
// Composite evaluator and its internal data
type Composite struct {
// the subpolicy evaluators
subpolicies []*subpolicy
// maximum total spans per second that must be sampled
maxTotalSPS int64
// current unix timestamp second
currentSecond int64
// The time provider (can be different from clock for testing purposes)
timeProvider TimeProvider
logger *zap.Logger
}
var _ PolicyEvaluator = (*Composite)(nil)
// SubPolicyEvalParams defines the evaluator and max rate for a sub-policy
type SubPolicyEvalParams struct {
Evaluator PolicyEvaluator
MaxSpansPerSecond int64
}
// NewComposite creates a policy evaluator that evaluates the given subpolicies while enforcing their spans-per-second rate limits.
func NewComposite(
logger *zap.Logger,
maxTotalSpansPerSecond int64,
subPolicyParams []SubPolicyEvalParams,
timeProvider TimeProvider,
) PolicyEvaluator {
var subpolicies []*subpolicy
for i := 0; i < len(subPolicyParams); i++ {
sub := &subpolicy{}
sub.evaluator = subPolicyParams[i].Evaluator
sub.allocatedSPS = subPolicyParams[i].MaxSpansPerSecond
// We are just starting, so there is no previous input, set it to 0
sub.sampledSPS = 0
subpolicies = append(subpolicies, sub)
}
return &Composite{
maxTotalSPS: maxTotalSpansPerSecond,
subpolicies: subpolicies,
timeProvider: timeProvider,
logger: logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *Composite) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
// Rate limiting works by counting spans that are sampled during each 1 second
// time period. Until the total number of spans during a particular second
// exceeds the allocated number of spans-per-second the traces are sampled,
// once the limit is exceeded the traces are no longer sampled. The counter
// restarts at the beginning of each second.
// Current counters and rate limits are kept separately for each subpolicy.
currSecond := c.timeProvider.getCurSecond()
if c.currentSecond != currSecond {
// This is a new second
c.currentSecond = currSecond
// Reset counters
for i := range c.subpolicies {
c.subpolicies[i].sampledSPS = 0
}
}
for _, sub := range c.subpolicies {
decision, err := sub.evaluator.Evaluate(ctx, traceID, trace)
if err != nil {
return Unspecified, err
}
if decision == Sampled || decision == InvertSampled {
// The subpolicy made a decision to Sample. Now we need to make our decision.
// Calculate resulting SPS counter if we decide to sample this trace
spansInSecondIfSampled := sub.sampledSPS + trace.SpanCount.Load()
// Check if the rate will be within the allocated bandwidth.
if spansInSecondIfSampled <= sub.allocatedSPS && spansInSecondIfSampled <= c.maxTotalSPS {
sub.sampledSPS = spansInSecondIfSampled
// Let the sampling happen
return Sampled, nil
}
// We exceeded the rate limit. Don't sample this trace.
// Note that we will continue evaluating new incoming traces against
// allocated SPS, we do not update sub.sampledSPS here in order to give
// chance to another smaller trace to be accepted later.
return NotSampled, nil
}
}
return NotSampled, nil
}
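// Worked example (illustrative): maxTotalSPS = 100 with a single sub-policy
// allocated 60 SPS. Within one second:
//
//	trace A (40 spans): 0 + 40 <= 60  -> Sampled, sampledSPS becomes 40
//	trace B (30 spans): 40 + 30 > 60  -> NotSampled, sampledSPS stays 40
//	trace C (15 spans): 40 + 15 <= 60 -> Sampled, sampledSPS becomes 55
//
// The counter is deliberately left untouched on rejection, so a smaller trace
// arriving later can still fit into the remaining budget.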
// OnDroppedSpans is called when the trace needs to be dropped, due to memory
// pressure, before the decision_wait time has been reached.
func (c *Composite) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) {
// Here we have a number of possible solutions:
// 1. Random sample traces based on maxTotalSPS.
// 2. Perform full composite sampling logic by calling Composite.Evaluate(), essentially
// using partial trace data for sampling.
// 3. Sample everything.
//
// It seems that #2 may be the best choice from end user perspective, but
// it is not certain and it is also additional performance penalty when we are
// already under a memory (and possibly CPU) pressure situation.
//
// For now we are playing safe and go with #3. Investigating alternate options
// should be a future task.
return Sampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type latency struct {
logger *zap.Logger
thresholdMs int64
upperThresholdMs int64
}
var _ PolicyEvaluator = (*latency)(nil)
// NewLatency creates a policy evaluator that samples traces whose duration meets the configured
// threshold; if upperThresholdMs is non-zero, only traces with thresholdMs < duration <= upperThresholdMs are sampled
func NewLatency(settings component.TelemetrySettings, thresholdMs int64, upperThresholdMs int64) PolicyEvaluator {
return &latency{
logger: settings.Logger,
thresholdMs: thresholdMs,
upperThresholdMs: upperThresholdMs,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (l *latency) Evaluate(_ context.Context, _ pcommon.TraceID, traceData *TraceData) (Decision, error) {
l.logger.Debug("Evaluating spans in latency filter")
traceData.Lock()
defer traceData.Unlock()
batches := traceData.ReceivedBatches
var minTime pcommon.Timestamp
var maxTime pcommon.Timestamp
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
if minTime == 0 || span.StartTimestamp() < minTime {
minTime = span.StartTimestamp()
}
if maxTime == 0 || span.EndTimestamp() > maxTime {
maxTime = span.EndTimestamp()
}
duration := maxTime.AsTime().Sub(minTime.AsTime())
if l.upperThresholdMs == 0 {
return duration.Milliseconds() >= l.thresholdMs
}
return (l.thresholdMs < duration.Milliseconds() && duration.Milliseconds() <= l.upperThresholdMs)
}), nil
}
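// For illustration: with thresholdMs = 100 and upperThresholdMs = 0, any trace
// whose observed span window is at least 100 ms long is sampled. With
// upperThresholdMs = 1000, only traces where 100 ms < duration <= 1000 ms
// match, so two latency policies with adjacent ranges can split traces into
// latency buckets.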
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type numericAttributeFilter struct {
key string
minValue, maxValue int64
logger *zap.Logger
invertMatch bool
}
var _ PolicyEvaluator = (*numericAttributeFilter)(nil)
// NewNumericAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute in the given numeric range.
func NewNumericAttributeFilter(settings component.TelemetrySettings, key string, minValue, maxValue int64, invertMatch bool) PolicyEvaluator {
return &numericAttributeFilter{
key: key,
minValue: minValue,
maxValue: maxValue,
logger: settings.Logger,
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (naf *numericAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if naf.invertMatch {
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return false
}
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return false
}
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return true
}
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(naf.key); ok {
value := v.Int()
if value >= naf.minValue && value <= naf.maxValue {
return true
}
}
return false
},
), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"errors"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
)
type ottlConditionFilter struct {
sampleSpanExpr *ottl.ConditionSequence[ottlspan.TransformContext]
sampleSpanEventExpr *ottl.ConditionSequence[ottlspanevent.TransformContext]
errorMode ottl.ErrorMode
logger *zap.Logger
}
var _ PolicyEvaluator = (*ottlConditionFilter)(nil)
// NewOTTLConditionFilter creates a policy evaluator that samples traces matching the given OTTL span or span event conditions.
func NewOTTLConditionFilter(settings component.TelemetrySettings, spanConditions, spanEventConditions []string, errMode ottl.ErrorMode) (PolicyEvaluator, error) {
filter := &ottlConditionFilter{
errorMode: errMode,
logger: settings.Logger,
}
var err error
if len(spanConditions) == 0 && len(spanEventConditions) == 0 {
return nil, errors.New("expected at least one OTTL condition to filter on")
}
if len(spanConditions) > 0 {
if filter.sampleSpanExpr, err = filterottl.NewBoolExprForSpan(spanConditions, filterottl.StandardSpanFuncs(), errMode, settings); err != nil {
return nil, err
}
}
if len(spanEventConditions) > 0 {
if filter.sampleSpanEventExpr, err = filterottl.NewBoolExprForSpanEvent(spanEventConditions, filterottl.StandardSpanEventFuncs(), errMode, settings); err != nil {
return nil, err
}
}
return filter, nil
}
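// A minimal sketch of conditions this evaluator accepts (OTTL syntax,
// illustrative values):
//
//	eval, err := NewOTTLConditionFilter(
//		settings,
//		[]string{`attributes["http.status_code"] == 500`}, // span conditions
//		[]string{`name == "exception"`},                   // span event conditions
//		ottl.IgnoreError,
//	)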
func (ocf *ottlConditionFilter) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *TraceData) (Decision, error) {
ocf.logger.Debug("Evaluating with OTTL conditions filter", zap.String("traceID", traceID.String()))
if ocf.sampleSpanExpr == nil && ocf.sampleSpanEventExpr == nil {
return NotSampled, nil
}
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
for i := 0; i < batches.ResourceSpans().Len(); i++ {
rs := batches.ResourceSpans().At(i)
resource := rs.Resource()
for j := 0; j < rs.ScopeSpans().Len(); j++ {
ss := rs.ScopeSpans().At(j)
scope := ss.Scope()
for k := 0; k < ss.Spans().Len(); k++ {
span := ss.Spans().At(k)
var (
ok bool
err error
)
// Now we reach the span level and begin evaluation with the parsed expressions.
// The evaluation breaks as soon as:
// 1. an error occurs, or
// 2. a "Sampled" decision is made.
// Otherwise, it keeps evaluating and finally exits with a "NotSampled" decision.
// Span evaluation
if ocf.sampleSpanExpr != nil {
ok, err = ocf.sampleSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ss, rs))
if err != nil {
return Error, err
}
if ok {
return Sampled, nil
}
}
// Span event evaluation
if ocf.sampleSpanEventExpr != nil {
spanEvents := span.Events()
for l := 0; l < spanEvents.Len(); l++ {
ok, err = ocf.sampleSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvents.At(l), span, scope, resource, ss, rs))
if err != nil {
return Error, err
}
if ok {
return Sampled, nil
}
}
}
}
}
}
return NotSampled, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"hash/fnv"
"math"
"math/big"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
const (
defaultHashSalt = "default-hash-seed"
)
type probabilisticSampler struct {
logger *zap.Logger
threshold uint64
hashSalt string
}
var _ PolicyEvaluator = (*probabilisticSampler)(nil)
// NewProbabilisticSampler creates a policy evaluator that samples a percentage of
// traces.
func NewProbabilisticSampler(settings component.TelemetrySettings, hashSalt string, samplingPercentage float64) PolicyEvaluator {
if hashSalt == "" {
hashSalt = defaultHashSalt
}
return &probabilisticSampler{
logger: settings.Logger,
// calculate threshold once
threshold: calculateThreshold(samplingPercentage / 100),
hashSalt: hashSalt,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (s *probabilisticSampler) Evaluate(_ context.Context, traceID pcommon.TraceID, _ *TraceData) (Decision, error) {
s.logger.Debug("Evaluating spans in probabilistic filter")
if hashTraceID(s.hashSalt, traceID[:]) <= s.threshold {
return Sampled, nil
}
return NotSampled, nil
}
// calculateThreshold converts a ratio into a value between 0 and MaxUint64
func calculateThreshold(ratio float64) uint64 {
// Use big.Float and big.Int to calculate the threshold because converting
// math.MaxUint64 directly to float64 loses precision: the value does not fit
// into the bits float64 uses to store significant digits.
boundary := new(big.Float).SetInt(new(big.Int).SetUint64(math.MaxUint64))
res, _ := boundary.Mul(boundary, big.NewFloat(ratio)).Uint64()
return res
}
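// As a worked example of the arithmetic above: for samplingPercentage = 25 the
// ratio passed in is 0.25, so the threshold is roughly math.MaxUint64/4.
// Because FNV-1a hashes of trace IDs are approximately uniformly distributed
// over [0, MaxUint64], about a quarter of all traces hash at or below the
// threshold and are sampled.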
// hashTraceID creates a hash using the FNV-1a algorithm.
func hashTraceID(salt string, b []byte) uint64 {
hasher := fnv.New64a()
// fnv.Write never returns an error; see hash/fnv/fnv.go.
_, _ = hasher.Write([]byte(salt))
_, _ = hasher.Write(b)
return hasher.Sum64()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type rateLimiting struct {
currentSecond int64
spansInCurrentSecond int64
spansPerSecond int64
logger *zap.Logger
}
var _ PolicyEvaluator = (*rateLimiting)(nil)
// NewRateLimiting creates a policy evaluator that samples traces as long as the configured spans-per-second budget is not exceeded.
func NewRateLimiting(settings component.TelemetrySettings, spansPerSecond int64) PolicyEvaluator {
return &rateLimiting{
spansPerSecond: spansPerSecond,
logger: settings.Logger,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (r *rateLimiting) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
r.logger.Debug("Evaluating spans in rate-limiting filter")
currSecond := time.Now().Unix()
if r.currentSecond != currSecond {
r.currentSecond = currSecond
r.spansInCurrentSecond = 0
}
spansInSecondIfSampled := r.spansInCurrentSecond + trace.SpanCount.Load()
if spansInSecondIfSampled < r.spansPerSecond {
r.spansInCurrentSecond = spansInSecondIfSampled
return Sampled, nil
}
return NotSampled, nil
}
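// For illustration: with NewRateLimiting(settings, 100), a trace carrying 40
// spans is sampled only while fewer than 60 spans have already been admitted
// in the current second; once the budget is exhausted, traces are not sampled
// until the next second resets the counter.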
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.uber.org/zap"
)
type spanCount struct {
logger *zap.Logger
minSpans int32
maxSpans int32
}
var _ PolicyEvaluator = (*spanCount)(nil)
// NewSpanCount creates a policy evaluator that samples traces whose span count is within the given minimum and maximum bounds; a maximum of 0 disables the upper bound.
func NewSpanCount(settings component.TelemetrySettings, minSpans, maxSpans int32) PolicyEvaluator {
return &spanCount{
logger: settings.Logger,
minSpans: minSpans,
maxSpans: maxSpans,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (c *spanCount) Evaluate(_ context.Context, _ pcommon.TraceID, traceData *TraceData) (Decision, error) {
c.logger.Debug("Evaluating spans counts in filter")
spanCount := int(traceData.SpanCount.Load())
switch {
case c.maxSpans == 0 && spanCount >= int(c.minSpans):
return Sampled, nil
case spanCount >= int(c.minSpans) && spanCount <= int(c.maxSpans):
return Sampled, nil
default:
return NotSampled, nil
}
}
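// For illustration: NewSpanCount(settings, 3, 0) samples any trace with at
// least 3 spans, since maxSpans == 0 disables the upper bound, while
// NewSpanCount(settings, 3, 10) samples only traces with 3 to 10 spans
// inclusive.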
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"errors"
"fmt"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
type statusCodeFilter struct {
logger *zap.Logger
statusCodes []ptrace.StatusCode
}
var _ PolicyEvaluator = (*statusCodeFilter)(nil)
// NewStatusCodeFilter creates a policy evaluator that samples all traces
// containing a span with one of the given status codes.
func NewStatusCodeFilter(settings component.TelemetrySettings, statusCodeString []string) (PolicyEvaluator, error) {
if len(statusCodeString) == 0 {
return nil, errors.New("expected at least one status code to filter on")
}
statusCodes := make([]ptrace.StatusCode, len(statusCodeString))
for i := range statusCodeString {
switch statusCodeString[i] {
case "OK":
statusCodes[i] = ptrace.StatusCodeOk
case "ERROR":
statusCodes[i] = ptrace.StatusCodeError
case "UNSET":
statusCodes[i] = ptrace.StatusCodeUnset
default:
return nil, fmt.Errorf("unknown status code %q, supported: OK, ERROR, UNSET", statusCodeString[i])
}
}
return &statusCodeFilter{
logger: settings.Logger,
statusCodes: statusCodes,
}, nil
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (r *statusCodeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
r.logger.Debug("Evaluating spans in status code filter")
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
for _, statusCode := range r.statusCodes {
if span.Status().Code() == statusCode {
return true
}
}
return false
}), nil
}
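// For illustration, a minimal construction sketch (the nop settings are a
// hypothetical placeholder):
//
//	eval, err := NewStatusCodeFilter(componenttest.NewNopTelemetrySettings(), []string{"ERROR"})
//
// The resulting evaluator samples every trace containing at least one span
// whose status code is ERROR.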
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"regexp"
"github.com/golang/groupcache/lru"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.uber.org/zap"
)
const defaultCacheSize = 128
type stringAttributeFilter struct {
key string
logger *zap.Logger
// matcher defines the function used to match attribute values, either by
// exact string comparison or by regular expression
matcher func(string) bool
invertMatch bool
}
type regexStrSetting struct {
matchedAttrs *lru.Cache
filterList []*regexp.Regexp
}
var _ PolicyEvaluator = (*stringAttributeFilter)(nil)
// NewStringAttributeFilter creates a policy evaluator that samples all traces with
// the given attribute matching one of the provided values, either as exact strings or as regular expressions.
func NewStringAttributeFilter(settings component.TelemetrySettings, key string, values []string, regexMatchEnabled bool, evictSize int, invertMatch bool) PolicyEvaluator {
// initialize regex filter rules and LRU cache for matched results
if regexMatchEnabled {
if evictSize <= 0 {
evictSize = defaultCacheSize
}
filterList := addFilters(values)
regexStrSetting := &regexStrSetting{
matchedAttrs: lru.New(evictSize),
filterList: filterList,
}
return &stringAttributeFilter{
key: key,
logger: settings.Logger,
// matcher returns true if the given string matches the regex rules defined in string attribute filters
matcher: func(toMatch string) bool {
if v, ok := regexStrSetting.matchedAttrs.Get(toMatch); ok {
return v.(bool)
}
for _, r := range regexStrSetting.filterList {
if r.MatchString(toMatch) {
regexStrSetting.matchedAttrs.Add(toMatch, true)
return true
}
}
regexStrSetting.matchedAttrs.Add(toMatch, false)
return false
},
invertMatch: invertMatch,
}
}
// initialize the exact value map
valuesMap := make(map[string]struct{})
for _, value := range values {
if value != "" {
valuesMap[value] = struct{}{}
}
}
return &stringAttributeFilter{
key: key,
logger: settings.Logger,
// matcher returns true if the given string matches any of the string attribute filters
matcher: func(toMatch string) bool {
_, matched := valuesMap[toMatch]
return matched
},
invertMatch: invertMatch,
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
// The SamplingDecision is made by comparing the attribute values with the matching values,
// which might be static strings or regular expressions.
func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
saf.logger.Debug("Evaluating spans in string-tag filter")
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
if saf.invertMatch {
// With invert match, the policy matches by default and only fails to match when the key is present and its value matches
return invertHasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(saf.key); ok {
if ok := saf.matcher(v.Str()); ok {
return false
}
}
return true
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(saf.key); ok {
truncatableStr := v.Str()
if len(truncatableStr) > 0 {
if ok := saf.matcher(v.Str()); ok {
return false
}
}
}
return true
},
), nil
}
return hasResourceOrSpanWithCondition(
batches,
func(resource pcommon.Resource) bool {
if v, ok := resource.Attributes().Get(saf.key); ok {
if ok := saf.matcher(v.Str()); ok {
return true
}
}
return false
},
func(span ptrace.Span) bool {
if v, ok := span.Attributes().Get(saf.key); ok {
truncatableStr := v.Str()
if len(truncatableStr) > 0 {
if ok := saf.matcher(v.Str()); ok {
return true
}
}
}
return false
},
), nil
}
// addFilters compiles all the given filter expressions and stores them as regexes.
// Note that the regexes are not anchored: they match anywhere in the value unless the expression itself contains anchors.
func addFilters(exprs []string) []*regexp.Regexp {
list := make([]*regexp.Regexp, 0, len(exprs))
for _, entry := range exprs {
rule := regexp.MustCompile(entry)
list = append(list, rule)
}
return list
}
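// For illustration (the settings value is a hypothetical placeholder):
//
//	eval := NewStringAttributeFilter(settings, "url.path", []string{`^/api/v1/`}, true, 0, false)
//
// compiles the expression once, caches per-value match results in an LRU of
// defaultCacheSize entries (evictSize <= 0 falls back to the default), and
// samples any trace with a span or resource attribute "url.path" matching the
// regex.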
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"time"
)
// TimeProvider allows getting the current Unix second.
type TimeProvider interface {
getCurSecond() int64
}
// MonotonicClock provides the current Unix second based on the real clock.
// Use it when creating a NewComposite which should measure sample rates
// against a realtime clock (this is almost always what you want to do;
// the exception is usually automated testing, where you may want
// to use fake clocks).
type MonotonicClock struct{}
func (c MonotonicClock) getCurSecond() int64 {
return time.Now().Unix()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
tracesdk "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
type traceStateFilter struct {
key string
logger *zap.Logger
matcher func(string) bool
}
var _ PolicyEvaluator = (*traceStateFilter)(nil)
// NewTraceStateFilter creates a policy evaluator that samples all traces whose
// trace_state contains the given key with one of the given values.
func NewTraceStateFilter(settings component.TelemetrySettings, key string, values []string) PolicyEvaluator {
// initialize the exact value map
valuesMap := make(map[string]struct{})
for _, value := range values {
// a key-value pair in trace_state cannot exceed 256 characters, including the "=" separator, which takes one character
if value != "" && len(key)+len(value) < 256 {
valuesMap[value] = struct{}{}
}
}
return &traceStateFilter{
key: key,
logger: settings.Logger,
matcher: func(toMatch string) bool {
_, matched := valuesMap[toMatch]
return matched
},
}
}
// Evaluate looks at the trace data and returns a corresponding SamplingDecision.
func (tsf *traceStateFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
trace.Lock()
defer trace.Unlock()
batches := trace.ReceivedBatches
return hasSpanWithCondition(batches, func(span ptrace.Span) bool {
traceState, err := tracesdk.ParseTraceState(span.TraceState().AsRaw())
if err != nil {
return false
}
if ok := tsf.matcher(traceState.Get(tsf.key)); ok {
return true
}
return false
}), nil
}
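// For illustration: NewTraceStateFilter(settings, "vendor", []string{"abc"})
// samples any trace containing a span whose W3C trace_state includes the
// entry "vendor=abc" (for example "vendor=abc,other=xyz"); the settings value
// is a hypothetical placeholder.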
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// hasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any
// callback returns true.
func hasResourceOrSpanWithCondition(
td ptrace.Traces,
shouldSampleResource func(resource pcommon.Resource) bool,
shouldSampleSpan func(span ptrace.Span) bool,
) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
resource := rs.Resource()
if shouldSampleResource(resource) {
return Sampled
}
if hasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSampleSpan) {
return Sampled
}
}
return NotSampled
}
// invertHasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any
// callback returns false.
func invertHasResourceOrSpanWithCondition(
td ptrace.Traces,
shouldSampleResource func(resource pcommon.Resource) bool,
shouldSampleSpan func(span ptrace.Span) bool,
) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
resource := rs.Resource()
if !shouldSampleResource(resource) {
return InvertNotSampled
}
if !invertHasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSampleSpan) {
return InvertNotSampled
}
}
return InvertSampled
}
// hasSpanWithCondition iterates through all the instrumentation library spans until any callback returns true.
func hasSpanWithCondition(td ptrace.Traces, shouldSample func(span ptrace.Span) bool) Decision {
for i := 0; i < td.ResourceSpans().Len(); i++ {
rs := td.ResourceSpans().At(i)
if hasInstrumentationLibrarySpanWithCondition(rs.ScopeSpans(), shouldSample) {
return Sampled
}
}
return NotSampled
}
func hasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
for j := 0; j < ils.Spans().Len(); j++ {
span := ils.Spans().At(j)
if check(span) {
return true
}
}
}
return false
}
func invertHasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool {
for i := 0; i < ilss.Len(); i++ {
ils := ilss.At(i)
for j := 0; j < ils.Spans().Len(); j++ {
span := ils.Spans().At(j)
if !check(span) {
return false
}
}
}
return true
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package telemetry // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/telemetry"
import "go.opentelemetry.io/collector/featuregate"
var metricStatCountSpansSampledFeatureGate = featuregate.GlobalRegistry().MustRegister(
"processor.tailsamplingprocessor.metricstatcountspanssampled",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("When enabled, a new metric stat_count_spans_sampled will be available in the tail sampling processor. Differently from stat_count_traces_sampled, this metric will count the number of spans sampled or not per sampling policy, where the original counts traces."),
)
func IsMetricStatCountSpansSampledEnabled() bool {
return metricStatCountSpansSampledFeatureGate.IsEnabled()
}
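// For illustration: like other collector feature gates, this one can be
// enabled at startup, e.g. with
// --feature-gates=processor.tailsamplingprocessor.metricstatcountspanssampled
// (assuming the standard featuregate command-line handling is wired into the
// collector build).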
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor"
import (
"context"
"fmt"
"math"
"runtime"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/processor"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/telemetry"
)
// policy combines a sampling policy evaluator with the destinations to be
// used for that policy.
type policy struct {
// name used to identify this policy instance.
name string
// evaluator that decides if a trace is sampled or not by this policy instance.
evaluator sampling.PolicyEvaluator
// attribute to use in the telemetry to denote the policy.
attribute metric.MeasurementOption
}
// tailSamplingSpanProcessor handles the incoming trace data and uses the given sampling
// policy to sample traces.
type tailSamplingSpanProcessor struct {
ctx context.Context
set processor.Settings
telemetry *metadata.TelemetryBuilder
logger *zap.Logger
nextConsumer consumer.Traces
maxNumTraces uint64
policies []*policy
idToTrace sync.Map
policyTicker timeutils.TTicker
tickerFrequency time.Duration
decisionBatcher idbatcher.Batcher
sampledIDCache cache.Cache[bool]
nonSampledIDCache cache.Cache[bool]
deleteChan chan pcommon.TraceID
numTracesOnMap *atomic.Uint64
setPolicyMux sync.Mutex
pendingPolicy []PolicyCfg
}
// spanAndScope is a structure holding a span together with its instrumentation scope,
// required to preserve the instrumentation library information while sampling.
// We use pointers to allow fast lookup of the span in the map.
type spanAndScope struct {
span *ptrace.Span
instrumentationScope *pcommon.InstrumentationScope
}
var (
attrSampledTrue = metric.WithAttributes(attribute.String("sampled", "true"))
attrSampledFalse = metric.WithAttributes(attribute.String("sampled", "false"))
decisionToAttribute = map[sampling.Decision]metric.MeasurementOption{
sampling.Sampled: attrSampledTrue,
sampling.NotSampled: attrSampledFalse,
sampling.InvertNotSampled: attrSampledFalse,
sampling.InvertSampled: attrSampledTrue,
}
)
type Option func(*tailSamplingSpanProcessor)
// newTracesProcessor returns a processor.TracesProcessor that will perform tail sampling according to the given
// configuration.
func newTracesProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Traces, cfg Config, opts ...Option) (processor.Traces, error) {
telemetrySettings := set.TelemetrySettings
telemetry, err := metadata.NewTelemetryBuilder(telemetrySettings)
if err != nil {
return nil, err
}
nopCache := cache.NewNopDecisionCache[bool]()
sampledDecisions := nopCache
nonSampledDecisions := nopCache
if cfg.DecisionCache.SampledCacheSize > 0 {
sampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.SampledCacheSize)
if err != nil {
return nil, err
}
}
if cfg.DecisionCache.NonSampledCacheSize > 0 {
nonSampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.NonSampledCacheSize)
if err != nil {
return nil, err
}
}
tsp := &tailSamplingSpanProcessor{
ctx: ctx,
set: set,
telemetry: telemetry,
nextConsumer: nextConsumer,
maxNumTraces: cfg.NumTraces,
sampledIDCache: sampledDecisions,
nonSampledIDCache: nonSampledDecisions,
logger: telemetrySettings.Logger,
numTracesOnMap: &atomic.Uint64{},
deleteChan: make(chan pcommon.TraceID, cfg.NumTraces),
}
tsp.policyTicker = &timeutils.PolicyTicker{OnTickFunc: tsp.samplingPolicyOnTick}
for _, opt := range opts {
opt(tsp)
}
if tsp.tickerFrequency == 0 {
tsp.tickerFrequency = time.Second
}
if tsp.policies == nil {
err := tsp.loadSamplingPolicy(cfg.PolicyCfgs)
if err != nil {
return nil, err
}
}
if tsp.decisionBatcher == nil {
// this will start a goroutine in the background, so we run it only if everything went
// well in creating the policies
numDecisionBatches := math.Max(1, cfg.DecisionWait.Seconds())
inBatcher, err := idbatcher.New(uint64(numDecisionBatches), cfg.ExpectedNewTracesPerSec, uint64(2*runtime.NumCPU()))
if err != nil {
return nil, err
}
tsp.decisionBatcher = inBatcher
}
return tsp, nil
}
// withDecisionBatcher sets the batcher used to batch trace IDs for policy evaluation.
func withDecisionBatcher(batcher idbatcher.Batcher) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.decisionBatcher = batcher
}
}
// withPolicies sets the sampling policies to be used by the processor.
func withPolicies(policies []*policy) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.policies = policies
}
}
// withTickerFrequency sets the frequency at which the processor will evaluate the sampling policies.
func withTickerFrequency(frequency time.Duration) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.tickerFrequency = frequency
}
}
// withSampledDecisionCache sets the cache which the processor uses to store recently sampled trace IDs.
func withSampledDecisionCache(c cache.Cache[bool]) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.sampledIDCache = c
}
}
// withNonSampledDecisionCache sets the cache which the processor uses to store recently non-sampled trace IDs.
func withNonSampledDecisionCache(c cache.Cache[bool]) Option {
return func(tsp *tailSamplingSpanProcessor) {
tsp.nonSampledIDCache = c
}
}
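// For illustration, the options above compose when constructing the processor,
// e.g. in tests (a minimal sketch; ctx, set, sink, cfg, and batcher are
// hypothetical placeholders):
//
//	p, err := newTracesProcessor(ctx, set, sink, cfg,
//		withTickerFrequency(100*time.Millisecond),
//		withDecisionBatcher(batcher),
//	)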
func getPolicyEvaluator(settings component.TelemetrySettings, cfg *PolicyCfg) (sampling.PolicyEvaluator, error) {
switch cfg.Type {
case Composite:
return getNewCompositePolicy(settings, &cfg.CompositeCfg)
case And:
return getNewAndPolicy(settings, &cfg.AndCfg)
default:
return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg)
}
}
func getSharedPolicyEvaluator(settings component.TelemetrySettings, cfg *sharedPolicyCfg) (sampling.PolicyEvaluator, error) {
settings.Logger = settings.Logger.With(zap.Any("policy", cfg.Type))
switch cfg.Type {
case AlwaysSample:
return sampling.NewAlwaysSample(settings), nil
case Latency:
lfCfg := cfg.LatencyCfg
return sampling.NewLatency(settings, lfCfg.ThresholdMs, lfCfg.UpperThresholdmsMs), nil
case NumericAttribute:
nafCfg := cfg.NumericAttributeCfg
return sampling.NewNumericAttributeFilter(settings, nafCfg.Key, nafCfg.MinValue, nafCfg.MaxValue, nafCfg.InvertMatch), nil
case Probabilistic:
pCfg := cfg.ProbabilisticCfg
return sampling.NewProbabilisticSampler(settings, pCfg.HashSalt, pCfg.SamplingPercentage), nil
case StringAttribute:
safCfg := cfg.StringAttributeCfg
return sampling.NewStringAttributeFilter(settings, safCfg.Key, safCfg.Values, safCfg.EnabledRegexMatching, safCfg.CacheMaxSize, safCfg.InvertMatch), nil
case StatusCode:
scfCfg := cfg.StatusCodeCfg
return sampling.NewStatusCodeFilter(settings, scfCfg.StatusCodes)
case RateLimiting:
rlfCfg := cfg.RateLimitingCfg
return sampling.NewRateLimiting(settings, rlfCfg.SpansPerSecond), nil
case SpanCount:
spCfg := cfg.SpanCountCfg
return sampling.NewSpanCount(settings, spCfg.MinSpans, spCfg.MaxSpans), nil
case TraceState:
tsfCfg := cfg.TraceStateCfg
return sampling.NewTraceStateFilter(settings, tsfCfg.Key, tsfCfg.Values), nil
case BooleanAttribute:
bafCfg := cfg.BooleanAttributeCfg
return sampling.NewBooleanAttributeFilter(settings, bafCfg.Key, bafCfg.Value, bafCfg.InvertMatch), nil
case OTTLCondition:
ottlfCfg := cfg.OTTLConditionCfg
return sampling.NewOTTLConditionFilter(settings, ottlfCfg.SpanConditions, ottlfCfg.SpanEventConditions, ottlfCfg.ErrorMode)
default:
return nil, fmt.Errorf("unknown sampling policy type %s", cfg.Type)
}
}
type policyMetrics struct {
idNotFoundOnMapCount, evaluateErrorCount, decisionSampled, decisionNotSampled int64
}
func (tsp *tailSamplingSpanProcessor) loadSamplingPolicy(cfgs []PolicyCfg) error {
telemetrySettings := tsp.set.TelemetrySettings
componentID := tsp.set.ID.Name()
cLen := len(cfgs)
policies := make([]*policy, 0, cLen)
policyNames := make(map[string]struct{}, cLen)
for _, cfg := range cfgs {
if cfg.Name == "" {
return fmt.Errorf("policy name cannot be empty")
}
if _, exists := policyNames[cfg.Name]; exists {
return fmt.Errorf("duplicate policy name %q", cfg.Name)
}
policyNames[cfg.Name] = struct{}{}
eval, err := getPolicyEvaluator(telemetrySettings, &cfg)
if err != nil {
return fmt.Errorf("failed to create policy evaluator for %q: %w", cfg.Name, err)
}
uniquePolicyName := cfg.Name
if componentID != "" {
uniquePolicyName = fmt.Sprintf("%s.%s", componentID, cfg.Name)
}
policies = append(policies, &policy{
name: cfg.Name,
evaluator: eval,
attribute: metric.WithAttributes(attribute.String("policy", uniquePolicyName)),
})
}
tsp.policies = policies
tsp.logger.Debug("Loaded sampling policy", zap.Int("policies.len", len(policies)))
return nil
}
func (tsp *tailSamplingSpanProcessor) SetSamplingPolicy(cfgs []PolicyCfg) {
tsp.logger.Debug("Setting pending sampling policy", zap.Int("pending.len", len(cfgs)))
tsp.setPolicyMux.Lock()
defer tsp.setPolicyMux.Unlock()
tsp.pendingPolicy = cfgs
}
func (tsp *tailSamplingSpanProcessor) loadPendingSamplingPolicy() {
tsp.setPolicyMux.Lock()
defer tsp.setPolicyMux.Unlock()
// Nothing pending, do nothing.
pLen := len(tsp.pendingPolicy)
if pLen == 0 {
return
}
tsp.logger.Debug("Loading pending sampling policy", zap.Int("pending.len", pLen))
err := tsp.loadSamplingPolicy(tsp.pendingPolicy)
// Empty pending regardless of error. If policy is invalid, it will fail on
// every tick, no need to do extra work and flood the log with errors.
tsp.pendingPolicy = nil
if err != nil {
tsp.logger.Error("Failed to load pending sampling policy", zap.Error(err))
tsp.logger.Debug("Continuing to use the previously loaded sampling policy")
}
}
func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() {
tsp.logger.Debug("Sampling Policy Evaluation ticked")
tsp.loadPendingSamplingPolicy()
ctx := context.Background()
metrics := policyMetrics{}
startTime := time.Now()
batch, _ := tsp.decisionBatcher.CloseCurrentAndTakeFirstBatch()
batchLen := len(batch)
for _, id := range batch {
d, ok := tsp.idToTrace.Load(id)
if !ok {
metrics.idNotFoundOnMapCount++
continue
}
trace := d.(*sampling.TraceData)
trace.DecisionTime = time.Now()
decision := tsp.makeDecision(id, trace, &metrics)
tsp.telemetry.ProcessorTailSamplingSamplingDecisionTimerLatency.Record(tsp.ctx, int64(time.Since(startTime)/time.Microsecond))
tsp.telemetry.ProcessorTailSamplingGlobalCountTracesSampled.Add(tsp.ctx, 1, decisionToAttribute[decision])
// Sampled or not, remove the batches
trace.Lock()
allSpans := trace.ReceivedBatches
trace.FinalDecision = decision
trace.ReceivedBatches = ptrace.NewTraces()
trace.Unlock()
switch decision {
case sampling.Sampled:
tsp.releaseSampledTrace(ctx, id, allSpans)
case sampling.NotSampled:
tsp.releaseNotSampledTrace(id)
}
}
tsp.telemetry.ProcessorTailSamplingSamplingTracesOnMemory.Record(tsp.ctx, int64(tsp.numTracesOnMap.Load()))
tsp.telemetry.ProcessorTailSamplingSamplingTraceDroppedTooEarly.Add(tsp.ctx, metrics.idNotFoundOnMapCount)
tsp.telemetry.ProcessorTailSamplingSamplingPolicyEvaluationError.Add(tsp.ctx, metrics.evaluateErrorCount)
tsp.logger.Debug("Sampling policy evaluation completed",
zap.Int("batch.len", batchLen),
zap.Int64("sampled", metrics.decisionSampled),
zap.Int64("notSampled", metrics.decisionNotSampled),
zap.Int64("droppedPriorToEvaluation", metrics.idNotFoundOnMapCount),
zap.Int64("policyEvaluationErrors", metrics.evaluateErrorCount),
)
}
func (tsp *tailSamplingSpanProcessor) makeDecision(id pcommon.TraceID, trace *sampling.TraceData, metrics *policyMetrics) sampling.Decision {
var decisions [8]bool
ctx := context.Background()
startTime := time.Now()
// Check all policies before making a final decision.
for _, p := range tsp.policies {
decision, err := p.evaluator.Evaluate(ctx, id, trace)
latency := time.Since(startTime)
tsp.telemetry.ProcessorTailSamplingSamplingDecisionLatency.Record(ctx, int64(latency/time.Microsecond), p.attribute)
if err != nil {
decisions[sampling.Error] = true
metrics.evaluateErrorCount++
tsp.logger.Debug("Sampling policy error", zap.Error(err))
continue
}
tsp.telemetry.ProcessorTailSamplingCountTracesSampled.Add(ctx, 1, p.attribute, decisionToAttribute[decision])
if telemetry.IsMetricStatCountSpansSampledEnabled() {
tsp.telemetry.ProcessorTailSamplingCountSpansSampled.Add(ctx, trace.SpanCount.Load(), p.attribute, decisionToAttribute[decision])
}
decisions[decision] = true
}
var finalDecision sampling.Decision
switch {
case decisions[sampling.InvertNotSampled]: // InvertNotSampled takes precedence
finalDecision = sampling.NotSampled
case decisions[sampling.Sampled]:
finalDecision = sampling.Sampled
case decisions[sampling.InvertSampled] && !decisions[sampling.NotSampled]:
finalDecision = sampling.Sampled
default:
finalDecision = sampling.NotSampled
}
if finalDecision == sampling.Sampled {
metrics.decisionSampled++
} else {
metrics.decisionNotSampled++
}
return finalDecision
}
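// For illustration of the precedence above: if one policy returns
// InvertNotSampled and another returns Sampled, the final decision is
// NotSampled, because an inverted "do not sample" vote always wins. If the
// policies return only InvertSampled and NotSampled, the final decision is
// also NotSampled, since InvertSampled promotes a trace only when no regular
// policy voted NotSampled.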
// ConsumeTraces is required by the processor.Traces interface.
func (tsp *tailSamplingSpanProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error {
resourceSpans := td.ResourceSpans()
for i := 0; i < resourceSpans.Len(); i++ {
tsp.processTraces(resourceSpans.At(i))
}
return nil
}
func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans ptrace.ResourceSpans) map[pcommon.TraceID][]spanAndScope {
idToSpans := make(map[pcommon.TraceID][]spanAndScope)
ilss := resourceSpans.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
scope := ilss.At(j)
spans := scope.Spans()
is := scope.Scope()
spansLen := spans.Len()
for k := 0; k < spansLen; k++ {
span := spans.At(k)
key := span.TraceID()
idToSpans[key] = append(idToSpans[key], spanAndScope{
span: &span,
instrumentationScope: &is,
})
}
}
return idToSpans
}
func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans ptrace.ResourceSpans) {
currTime := time.Now()
// Group spans by their trace ID to minimize contention on idToTrace
idToSpansAndScope := tsp.groupSpansByTraceKey(resourceSpans)
var newTraceIDs int64
for id, spans := range idToSpansAndScope {
// If the trace ID is in the sampled cache, short circuit the decision
if _, ok := tsp.sampledIDCache.Get(id); ok {
tsp.logger.Debug("Trace ID is in the sampled cache", zap.Stringer("id", id))
traceTd := ptrace.NewTraces()
appendToTraces(traceTd, resourceSpans, spans)
tsp.releaseSampledTrace(tsp.ctx, id, traceTd)
tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision.
Add(tsp.ctx, int64(len(spans)), attrSampledTrue)
continue
}
// If the trace ID is in the non-sampled cache, short circuit the decision
if _, ok := tsp.nonSampledIDCache.Get(id); ok {
tsp.logger.Debug("Trace ID is in the non-sampled cache", zap.Stringer("id", id))
tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision.
Add(tsp.ctx, int64(len(spans)), attrSampledFalse)
continue
}
lenSpans := int64(len(spans))
d, loaded := tsp.idToTrace.Load(id)
if !loaded {
spanCount := &atomic.Int64{}
spanCount.Store(lenSpans)
td := &sampling.TraceData{
ArrivalTime: currTime,
SpanCount: spanCount,
ReceivedBatches: ptrace.NewTraces(),
}
if d, loaded = tsp.idToTrace.LoadOrStore(id, td); !loaded {
newTraceIDs++
tsp.decisionBatcher.AddToCurrentBatch(id)
tsp.numTracesOnMap.Add(1)
postDeletion := false
for !postDeletion {
select {
case tsp.deleteChan <- id:
postDeletion = true
default:
traceKeyToDrop := <-tsp.deleteChan
tsp.dropTrace(traceKeyToDrop, currTime)
}
}
}
}
actualData := d.(*sampling.TraceData)
if loaded {
actualData.SpanCount.Add(lenSpans)
}
actualData.Lock()
finalDecision := actualData.FinalDecision
if finalDecision == sampling.Unspecified {
// If the final decision hasn't been made, add the new spans under the lock.
appendToTraces(actualData.ReceivedBatches, resourceSpans, spans)
actualData.Unlock()
continue
}
actualData.Unlock()
switch finalDecision {
case sampling.Sampled:
traceTd := ptrace.NewTraces()
appendToTraces(traceTd, resourceSpans, spans)
tsp.releaseSampledTrace(tsp.ctx, id, traceTd)
case sampling.NotSampled:
tsp.releaseNotSampledTrace(id)
default:
tsp.logger.Warn("Unexpected sampling decision", zap.Int("decision", int(finalDecision)))
}
if !actualData.DecisionTime.IsZero() {
tsp.telemetry.ProcessorTailSamplingSamplingLateSpanAge.Record(tsp.ctx, int64(time.Since(actualData.DecisionTime)/time.Second))
}
}
tsp.telemetry.ProcessorTailSamplingNewTraceIDReceived.Add(tsp.ctx, newTraceIDs)
}
func (tsp *tailSamplingSpanProcessor) Capabilities() consumer.Capabilities {
return consumer.Capabilities{MutatesData: false}
}
// Start is invoked during service startup.
func (tsp *tailSamplingSpanProcessor) Start(context.Context, component.Host) error {
tsp.policyTicker.Start(tsp.tickerFrequency)
return nil
}
// Shutdown is invoked during service shutdown.
func (tsp *tailSamplingSpanProcessor) Shutdown(context.Context) error {
tsp.decisionBatcher.Stop()
tsp.policyTicker.Stop()
return nil
}
func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pcommon.TraceID, deletionTime time.Time) {
var trace *sampling.TraceData
if d, ok := tsp.idToTrace.Load(traceID); ok {
trace = d.(*sampling.TraceData)
tsp.idToTrace.Delete(traceID)
// Subtract one from numTracesOnMap per https://godoc.org/sync/atomic#AddUint64
tsp.numTracesOnMap.Add(^uint64(0))
}
if trace == nil {
tsp.logger.Debug("Attempt to delete trace ID not on table", zap.Stringer("id", traceID))
return
}
tsp.telemetry.ProcessorTailSamplingSamplingTraceRemovalAge.Record(tsp.ctx, int64(deletionTime.Sub(trace.ArrivalTime)/time.Second))
}
// releaseSampledTrace sends the trace data to the next consumer. It
// additionally adds the trace ID to the cache of sampled trace IDs. If the
// trace ID is cached, it deletes the spans from the internal map.
func (tsp *tailSamplingSpanProcessor) releaseSampledTrace(ctx context.Context, id pcommon.TraceID, td ptrace.Traces) {
tsp.sampledIDCache.Put(id, true)
if err := tsp.nextConsumer.ConsumeTraces(ctx, td); err != nil {
tsp.logger.Warn(
"Error sending spans to destination",
zap.Error(err))
}
_, ok := tsp.sampledIDCache.Get(id)
if ok {
tsp.dropTrace(id, time.Now())
}
}
// releaseNotSampledTrace adds the trace ID to the cache of not sampled trace
// IDs. If the trace ID is cached, it deletes the spans from the internal map.
func (tsp *tailSamplingSpanProcessor) releaseNotSampledTrace(id pcommon.TraceID) {
tsp.nonSampledIDCache.Put(id, true)
_, ok := tsp.nonSampledIDCache.Get(id)
if ok {
tsp.dropTrace(id, time.Now())
}
}
func appendToTraces(dest ptrace.Traces, rss ptrace.ResourceSpans, spanAndScopes []spanAndScope) {
rs := dest.ResourceSpans().AppendEmpty()
rss.Resource().CopyTo(rs.Resource())
scopePointerToNewScope := make(map[*pcommon.InstrumentationScope]*ptrace.ScopeSpans)
for _, spanAndScope := range spanAndScopes {
// If the scope of the spanAndScope is not in the map, add it to the map and the destination.
if scope, ok := scopePointerToNewScope[spanAndScope.instrumentationScope]; !ok {
is := rs.ScopeSpans().AppendEmpty()
spanAndScope.instrumentationScope.CopyTo(is.Scope())
scopePointerToNewScope[spanAndScope.instrumentationScope] = &is
sp := is.Spans().AppendEmpty()
spanAndScope.span.CopyTo(sp)
} else {
sp := scope.Spans().AppendEmpty()
spanAndScope.span.CopyTo(sp)
}
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"errors"
"fmt"
"net"
"go.opentelemetry.io/collector/config/configtls"
"go.uber.org/multierr"
)
// Config holds all the parameters to start an HTTP server that can receive logs from Cloudflare
type Config struct {
Logs LogsConfig `mapstructure:"logs"`
}
type LogsConfig struct {
Secret string `mapstructure:"secret"`
Endpoint string `mapstructure:"endpoint"`
TLS *configtls.ServerConfig `mapstructure:"tls"`
Attributes map[string]string `mapstructure:"attributes"`
TimestampField string `mapstructure:"timestamp_field"`
}
var (
errNoEndpoint = errors.New("an endpoint must be specified")
errNoCert = errors.New("tls was configured, but no cert file was specified")
errNoKey = errors.New("tls was configured, but no key file was specified")
defaultTimestampField = "EdgeStartTimestamp"
)
func (c *Config) Validate() error {
if c.Logs.Endpoint == "" {
return errNoEndpoint
}
var errs error
if c.Logs.TLS != nil {
// Missing key
if c.Logs.TLS.KeyFile == "" {
errs = multierr.Append(errs, errNoKey)
}
// Missing cert
if c.Logs.TLS.CertFile == "" {
errs = multierr.Append(errs, errNoCert)
}
}
_, _, err := net.SplitHostPort(c.Logs.Endpoint)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err))
}
return errs
}
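// For illustration, a configuration accepted by Validate might look like this
// in the collector config file (all values are hypothetical):
//
//	cloudflare:
//	  logs:
//	    endpoint: 0.0.0.0:12345
//	    secret: 1234567890abcdef
//	    tls:
//	      cert_file: /path/to/cert.pem
//	      key_file: /path/to/key.pem
//	    timestamp_field: EdgeStartTimestamp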
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata"
)
// NewFactory returns the component factory for the cloudflarereceiver
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithLogs(createLogsReceiver, metadata.LogsStability),
)
}
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
cfg := rConf.(*Config)
return newLogsReceiver(params, cfg, consumer)
}
func createDefaultConfig() component.Config {
return &Config{
Logs: LogsConfig{
TimestampField: defaultTimestampField,
},
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver"
import (
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"strconv"
"sync"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata"
)
type logsReceiver struct {
logger *zap.Logger
cfg *LogsConfig
server *http.Server
consumer consumer.Logs
wg *sync.WaitGroup
id component.ID // ID of the receiver component
telemetrySettings component.TelemetrySettings
}
const secretHeaderName = "X-CF-Secret"
func newLogsReceiver(params rcvr.Settings, cfg *Config, consumer consumer.Logs) (*logsReceiver, error) {
recv := &logsReceiver{
cfg: &cfg.Logs,
consumer: consumer,
logger: params.Logger,
wg: &sync.WaitGroup{},
telemetrySettings: params.TelemetrySettings,
id: params.ID,
}
recv.server = &http.Server{
Handler: http.HandlerFunc(recv.handleRequest),
ReadHeaderTimeout: 20 * time.Second,
}
if recv.cfg.TLS != nil {
tlsConfig, err := recv.cfg.TLS.LoadTLSConfig(context.Background())
if err != nil {
return nil, err
}
recv.server.TLSConfig = tlsConfig
}
return recv, nil
}
func (l *logsReceiver) Start(ctx context.Context, host component.Host) error {
return l.startListening(ctx, host)
}
func (l *logsReceiver) Shutdown(ctx context.Context) error {
l.logger.Debug("Shutting down server")
err := l.server.Shutdown(ctx)
if err != nil {
return err
}
l.logger.Debug("Waiting for shutdown to complete.")
l.wg.Wait()
return nil
}
func (l *logsReceiver) startListening(ctx context.Context, host component.Host) error {
l.logger.Debug("starting receiver HTTP server")
// We use l.server.Serve* over l.server.ListenAndServe*
// so that we can catch and return errors related to binding to the network interface on start.
var lc net.ListenConfig
listener, err := lc.Listen(ctx, "tcp", l.cfg.Endpoint)
if err != nil {
return err
}
l.wg.Add(1)
go func() {
defer l.wg.Done()
if l.cfg.TLS != nil {
l.logger.Debug("Starting ServeTLS",
zap.String("address", l.cfg.Endpoint),
zap.String("certfile", l.cfg.TLS.CertFile),
zap.String("keyfile", l.cfg.TLS.KeyFile))
err := l.server.ServeTLS(listener, l.cfg.TLS.CertFile, l.cfg.TLS.KeyFile)
l.logger.Debug("ServeTLS done")
if !errors.Is(err, http.ErrServerClosed) {
l.logger.Error("ServeTLS failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
} else {
l.logger.Debug("Starting Serve",
zap.String("address", l.cfg.Endpoint))
err := l.server.Serve(listener)
l.logger.Debug("Serve done")
if !errors.Is(err, http.ErrServerClosed) {
l.logger.Error("Serve failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}
}()
return nil
}
func (l *logsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) {
if l.cfg.Secret != "" {
secretHeader := req.Header.Get(secretHeaderName)
if secretHeader == "" {
rw.WriteHeader(http.StatusUnauthorized)
l.logger.Debug("Got payload with no Secret when it was specified in config, dropping...")
return
} else if secretHeader != l.cfg.Secret {
rw.WriteHeader(http.StatusUnauthorized)
l.logger.Debug("Got payload with invalid Secret, dropping...")
return
}
}
var payload []byte
if req.Header.Get("Content-Encoding") == "gzip" {
reader, err := gzip.NewReader(req.Body)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Got payload with gzip, but failed to read", zap.Error(err))
return
}
defer reader.Close()
// Read the decompressed response body
payload, err = io.ReadAll(reader)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Got payload with gzip, but failed to read", zap.Error(err))
return
}
} else {
var err error
payload, err = io.ReadAll(req.Body)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
}
if string(payload) == "test" {
l.logger.Info("Received test request from Cloudflare")
rw.WriteHeader(http.StatusOK)
return
}
logs, err := parsePayload(payload)
if err != nil {
rw.WriteHeader(http.StatusUnprocessableEntity)
l.logger.Error("Failed to convert cloudflare request payload to maps", zap.Error(err))
return
}
if err := l.consumer.ConsumeLogs(req.Context(), l.processLogs(pcommon.NewTimestampFromTime(time.Now()), logs)); err != nil {
errorutil.HTTPError(rw, err)
l.logger.Error("Failed to consumer alert as log", zap.Error(err))
return
}
rw.WriteHeader(http.StatusOK)
}
func parsePayload(payload []byte) ([]map[string]any, error) {
lines := bytes.Split(payload, []byte("\n"))
logs := make([]map[string]any, 0, len(lines))
for _, line := range lines {
if len(line) == 0 {
continue
}
var log map[string]any
err := json.Unmarshal(line, &log)
if err != nil {
return logs, err
}
logs = append(logs, log)
}
return logs, nil
}
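// For illustration, parsePayload expects newline-delimited JSON, one object
// per log record, e.g. this hypothetical two-record payload:
//
//	{"ZoneName":"example.com","EdgeStartTimestamp":"2023-01-01T00:00:00Z","EdgeResponseStatus":200}
//	{"ZoneName":"example.com","EdgeStartTimestamp":"2023-01-01T00:00:01Z","EdgeResponseStatus":503}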
func (l *logsReceiver) processLogs(now pcommon.Timestamp, logs []map[string]any) plog.Logs {
pLogs := plog.NewLogs()
// Group logs by their ZoneName field, when present, so it can be used as a resource attribute
groupedLogs := make(map[string][]map[string]any)
for _, log := range logs {
zone := ""
if v, ok := log["ZoneName"]; ok {
if stringV, ok := v.(string); ok {
zone = stringV
}
}
groupedLogs[zone] = append(groupedLogs[zone], log)
}
for zone, logGroup := range groupedLogs {
resourceLogs := pLogs.ResourceLogs().AppendEmpty()
if zone != "" {
resource := resourceLogs.Resource()
resource.Attributes().PutStr("cloudflare.zone", zone)
}
scopeLogs := resourceLogs.ScopeLogs().AppendEmpty()
scopeLogs.Scope().SetName(metadata.ScopeName)
for _, log := range logGroup {
logRecord := scopeLogs.LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
if v, ok := log[l.cfg.TimestampField]; ok {
if stringV, ok := v.(string); ok {
ts, err := time.Parse(time.RFC3339, stringV)
if err != nil {
l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Error(err), zap.String("value", stringV))
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
} else {
l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Any("value", v))
}
}
if v, ok := log["EdgeResponseStatus"]; ok {
sev := plog.SeverityNumberUnspecified
switch v := v.(type) {
case string:
intV, err := strconv.ParseInt(v, 10, 64)
if err != nil {
l.logger.Warn("unable to parse EdgeResponseStatus", zap.Error(err), zap.String("value", v))
} else {
sev = severityFromStatusCode(intV)
}
case int64:
sev = severityFromStatusCode(v)
case float64:
sev = severityFromStatusCode(int64(v))
}
if sev != plog.SeverityNumberUnspecified {
logRecord.SetSeverityNumber(sev)
logRecord.SetSeverityText(sev.String())
}
}
attrs := logRecord.Attributes()
for field, attribute := range l.cfg.Attributes {
if v, ok := log[field]; ok {
switch v := v.(type) {
case string:
attrs.PutStr(attribute, v)
case int:
attrs.PutInt(attribute, int64(v))
case int64:
attrs.PutInt(attribute, v)
case float64:
attrs.PutDouble(attribute, v)
case bool:
attrs.PutBool(attribute, v)
default:
l.logger.Warn("unable to translate field to attribute, unsupported type", zap.String("field", field), zap.Any("value", v), zap.String("type", fmt.Sprintf("%T", v)))
}
}
}
err := logRecord.Body().SetEmptyMap().FromRaw(log)
if err != nil {
l.logger.Warn("unable to set body", zap.Error(err))
}
}
}
return pLogs
}
// severityFromStatusCode translates HTTP status code to OpenTelemetry severity number.
func severityFromStatusCode(statusCode int64) plog.SeverityNumber {
switch {
case statusCode < 300:
return plog.SeverityNumberInfo
case statusCode < 400:
return plog.SeverityNumberInfo2
case statusCode < 500:
return plog.SeverityNumberWarn
case statusCode < 600:
return plog.SeverityNumberError
default:
return plog.SeverityNumberUnspecified
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"compress/flate"
"compress/gzip"
"fmt"
"io"
"math"
"mime"
"net/http"
"github.com/grafana/loki/pkg/push"
)
var (
contentType = http.CanonicalHeaderKey("Content-Type")
contentEnc = http.CanonicalHeaderKey("Content-Encoding")
)
const applicationJSON = "application/json"
func ParseRequest(req *http.Request) (*push.PushRequest, error) {
var body io.Reader
contentEncoding := req.Header.Get(contentEnc)
switch contentEncoding {
case "", "snappy":
body = req.Body
case "gzip":
gzipReader, err := gzip.NewReader(req.Body)
if err != nil {
return nil, err
}
defer gzipReader.Close()
body = gzipReader
case "deflate":
flateReader := flate.NewReader(req.Body)
defer flateReader.Close()
body = flateReader
default:
return nil, fmt.Errorf("Content-Encoding %q not supported", contentEncoding)
}
var pushRequest push.PushRequest
reqContentType := req.Header.Get(contentType)
reqContentType, _ /* params */, err := mime.ParseMediaType(reqContentType)
if err != nil {
return nil, err
}
switch reqContentType {
case applicationJSON:
if err = decodePushRequest(body, &pushRequest); err != nil {
return nil, err
}
default:
// When no content-type header is set or when it is set to
// `application/x-protobuf`: expect snappy compression.
if err := parseProtoReader(body, int(req.ContentLength), math.MaxInt32, &pushRequest); err != nil {
return nil, err
}
return &pushRequest, nil
}
return &pushRequest, nil
}
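// For illustration, a JSON-encoded push request (Content-Type:
// application/json) carries streams in the shape expected by
// decodePushRequest, e.g.:
//
//	{"streams":[{"stream":{"job":"app"},"values":[["1700000000000000000","log line"]]}]}
//
// Any other content type falls through to the snappy-compressed protobuf path.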
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"io"
"sort"
"strconv"
"strings"
"time"
"unsafe"
"github.com/buger/jsonparser"
"github.com/grafana/loki/pkg/push"
jsoniter "github.com/json-iterator/go"
)
// PushRequest models a log stream push but is unmarshalled to proto push format.
type PushRequest struct {
Streams []Stream `json:"streams"`
}
// Stream helps with unmarshalling of each log stream for push request.
type Stream push.Stream
func (s *Stream) UnmarshalJSON(data []byte) error {
err := jsonparser.ObjectEach(data, func(key, val []byte, ty jsonparser.ValueType, _ int) error {
switch string(key) {
case "stream":
var labels LabelSet
if err := labels.UnmarshalJSON(val); err != nil {
return err
}
s.Labels = labels.String()
case "values":
if ty == jsonparser.Null {
return nil
}
entries, err := unmarshalHTTPToLogProtoEntries(val)
if err != nil {
return err
}
s.Entries = entries
}
return nil
})
return err
}
func unmarshalHTTPToLogProtoEntries(data []byte) ([]push.Entry, error) {
var (
entries []push.Entry
parseError error
)
if _, err := jsonparser.ArrayEach(data, func(value []byte, ty jsonparser.ValueType, _ int, err error) {
if err != nil || parseError != nil {
return
}
if ty == jsonparser.Null {
return
}
e, err := unmarshalHTTPToLogProtoEntry(value)
if err != nil {
parseError = err
return
}
entries = append(entries, e)
}); err != nil {
parseError = err
}
if parseError != nil {
return nil, parseError
}
return entries, nil
}
func unmarshalHTTPToLogProtoEntry(data []byte) (push.Entry, error) {
var (
i int
parseError error
e push.Entry
)
_, err := jsonparser.ArrayEach(data, func(value []byte, t jsonparser.ValueType, _ int, _ error) {
// assert that the first two items in the array are of type string
if (i == 0 || i == 1) && t != jsonparser.String {
parseError = jsonparser.MalformedStringError
return
} else if i == 2 && t != jsonparser.Object {
parseError = jsonparser.MalformedObjectError
return
}
switch i {
case 0: // timestamp
ts, err := jsonparser.ParseInt(value)
if err != nil {
parseError = err
return
}
e.Timestamp = time.Unix(0, ts)
case 1: // value
v, err := jsonparser.ParseString(value)
if err != nil {
parseError = err
return
}
e.Line = v
case 2: // structuredMetadata
var structuredMetadata []push.LabelAdapter
err := jsonparser.ObjectEach(value, func(key, val []byte, dataType jsonparser.ValueType, _ int) error {
if dataType != jsonparser.String {
return jsonparser.MalformedStringError
}
structuredMetadata = append(structuredMetadata, push.LabelAdapter{
Name: string(key),
Value: string(val),
})
return nil
})
if err != nil {
parseError = err
return
}
e.StructuredMetadata = structuredMetadata
}
i++
})
if parseError != nil {
return e, parseError
}
return e, err
}
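// For illustration, a single entry handled above is a JSON array holding a
// nanosecond timestamp string, the log line, and optional structured
// metadata, e.g.:
//
//	["1700000000000000000", "something happened", {"trace_id": "abc123"}]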
// LabelSet is a key/value pair mapping of labels
type LabelSet map[string]string
func (l *LabelSet) UnmarshalJSON(data []byte) error {
if *l == nil {
*l = make(LabelSet)
}
return jsonparser.ObjectEach(data, func(key, val []byte, _ jsonparser.ValueType, _ int) error {
v, err := jsonparser.ParseString(val)
if err != nil {
return err
}
k, err := jsonparser.ParseString(key)
if err != nil {
return err
}
(*l)[k] = v
return nil
})
}
// String implements the Stringer interface. It returns a formatted/sorted set of label key/value pairs.
func (l LabelSet) String() string {
var b strings.Builder
keys := make([]string, 0, len(l))
for k := range l {
keys = append(keys, k)
}
sort.Strings(keys)
b.WriteByte('{')
for i, k := range keys {
if i > 0 {
b.WriteByte(',')
b.WriteByte(' ')
}
b.WriteString(k)
b.WriteByte('=')
b.WriteString(strconv.Quote(l[k]))
}
b.WriteByte('}')
return b.String()
}
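// For illustration: LabelSet{"env": "prod", "job": "app"}.String() yields
// {env="prod", job="app"}, with keys sorted and values quoted.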
// decodePushRequest directly decodes json to a push.PushRequest
func decodePushRequest(b io.Reader, r *push.PushRequest) error {
var request PushRequest
if err := jsoniter.NewDecoder(b).Decode(&request); err != nil {
return err
}
*r = push.PushRequest{
Streams: *(*[]push.Stream)(unsafe.Pointer(&request.Streams)),
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal"
import (
"bytes"
"fmt"
"io"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
)
const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)"
// parseProtoReader parses a compressed proto from an io.Reader.
func parseProtoReader(reader io.Reader, expectedSize, maxSize int, req proto.Message) error {
body, err := decompressRequest(reader, expectedSize, maxSize)
if err != nil {
return err
}
// We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first,
// which we can't override without upsetting golint.
req.Reset()
if u, ok := req.(proto.Unmarshaler); ok {
err = u.Unmarshal(body)
} else {
err = proto.NewBuffer(body).Unmarshal(req)
}
if err != nil {
return err
}
return nil
}
func decompressRequest(reader io.Reader, expectedSize, maxSize int) (body []byte, err error) {
defer func() {
if err != nil && len(body) > maxSize {
err = fmt.Errorf(messageSizeLargerErrFmt, len(body), maxSize)
}
}()
if expectedSize > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, expectedSize, maxSize)
}
buffer, ok := tryBufferFromReader(reader)
if ok {
body, err = decompressFromBuffer(buffer, maxSize)
return
}
body, err = decompressFromReader(reader, expectedSize, maxSize)
return
}
func decompressFromReader(reader io.Reader, expectedSize, maxSize int) ([]byte, error) {
var (
buf bytes.Buffer
body []byte
err error
)
if expectedSize > 0 {
buf.Grow(expectedSize + bytes.MinRead) // extra space guarantees no reallocation
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
reader = io.LimitReader(reader, int64(maxSize)+1)
_, err = buf.ReadFrom(reader)
if err != nil {
return nil, err
}
body, err = decompressFromBuffer(&buf, maxSize)
return body, err
}
func decompressFromBuffer(buffer *bytes.Buffer, maxSize int) ([]byte, error) {
if len(buffer.Bytes()) > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, len(buffer.Bytes()), maxSize)
}
size, err := snappy.DecodedLen(buffer.Bytes())
if err != nil {
return nil, err
}
if size > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, size, maxSize)
}
body, err := snappy.Decode(nil, buffer.Bytes())
if err != nil {
return nil, err
}
return body, nil
}
// tryBufferFromReader attempts to cast the reader to a `*bytes.Buffer`; this is possible when using httpgrpc.
// If it fails it will return nil and false.
func tryBufferFromReader(reader io.Reader) (*bytes.Buffer, bool) {
if bufReader, ok := reader.(interface {
BytesBuffer() *bytes.Buffer
}); ok && bufReader != nil {
return bufReader.BytesBuffer(), true
}
return nil, false
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
)
const (
accessLogStorageKey = "last_endtime_access_logs_%s"
defaultAccessLogsPollInterval = 5 * time.Minute
defaultAccessLogsPageSize = 20000
defaultAccessLogsMaxPages = 10
)
type accessLogStorageRecord struct {
ClusterName string `json:"cluster_name"`
NextPollStartTime time.Time `json:"next_poll_start_time"`
}
type accessLogClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetClusters(ctx context.Context, groupID string) ([]mongodbatlas.Cluster, error)
GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *internal.GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error)
}
type accessLogsReceiver struct {
client accessLogClient
logger *zap.Logger
storageClient storage.Client
cfg *Config
consumer consumer.Logs
record map[string][]*accessLogStorageRecord
authResult *bool
wg *sync.WaitGroup
cancel context.CancelFunc
}
func newAccessLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *accessLogsReceiver {
r := &accessLogsReceiver{
cancel: func() {},
client: internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger),
cfg: cfg,
logger: settings.Logger,
consumer: consumer,
wg: &sync.WaitGroup{},
storageClient: storage.NewNopClient(),
record: make(map[string][]*accessLogStorageRecord),
}
for _, p := range cfg.Logs.Projects {
p.populateIncludesAndExcludes()
if p.AccessLogs != nil && p.AccessLogs.IsEnabled() {
if p.AccessLogs.PageSize <= 0 {
p.AccessLogs.PageSize = defaultAccessLogsPageSize
}
if p.AccessLogs.MaxPages <= 0 {
p.AccessLogs.MaxPages = defaultAccessLogsMaxPages
}
if p.AccessLogs.PollInterval == 0 {
p.AccessLogs.PollInterval = defaultAccessLogsPollInterval
}
}
}
return r
}
func (alr *accessLogsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error {
alr.logger.Debug("Starting up access log receiver")
cancelCtx, cancel := context.WithCancel(ctx)
alr.cancel = cancel
alr.storageClient = storageClient
return alr.startPolling(cancelCtx)
}
func (alr *accessLogsReceiver) Shutdown(_ context.Context) error {
alr.logger.Debug("Shutting down accessLog receiver")
alr.cancel()
alr.wg.Wait()
return nil
}
func (alr *accessLogsReceiver) startPolling(ctx context.Context) error {
for _, pc := range alr.cfg.Logs.Projects {
if pc.AccessLogs == nil || !pc.AccessLogs.IsEnabled() {
continue
}
t := time.NewTicker(pc.AccessLogs.PollInterval)
alr.wg.Add(1)
go func() {
defer alr.wg.Done()
for {
select {
case <-t.C:
if err := alr.pollAccessLogs(ctx, pc); err != nil {
alr.logger.Error("error while polling for accessLog", zap.Error(err))
}
case <-ctx.Done():
return
}
}
}()
}
return nil
}
func (alr *accessLogsReceiver) pollAccessLogs(ctx context.Context, pc *LogsProjectConfig) error {
st := pcommon.NewTimestampFromTime(time.Now().Add(-1 * pc.AccessLogs.PollInterval)).AsTime()
et := time.Now()
project, err := alr.client.GetProject(ctx, pc.Name)
if err != nil {
alr.logger.Error("error retrieving project information", zap.Error(err), zap.String("project", pc.Name))
return err
}
alr.loadCheckpoint(ctx, project.ID)
clusters, err := alr.client.GetClusters(ctx, project.ID)
if err != nil {
alr.logger.Error("error retrieving cluster information", zap.Error(err), zap.String("project", pc.Name))
return err
}
filteredClusters, err := filterClusters(clusters, pc.ProjectConfig)
if err != nil {
alr.logger.Error("error filtering clusters", zap.Error(err), zap.String("project", pc.Name))
return err
}
for _, cluster := range filteredClusters {
clusterCheckpoint := alr.getClusterCheckpoint(project.ID, cluster.Name)
if clusterCheckpoint == nil {
clusterCheckpoint = &accessLogStorageRecord{
ClusterName: cluster.Name,
NextPollStartTime: st,
}
alr.setClusterCheckpoint(project.ID, clusterCheckpoint)
}
clusterCheckpoint.NextPollStartTime = alr.pollCluster(ctx, pc, project, cluster, clusterCheckpoint.NextPollStartTime, et)
if err = alr.checkpoint(ctx, project.ID); err != nil {
alr.logger.Warn("error checkpointing", zap.Error(err), zap.String("project", pc.Name))
}
}
return nil
}
func (alr *accessLogsReceiver) pollCluster(ctx context.Context, pc *LogsProjectConfig, project *mongodbatlas.Project, cluster mongodbatlas.Cluster, startTime, now time.Time) time.Time {
nowTimestamp := pcommon.NewTimestampFromTime(now)
opts := &internal.GetAccessLogsOptions{
MaxDate: now,
MinDate: startTime,
AuthResult: alr.authResult,
NLogs: int(pc.AccessLogs.PageSize),
}
pageCount := 0
// Assume failure, in which case we poll starting with the same startTime
// unless we successfully make request(s) for access logs and they are successfully sent to the consumer
nextPollStartTime := startTime
for {
accessLogs, err := alr.client.GetAccessLogs(ctx, project.ID, cluster.Name, opts)
pageCount++
if err != nil {
alr.logger.Error("unable to get access logs", zap.Error(err), zap.String("project", project.Name),
zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
// No logs retrieved, try again on next interval with the same start time as the API may not have
// all logs for the given time available to be queried yet (undocumented behavior)
if len(accessLogs) == 0 {
return nextPollStartTime
}
logs := transformAccessLogs(nowTimestamp, accessLogs, project, cluster, alr.logger)
if err = alr.consumer.ConsumeLogs(ctx, logs); err != nil {
alr.logger.Error("error consuming project cluster log", zap.Error(err), zap.String("project", project.Name),
zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
// The first page of results will have the latest data, so we want to update the nextPollStartTime
// There is risk of data loss at this point if we are unable to then process the remaining pages
// of data, but that is a limitation of the API that we can't work around.
if pageCount == 1 {
// This slice access is safe as we have previously confirmed that the slice is not empty
mostRecentLogTimestamp, tsErr := getTimestamp(accessLogs[0])
if tsErr != nil {
alr.logger.Error("error getting latest log timestamp for calculating next poll timestamps", zap.Error(tsErr),
zap.String("project", project.Name), zap.String("clusterName", cluster.Name))
// If we are not able to get the latest log timestamp, we have to assume that we are collecting all
// data and don't want to risk duplicated data by re-polling the same data again.
nextPollStartTime = now
} else {
nextPollStartTime = mostRecentLogTimestamp.Add(100 * time.Millisecond)
}
}
// If we get back less than the maximum number of logs, we can assume that we've retrieved all of the logs
// that are currently available for this time period, though some logs may not be available in the API yet.
if len(accessLogs) < int(pc.AccessLogs.PageSize) {
return nextPollStartTime
}
if pageCount >= int(pc.AccessLogs.MaxPages) {
alr.logger.Warn("reached maximum number of pages of access logs, increase 'max_pages' or frequency of 'poll_interval' to ensure all access logs are retrieved",
zap.Int("maxPages", int(pc.AccessLogs.MaxPages)))
return nextPollStartTime
}
// If we get back the maximum number of logs, we need to re-query with a new end time. While undocumented, the API
// returns the most recent logs first. If we get the maximum number of logs back, we can assume that
// there are more logs to be retrieved. We'll re-query with the same start time, but the end
// time set to just before the timestamp of the oldest log entry returned.
oldestLogTimestampFromPage, err := getTimestamp(accessLogs[len(accessLogs)-1])
if err != nil {
alr.logger.Error("error getting oldest log timestamp for calculating next request timestamps", zap.Error(err),
zap.String("project", project.Name), zap.String("clusterName", cluster.Name))
return nextPollStartTime
}
opts.MaxDate = oldestLogTimestampFromPage.Add(-1 * time.Millisecond)
// If the new max date is before the min date, we've retrieved all of the logs for this time period
// and receiving the maximum number of logs back is a coincidence.
if opts.MaxDate.Before(opts.MinDate) {
break
}
}
return now
}
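// A worked example of the paging window above (hypothetical timestamps, page size 3):
// page 1 returns logs at 10:05, 10:04, and 10:03, so nextPollStartTime becomes
// 10:05 + 100ms and opts.MaxDate becomes 10:03 - 1ms; page 2 returns 10:02 and 10:01,
// fewer than the page size, so polling stops and the next poll starts just after 10:05.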
func getTimestamp(log *mongodbatlas.AccessLogs) (time.Time, error) {
body, err := parseLogMessage(log)
if err != nil {
// If body couldn't be parsed, we'll still use the outer Timestamp field to determine the new max date.
body = map[string]any{}
}
return getTimestampPreparsedBody(log, body)
}
func getTimestampPreparsedBody(log *mongodbatlas.AccessLogs, body map[string]any) (time.Time, error) {
// If the log message has a timestamp, use that. When present, it has more precision than the timestamp from the access log entry.
if tMap, ok := body["t"]; ok {
if dateMap, ok := tMap.(map[string]any); ok {
if v, ok := dateMap["$date"]; ok {
if dateStr, ok := v.(string); ok {
return time.Parse(time.RFC3339, dateStr)
}
}
}
}
// If the log message doesn't have a timestamp, use the timestamp from the outer access log entry.
t, err := time.Parse(time.RFC3339, log.Timestamp)
if err != nil {
// The documentation claims ISO8601/RFC3339, but the API has been observed returning
// timestamps in UnixDate format, e.g. "Wed Apr 26 02:38:56 GMT 2023"
unixDate, err2 := time.Parse(time.UnixDate, log.Timestamp)
if err2 != nil {
// Return the original error as the documentation claims ISO8601
return time.Time{}, err
}
return unixDate, nil
}
return t, nil
}
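// A minimal sketch of both parse paths, using hypothetical values:
//
//	// preferred: the high-precision timestamp embedded in the parsed log body
//	body := map[string]any{"t": map[string]any{"$date": "2023-04-26T02:38:56.123Z"}}
//
//	// fallback layouts for the outer Timestamp field
//	t1, _ := time.Parse(time.RFC3339, "2023-04-26T02:38:56Z")
//	t2, _ := time.Parse(time.UnixDate, "Wed Apr 26 02:38:56 GMT 2023")
//	// t1 and t2 describe the same instant; only the body's $date carries milliseconds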
func parseLogMessage(log *mongodbatlas.AccessLogs) (map[string]any, error) {
var body map[string]any
if err := json.Unmarshal([]byte(log.LogLine), &body); err != nil {
return nil, err
}
return body, nil
}
func transformAccessLogs(now pcommon.Timestamp, accessLogs []*mongodbatlas.AccessLogs, p *mongodbatlas.Project, c mongodbatlas.Cluster, logger *zap.Logger) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.project.name", p.Name)
ra.PutStr("mongodbatlas.project.id", p.ID)
ra.PutStr("mongodbatlas.region.name", c.ProviderSettings.RegionName)
ra.PutStr("mongodbatlas.provider.name", c.ProviderSettings.ProviderName)
ra.PutStr("mongodbatlas.org.id", p.OrgID)
ra.PutStr("mongodbatlas.cluster.name", c.Name)
// Expected format is documented at https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Access-Tracking/operation/listAccessLogsByClusterName
logRecords := resourceLogs.ScopeLogs().AppendEmpty().LogRecords()
for _, accessLog := range accessLogs {
logRecord := logRecords.AppendEmpty()
logBody, err := parseLogMessage(accessLog)
if err != nil {
logger.Error("unable to unmarshal access log into body string", zap.Error(err))
continue
}
err = logRecord.Body().SetEmptyMap().FromRaw(logBody)
if err != nil {
logger.Error("unable to set log record body as map", zap.Error(err))
logRecord.Body().SetStr(accessLog.LogLine)
}
ts, err := getTimestampPreparsedBody(accessLog, logBody)
if err != nil {
logger.Warn("unable to interpret when an access log event was recorded, timestamp not parsed", zap.Error(err), zap.String("timestamp", accessLog.Timestamp))
logRecord.SetTimestamp(now)
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
logRecord.SetObservedTimestamp(now)
attrs := logRecord.Attributes()
attrs.PutStr("event.domain", "mongodbatlas")
logRecord.SetSeverityNumber(plog.SeverityNumberInfo)
logRecord.SetSeverityText(plog.SeverityNumberInfo.String())
if accessLog.AuthResult != nil {
status := "success"
if !*accessLog.AuthResult {
logRecord.SetSeverityNumber(plog.SeverityNumberWarn)
logRecord.SetSeverityText(plog.SeverityNumberWarn.String())
status = "failure"
}
attrs.PutStr("auth.result", status)
}
if accessLog.FailureReason != "" {
attrs.PutStr("auth.failure_reason", accessLog.FailureReason)
}
attrs.PutStr("auth.source", accessLog.AuthSource)
attrs.PutStr("username", accessLog.Username)
attrs.PutStr("hostname", accessLog.Hostname)
attrs.PutStr("remote.ip", accessLog.IPAddress)
}
return logs
}
func accessLogsCheckpointKey(groupID string) string {
return fmt.Sprintf(accessLogStorageKey, groupID)
}
func (alr *accessLogsReceiver) checkpoint(ctx context.Context, groupID string) error {
marshalBytes, err := json.Marshal(alr.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return alr.storageClient.Set(ctx, accessLogsCheckpointKey(groupID), marshalBytes)
}
func (alr *accessLogsReceiver) loadCheckpoint(ctx context.Context, groupID string) {
cBytes, err := alr.storageClient.Get(ctx, accessLogsCheckpointKey(groupID))
if err != nil {
alr.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err))
if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
return
}
if cBytes == nil {
if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
return
}
var record []*accessLogStorageRecord
if err = json.Unmarshal(cBytes, &record); err != nil {
alr.logger.Error("unable to decode stored record for access logs, continuing without a checkpoint", zap.Error(err))
if _, ok := alr.record[groupID]; !ok {
alr.record[groupID] = []*accessLogStorageRecord{}
}
}
}
func (alr *accessLogsReceiver) getClusterCheckpoint(groupID, clusterName string) *accessLogStorageRecord {
for key, value := range alr.record {
if key == groupID {
for _, v := range value {
if v.ClusterName == clusterName {
return v
}
}
}
}
return nil
}
func (alr *accessLogsReceiver) setClusterCheckpoint(groupID string, clusterCheckpoint *accessLogStorageRecord) {
groupCheckpoints, ok := alr.record[groupID]
if !ok {
alr.record[groupID] = []*accessLogStorageRecord{clusterCheckpoint}
// Return early; falling through would append the same checkpoint a second time below.
return
}
var found bool
for idx, v := range groupCheckpoints {
if v.ClusterName == clusterCheckpoint.ClusterName {
found = true
alr.record[groupID][idx] = clusterCheckpoint
}
}
if !found {
alr.record[groupID] = append(alr.record[groupID], clusterCheckpoint)
}
}
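// For illustration, the checkpoint stored under key "last_endtime_access_logs_<groupID>"
// is the whole alr.record map marshaled as JSON (hypothetical values):
//
//	{"<groupID>":[{"cluster_name":"cluster0","next_poll_start_time":"2023-04-26T02:38:56.1Z"}]}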
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha1" // #nosec G505 -- SHA1 is the algorithm mongodbatlas uses, it must be used to calculate the HMAC signature
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"strconv"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/multierr"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
// maxContentLength is the maximum payload size we will accept from incoming requests.
// Requests are generally ~1000 bytes, so we overshoot that by an order of magnitude.
// This is to protect from overly large requests.
const (
maxContentLength int64 = 16384
signatureHeaderName string = "X-MMS-Signature"
alertModeListen = "listen"
alertModePoll = "poll"
alertCacheKey = "last_recorded_alert"
defaultAlertsPollInterval = 5 * time.Minute
// defaults are based on the API docs: https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/
defaultAlertsPageSize = 100
defaultAlertsMaxPages = 10
)
type alertsClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetAlerts(ctx context.Context, groupID string, opts *internal.AlertPollOptions) ([]mongodbatlas.Alert, bool, error)
}
type alertsReceiver struct {
addr string
secret string
server *http.Server
mode string
tlsSettings *configtls.ServerConfig
consumer consumer.Logs
wg *sync.WaitGroup
// only relevant in `poll` mode
projects []*ProjectConfig
client alertsClient
privateKey string
publicKey string
backoffConfig configretry.BackOffConfig
pollInterval time.Duration
record *alertRecord
pageSize int64
maxPages int64
doneChan chan bool
storageClient storage.Client
telemetrySettings component.TelemetrySettings
}
func newAlertsReceiver(params rcvr.Settings, baseConfig *Config, consumer consumer.Logs) (*alertsReceiver, error) {
cfg := baseConfig.Alerts
var tlsConfig *tls.Config
if cfg.TLS != nil {
var err error
tlsConfig, err = cfg.TLS.LoadTLSConfig(context.Background())
if err != nil {
return nil, err
}
}
for _, p := range cfg.Projects {
p.populateIncludesAndExcludes()
}
recv := &alertsReceiver{
addr: cfg.Endpoint,
secret: string(cfg.Secret),
tlsSettings: cfg.TLS,
consumer: consumer,
mode: cfg.Mode,
projects: cfg.Projects,
backoffConfig: baseConfig.BackOffConfig,
publicKey: baseConfig.PublicKey,
privateKey: string(baseConfig.PrivateKey),
wg: &sync.WaitGroup{},
pollInterval: baseConfig.Alerts.PollInterval,
maxPages: baseConfig.Alerts.MaxPages,
pageSize: baseConfig.Alerts.PageSize,
doneChan: make(chan bool, 1),
telemetrySettings: params.TelemetrySettings,
}
if recv.mode == alertModePoll {
recv.client = internal.NewMongoDBAtlasClient(recv.publicKey, recv.privateKey, recv.backoffConfig, recv.telemetrySettings.Logger)
return recv, nil
}
s := &http.Server{
TLSConfig: tlsConfig,
Handler: http.HandlerFunc(recv.handleRequest),
ReadHeaderTimeout: 20 * time.Second,
}
recv.server = s
return recv, nil
}
func (a *alertsReceiver) Start(ctx context.Context, host component.Host, storageClient storage.Client) error {
if a.mode == alertModePoll {
return a.startPolling(ctx, storageClient)
}
return a.startListening(ctx, host)
}
func (a *alertsReceiver) startPolling(ctx context.Context, storageClient storage.Client) error {
a.telemetrySettings.Logger.Debug("starting alerts receiver in retrieval mode")
a.storageClient = storageClient
err := a.syncPersistence(ctx)
if err != nil {
a.telemetrySettings.Logger.Error("there was an error syncing the receiver with checkpoint", zap.Error(err))
}
t := time.NewTicker(a.pollInterval)
a.wg.Add(1)
go func() {
defer a.wg.Done()
for {
select {
case <-t.C:
if err := a.retrieveAndProcessAlerts(ctx); err != nil {
a.telemetrySettings.Logger.Error("unable to retrieve alerts", zap.Error(err))
}
case <-a.doneChan:
return
case <-ctx.Done():
return
}
}
}()
return nil
}
func (a *alertsReceiver) retrieveAndProcessAlerts(ctx context.Context) error {
for _, p := range a.projects {
project, err := a.client.GetProject(ctx, p.Name)
if err != nil {
a.telemetrySettings.Logger.Error("error retrieving project "+p.Name+":", zap.Error(err))
continue
}
a.pollAndProcess(ctx, p, project)
}
return a.writeCheckpoint(ctx)
}
func (a *alertsReceiver) pollAndProcess(ctx context.Context, pc *ProjectConfig, project *mongodbatlas.Project) {
for pageNum := 1; pageNum <= int(a.maxPages); pageNum++ {
projectAlerts, hasNext, err := a.client.GetAlerts(ctx, project.ID, &internal.AlertPollOptions{
PageNum: pageNum,
PageSize: int(a.pageSize),
})
if err != nil {
a.telemetrySettings.Logger.Error("unable to get alerts for project", zap.Error(err))
break
}
filteredAlerts := a.applyFilters(pc, projectAlerts)
now := pcommon.NewTimestampFromTime(time.Now())
logs, err := a.convertAlerts(now, filteredAlerts, project)
if err != nil {
a.telemetrySettings.Logger.Error("error processing alerts", zap.Error(err))
break
}
if logs.LogRecordCount() > 0 {
if err = a.consumer.ConsumeLogs(ctx, logs); err != nil {
a.telemetrySettings.Logger.Error("error consuming alerts", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (a *alertsReceiver) startListening(ctx context.Context, host component.Host) error {
a.telemetrySettings.Logger.Debug("starting alerts receiver in listening mode")
// We use a.server.Serve* over a.server.ListenAndServe*
// so that we can catch and return errors related to binding to the network interface on start.
var lc net.ListenConfig
l, err := lc.Listen(ctx, "tcp", a.addr)
if err != nil {
return err
}
a.wg.Add(1)
if a.tlsSettings != nil {
go func() {
defer a.wg.Done()
a.telemetrySettings.Logger.Debug("Starting ServeTLS",
zap.String("address", a.addr),
zap.String("certfile", a.tlsSettings.CertFile),
zap.String("keyfile", a.tlsSettings.KeyFile))
err := a.server.ServeTLS(l, a.tlsSettings.CertFile, a.tlsSettings.KeyFile)
a.telemetrySettings.Logger.Debug("Serve TLS done")
if err != http.ErrServerClosed {
a.telemetrySettings.Logger.Error("ServeTLS failed", zap.Error(err))
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}()
} else {
go func() {
defer a.wg.Done()
a.telemetrySettings.Logger.Debug("Starting Serve", zap.String("address", a.addr))
err := a.server.Serve(l)
a.telemetrySettings.Logger.Debug("Serve done")
if err != http.ErrServerClosed {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
}
}()
}
return nil
}
func (a *alertsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) {
if req.ContentLength < 0 {
rw.WriteHeader(http.StatusLengthRequired)
a.telemetrySettings.Logger.Debug("Got request with no Content-Length specified", zap.String("remote", req.RemoteAddr))
return
}
if req.ContentLength > maxContentLength {
rw.WriteHeader(http.StatusRequestEntityTooLarge)
a.telemetrySettings.Logger.Debug("Got request with large Content-Length specified",
zap.String("remote", req.RemoteAddr),
zap.Int64("content-length", req.ContentLength),
zap.Int64("max-content-length", maxContentLength))
return
}
payloadSigHeader := req.Header.Get(signatureHeaderName)
if payloadSigHeader == "" {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Got payload with no HMAC signature, dropping...")
return
}
payload := make([]byte, req.ContentLength)
_, err := io.ReadFull(req.Body, payload)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
if err = verifyHMACSignature(a.secret, payload, payloadSigHeader); err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Debug("Got payload with invalid HMAC signature, dropping...", zap.Error(err), zap.String("remote", req.RemoteAddr))
return
}
logs, err := payloadToLogs(time.Now(), payload)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
a.telemetrySettings.Logger.Error("Failed to convert log payload to log record", zap.Error(err))
return
}
if err := a.consumer.ConsumeLogs(req.Context(), logs); err != nil {
rw.WriteHeader(http.StatusInternalServerError)
a.telemetrySettings.Logger.Error("Failed to consumer alert as log", zap.Error(err))
return
}
rw.WriteHeader(http.StatusOK)
}
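// For illustration, a hypothetical webhook delivery this handler accepts:
//
//	POST / HTTP/1.1
//	Content-Length: 78
//	X-MMS-Signature: <base64 of HMAC-SHA1(secret, body)>
//
//	{"id":"alert1","groupId":"group1","eventTypeName":"HOST_DOWN","status":"OPEN"}
//
// Requests with an unknown Content-Length get 411, oversized ones 413, and a missing
// or invalid signature gets 400 before the payload is ever converted to logs.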
func (a *alertsReceiver) Shutdown(ctx context.Context) error {
if a.mode == alertModePoll {
return a.shutdownPoller(ctx)
}
return a.shutdownListener(ctx)
}
func (a *alertsReceiver) shutdownListener(ctx context.Context) error {
a.telemetrySettings.Logger.Debug("Shutting down server")
err := a.server.Shutdown(ctx)
if err != nil {
return err
}
a.telemetrySettings.Logger.Debug("Waiting for shutdown to complete.")
a.wg.Wait()
return nil
}
func (a *alertsReceiver) shutdownPoller(ctx context.Context) error {
a.telemetrySettings.Logger.Debug("Shutting down client")
close(a.doneChan)
a.wg.Wait()
return a.writeCheckpoint(ctx)
}
func (a *alertsReceiver) convertAlerts(now pcommon.Timestamp, alerts []mongodbatlas.Alert, project *mongodbatlas.Project) (plog.Logs, error) {
logs := plog.NewLogs()
var errs error
for i := range alerts {
alert := alerts[i]
resourceLogs := logs.ResourceLogs().AppendEmpty()
resourceAttrs := resourceLogs.Resource().Attributes()
resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID)
resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID)
resourceAttrs.PutStr("mongodbatlas.org.id", project.OrgID)
resourceAttrs.PutStr("mongodbatlas.project.name", project.Name)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", &alert.ClusterName)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", &alert.ReplicaSetName)
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(now)
ts, err := time.Parse(time.RFC3339, alert.Updated)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated))
continue
}
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
logRecord.SetSeverityNumber(severityFromAPIAlert(alert.Status))
logRecord.SetSeverityText(alert.Status)
// Marshaling the alert could be fairly expensive; we expect this to be acceptable
// unless there are a large number of unrecognized alerts to process.
bodyBytes, err := json.Marshal(alert)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to marshal alert into a body string")
continue
}
logRecord.Body().SetStr(string(bodyBytes))
attrs := logRecord.Attributes()
// These attributes are always present
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("event.name", alert.EventTypeName)
attrs.PutStr("status", alert.Status)
attrs.PutStr("created", alert.Created)
attrs.PutStr("updated", alert.Updated)
attrs.PutStr("id", alert.ID)
// These attributes are optional and may not be present, depending on the alert type.
putStringToMapNotNil(attrs, "metric.name", &alert.MetricName)
putStringToMapNotNil(attrs, "type_name", &alert.EventTypeName)
putStringToMapNotNil(attrs, "last_notified", &alert.LastNotified)
putStringToMapNotNil(attrs, "resolved", &alert.Resolved)
putStringToMapNotNil(attrs, "acknowledgement.comment", &alert.AcknowledgementComment)
putStringToMapNotNil(attrs, "acknowledgement.username", &alert.AcknowledgingUsername)
putStringToMapNotNil(attrs, "acknowledgement.until", &alert.AcknowledgedUntil)
if alert.CurrentValue != nil {
attrs.PutDouble("metric.value", *alert.CurrentValue.Number)
attrs.PutStr("metric.units", alert.CurrentValue.Units)
}
// Only present for HOST, HOST_METRIC, and REPLICA_SET alerts
if alert.HostnameAndPort == "" {
continue
}
host, portStr, err := net.SplitHostPort(alert.HostnameAndPort)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split host:port %s: %w", alert.HostnameAndPort, err))
continue
}
port, err := strconv.ParseInt(portStr, 10, 64)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to parse port %s: %w", portStr, err))
continue
}
attrs.PutStr("net.peer.name", host)
attrs.PutInt("net.peer.port", port)
}
return logs, errs
}
func verifyHMACSignature(secret string, payload []byte, signatureHeader string) error {
b64Decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(signatureHeader))
payloadSig, err := io.ReadAll(b64Decoder)
if err != nil {
return err
}
h := hmac.New(sha1.New, []byte(secret))
h.Write(payload)
calculatedSig := h.Sum(nil)
if !hmac.Equal(calculatedSig, payloadSig) {
return errors.New("calculated signature does not equal header signature")
}
return nil
}
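// A minimal sketch of producing a signature this check accepts (hypothetical secret
// and payload; the same hmac, sha1, and base64 primitives used above):
//
//	mac := hmac.New(sha1.New, []byte("my-webhook-secret"))
//	mac.Write([]byte(`{"status":"OPEN"}`))
//	header := base64.StdEncoding.EncodeToString(mac.Sum(nil))
//	// verifyHMACSignature("my-webhook-secret", []byte(`{"status":"OPEN"}`), header) == nil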
func payloadToLogs(now time.Time, payload []byte) (plog.Logs, error) {
var alert model.Alert
err := json.Unmarshal(payload, &alert)
if err != nil {
return plog.Logs{}, err
}
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(now))
logRecord.SetTimestamp(timestampFromAlert(alert))
logRecord.SetSeverityNumber(severityFromAlert(alert))
logRecord.Body().SetStr(string(payload))
resourceAttrs := resourceLogs.Resource().Attributes()
resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID)
resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", alert.ClusterName)
putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", alert.ReplicaSetName)
attrs := logRecord.Attributes()
// These attributes are always present
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("event.name", alert.EventType)
attrs.PutStr("message", alert.HumanReadable)
attrs.PutStr("status", alert.Status)
attrs.PutStr("created", alert.Created)
attrs.PutStr("updated", alert.Updated)
attrs.PutStr("id", alert.ID)
// These attributes are optional and may not be present, depending on the alert type.
putStringToMapNotNil(attrs, "metric.name", alert.MetricName)
putStringToMapNotNil(attrs, "type_name", alert.TypeName)
putStringToMapNotNil(attrs, "user_alias", alert.UserAlias)
putStringToMapNotNil(attrs, "last_notified", alert.LastNotified)
putStringToMapNotNil(attrs, "resolved", alert.Resolved)
putStringToMapNotNil(attrs, "acknowledgement.comment", alert.AcknowledgementComment)
putStringToMapNotNil(attrs, "acknowledgement.username", alert.AcknowledgementUsername)
putStringToMapNotNil(attrs, "acknowledgement.until", alert.AcknowledgedUntil)
if alert.CurrentValue != nil {
attrs.PutDouble("metric.value", alert.CurrentValue.Number)
attrs.PutStr("metric.units", alert.CurrentValue.Units)
}
if alert.HostNameAndPort != nil {
host, portStr, err := net.SplitHostPort(*alert.HostNameAndPort)
if err != nil {
return plog.Logs{}, fmt.Errorf("failed to split host:port %s: %w", *alert.HostNameAndPort, err)
}
port, err := strconv.ParseInt(portStr, 10, 64)
if err != nil {
return plog.Logs{}, fmt.Errorf("failed to parse port %s: %w", portStr, err)
}
attrs.PutStr("net.peer.name", host)
attrs.PutInt("net.peer.port", port)
}
return logs, nil
}
// alertRecord embeds a sync.Mutex so updates to the last-recorded timestamp are
// goroutine-safe, and the struct can still be marshaled for checkpoint storage
type alertRecord struct {
sync.Mutex
LastRecordedTime *time.Time `mapstructure:"last_recorded"`
}
func (a *alertRecord) SetLastRecorded(lastUpdated *time.Time) {
a.Lock()
a.LastRecordedTime = lastUpdated
a.Unlock()
}
func (a *alertsReceiver) syncPersistence(ctx context.Context) error {
if a.storageClient == nil {
return nil
}
cBytes, err := a.storageClient.Get(ctx, alertCacheKey)
if err != nil || cBytes == nil {
a.record = &alertRecord{}
return nil
}
var cache alertRecord
if err = json.Unmarshal(cBytes, &cache); err != nil {
return fmt.Errorf("unable to decode stored cache: %w", err)
}
a.record = &cache
return nil
}
func (a *alertsReceiver) writeCheckpoint(ctx context.Context) error {
if a.storageClient == nil {
a.telemetrySettings.Logger.Error("unable to write checkpoint since no storage client was found")
return errors.New("missing non-nil storage client")
}
marshalBytes, err := json.Marshal(&a.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return a.storageClient.Set(ctx, alertCacheKey, marshalBytes)
}
func (a *alertsReceiver) applyFilters(pConf *ProjectConfig, alerts []mongodbatlas.Alert) []mongodbatlas.Alert {
filtered := []mongodbatlas.Alert{}
lastRecordedTime := pcommon.Timestamp(0).AsTime()
if a.record.LastRecordedTime != nil {
lastRecordedTime = *a.record.LastRecordedTime
}
// we need to maintain two timestamps in order to not conflict while iterating
latestInPayload := pcommon.Timestamp(0).AsTime()
for _, alert := range alerts {
updatedTime, err := time.Parse(time.RFC3339, alert.Updated)
if err != nil {
a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated))
continue
}
if updatedTime.Before(lastRecordedTime) || updatedTime.Equal(lastRecordedTime) {
// already processed if the updated time was before or equal to the last recorded
continue
}
if len(pConf.excludesByClusterName) > 0 {
if _, ok := pConf.excludesByClusterName[alert.ClusterName]; ok {
continue
}
}
if len(pConf.IncludeClusters) > 0 {
if _, ok := pConf.includesByClusterName[alert.ClusterName]; !ok {
continue
}
}
filtered = append(filtered, alert)
if updatedTime.After(latestInPayload) {
latestInPayload = updatedTime
}
}
if latestInPayload.After(lastRecordedTime) {
a.record.SetLastRecorded(&latestInPayload)
}
return filtered
}
func timestampFromAlert(a model.Alert) pcommon.Timestamp {
if time, err := time.Parse(time.RFC3339, a.Updated); err == nil {
return pcommon.NewTimestampFromTime(time)
}
return pcommon.Timestamp(0)
}
// severityFromAlert maps the alert to a severity number.
// Currently, it just maps "OPEN" alerts to WARN, and everything else to INFO.
func severityFromAlert(a model.Alert) plog.SeverityNumber {
// Status is defined here: https://www.mongodb.com/docs/atlas/reference/api/alerts-get-alert/#response-elements
// It may also be "INFORMATIONAL" for single-fire alerts (events)
switch a.Status {
case "OPEN":
return plog.SeverityNumberWarn
default:
return plog.SeverityNumberInfo
}
}
// severityFromAPIAlert is a workaround for shared types between the API and the model
func severityFromAPIAlert(a string) plog.SeverityNumber {
switch a {
case "OPEN":
return plog.SeverityNumberWarn
default:
return plog.SeverityNumberInfo
}
}
func putStringToMapNotNil(m pcommon.Map, k string, v *string) {
if v != nil {
m.PutStr(k, *v)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"fmt"
"go.opentelemetry.io/collector/component"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter"
)
// combinedLogsReceiver wraps alerts and log receivers in a single log receiver to be consumed by the factory
type combinedLogsReceiver struct {
alerts *alertsReceiver
logs *logsReceiver
events *eventsReceiver
accessLogs *accessLogsReceiver
storageID *component.ID
id component.ID
}
// Start starts up the combined MongoDB Atlas Logs and Alert Receiver
func (c *combinedLogsReceiver) Start(ctx context.Context, host component.Host) error {
var errs error
storageClient, err := adapter.GetStorageClient(ctx, host, c.storageID, c.id)
if err != nil {
return fmt.Errorf("failed to get storage client: %w", err)
}
if c.alerts != nil {
if err := c.alerts.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.logs != nil {
if err := c.logs.Start(ctx, host); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.events != nil {
if err := c.events.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.accessLogs != nil {
if err := c.accessLogs.Start(ctx, host, storageClient); err != nil {
errs = multierr.Append(errs, err)
}
}
return errs
}
// Shutdown shuts down the combined MongoDB Atlas Logs and Alert Receiver
func (c *combinedLogsReceiver) Shutdown(ctx context.Context) error {
var errs error
if c.alerts != nil {
if err := c.alerts.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.logs != nil {
if err := c.logs.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.events != nil {
if err := c.events.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
if c.accessLogs != nil {
if err := c.accessLogs.Shutdown(ctx); err != nil {
errs = multierr.Append(errs, err)
}
}
return errs
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"errors"
"fmt"
"net"
"strings"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configopaque"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/scraper/scraperhelper"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
var _ component.Config = (*Config)(nil)
type Config struct {
scraperhelper.ControllerConfig `mapstructure:",squash"`
PublicKey string `mapstructure:"public_key"`
PrivateKey configopaque.String `mapstructure:"private_key"`
Granularity string `mapstructure:"granularity"`
MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"`
Projects []*ProjectConfig `mapstructure:"projects"`
Alerts AlertConfig `mapstructure:"alerts"`
Events *EventsConfig `mapstructure:"events"`
Logs LogConfig `mapstructure:"logs"`
BackOffConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"`
StorageID *component.ID `mapstructure:"storage"`
}
type AlertConfig struct {
Enabled bool `mapstructure:"enabled"`
Endpoint string `mapstructure:"endpoint"`
Secret configopaque.String `mapstructure:"secret"`
TLS *configtls.ServerConfig `mapstructure:"tls"`
Mode string `mapstructure:"mode"`
// these parameters are only relevant in retrieval mode
Projects []*ProjectConfig `mapstructure:"projects"`
PollInterval time.Duration `mapstructure:"poll_interval"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
}
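// A minimal sketch of the corresponding collector configuration, using the
// mapstructure keys above (all values hypothetical):
//
//	alerts:
//	  enabled: true
//	  mode: listen
//	  endpoint: "0.0.0.0:4396"
//	  secret: "my-webhook-secret"
//
// or, in retrieval mode:
//
//	alerts:
//	  enabled: true
//	  mode: poll
//	  projects:
//	    - name: "Project 0"
//	  poll_interval: 5m
//	  page_size: 100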
type LogConfig struct {
Enabled bool `mapstructure:"enabled"`
Projects []*LogsProjectConfig `mapstructure:"projects"`
}
// EventsConfig holds the configuration options for events collection
type EventsConfig struct {
Projects []*ProjectConfig `mapstructure:"projects"`
Organizations []*OrgConfig `mapstructure:"organizations"`
PollInterval time.Duration `mapstructure:"poll_interval"`
Types []string `mapstructure:"types"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
}
type LogsProjectConfig struct {
ProjectConfig `mapstructure:",squash"`
EnableAuditLogs bool `mapstructure:"collect_audit_logs"`
EnableHostLogs *bool `mapstructure:"collect_host_logs"`
AccessLogs *AccessLogsConfig `mapstructure:"access_logs"`
}
type AccessLogsConfig struct {
Enabled *bool `mapstructure:"enabled"`
PollInterval time.Duration `mapstructure:"poll_interval"`
PageSize int64 `mapstructure:"page_size"`
MaxPages int64 `mapstructure:"max_pages"`
AuthResult *bool `mapstructure:"auth_result"`
}
func (alc *AccessLogsConfig) IsEnabled() bool {
return alc.Enabled == nil || *alc.Enabled
}
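// A minimal sketch of enabling access logs via the keys above (hypothetical values):
//
//	logs:
//	  enabled: true
//	  projects:
//	    - name: "Project 0"
//	      access_logs:
//	        page_size: 20000
//	        max_pages: 10
//	        poll_interval: 5m
//
// Note that access logs default to on for a listed project: IsEnabled returns true
// whenever 'enabled' is left unset.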
type ProjectConfig struct {
Name string `mapstructure:"name"`
ExcludeClusters []string `mapstructure:"exclude_clusters"`
IncludeClusters []string `mapstructure:"include_clusters"`
includesByClusterName map[string]struct{}
excludesByClusterName map[string]struct{}
}
type OrgConfig struct {
ID string `mapstructure:"id"`
}
func (pc *ProjectConfig) populateIncludesAndExcludes() {
pc.includesByClusterName = map[string]struct{}{}
for _, inclusion := range pc.IncludeClusters {
pc.includesByClusterName[inclusion] = struct{}{}
}
pc.excludesByClusterName = map[string]struct{}{}
for _, exclusion := range pc.ExcludeClusters {
pc.excludesByClusterName[exclusion] = struct{}{}
}
}
var (
// Alerts Receiver Errors
errNoEndpoint = errors.New("an endpoint must be specified")
errNoSecret = errors.New("a webhook secret must be specified")
errNoCert = errors.New("tls was configured, but no cert file was specified")
errNoKey = errors.New("tls was configured, but no key file was specified")
errNoModeRecognized = fmt.Errorf("alert mode not recognized for mode. Known alert modes are: %s", strings.Join([]string{
alertModeListen,
alertModePoll,
}, ","))
errPageSizeIncorrect = errors.New("page size must be a value between 1 and 500")
// Logs Receiver Errors
errNoProjects = errors.New("at least one 'project' must be specified")
errNoEvents = errors.New("at least one 'project' or 'organizations' event type must be specified")
errClusterConfig = errors.New("only one of 'include_clusters' or 'exclude_clusters' may be specified")
// Access Logs Errors
errMaxPageSize = errors.New("the maximum value for 'page_size' is 20000")
)
func (c *Config) Validate() error {
var errs error
for _, project := range c.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
}
errs = multierr.Append(errs, c.Alerts.validate())
errs = multierr.Append(errs, c.Logs.validate())
if c.Events != nil {
errs = multierr.Append(errs, c.Events.validate())
}
return errs
}
func (l *LogConfig) validate() error {
if !l.Enabled {
return nil
}
var errs error
if len(l.Projects) == 0 {
errs = multierr.Append(errs, errNoProjects)
}
for _, project := range l.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
if project.AccessLogs != nil && project.AccessLogs.IsEnabled() {
if project.AccessLogs.PageSize > 20000 {
errs = multierr.Append(errs, errMaxPageSize)
}
}
}
return errs
}
func (a *AlertConfig) validate() error {
if !a.Enabled {
// No need to further validate, receiving alerts is disabled.
return nil
}
switch a.Mode {
case alertModePoll:
return a.validatePollConfig()
case alertModeListen:
return a.validateListenConfig()
default:
return errNoModeRecognized
}
}
func (a AlertConfig) validatePollConfig() error {
if len(a.Projects) == 0 {
return errNoProjects
}
// based on API limits: https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/
if a.PageSize <= 0 || a.PageSize > 500 {
return errPageSizeIncorrect
}
var errs error
for _, project := range a.Projects {
if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
errs = multierr.Append(errs, errClusterConfig)
}
}
return errs
}
func (a AlertConfig) validateListenConfig() error {
if a.Endpoint == "" {
return errNoEndpoint
}
var errs error
_, _, err := net.SplitHostPort(a.Endpoint)
if err != nil {
errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err))
}
if a.Secret == "" {
errs = multierr.Append(errs, errNoSecret)
}
if a.TLS != nil {
if a.TLS.CertFile == "" {
errs = multierr.Append(errs, errNoCert)
}
if a.TLS.KeyFile == "" {
errs = multierr.Append(errs, errNoKey)
}
}
return errs
}
func (e EventsConfig) validate() error {
if len(e.Projects) == 0 && len(e.Organizations) == 0 {
return errNoEvents
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/extension/xextension/storage"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
)
const (
eventStorageKey = "last_recorded_event"
defaultEventsMaxPages = 25
defaultEventsPageSize = 100
defaultPollInterval = time.Minute
)
type eventsClient interface {
GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error)
GetProjectEvents(ctx context.Context, groupID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error)
GetOrganization(ctx context.Context, orgID string) (*mongodbatlas.Organization, error)
GetOrganizationEvents(ctx context.Context, orgID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error)
Shutdown() error
}
type eventsReceiver struct {
client eventsClient
logger *zap.Logger
storageClient storage.Client
cfg *Config
consumer consumer.Logs
maxPages int
pageSize int
pollInterval time.Duration
wg *sync.WaitGroup
record *eventRecord // this record is used for checkpointing last processed events
cancel context.CancelFunc
}
type eventRecord struct {
NextStartTime *time.Time `mapstructure:"next_start_time"`
}
func newEventsReceiver(settings rcvr.Settings, c *Config, consumer consumer.Logs) *eventsReceiver {
r := &eventsReceiver{
client: internal.NewMongoDBAtlasClient(c.PublicKey, string(c.PrivateKey), c.BackOffConfig, settings.Logger),
cfg: c,
logger: settings.Logger,
consumer: consumer,
pollInterval: c.Events.PollInterval,
wg: &sync.WaitGroup{},
maxPages: int(c.Events.MaxPages),
pageSize: int(c.Events.PageSize),
storageClient: storage.NewNopClient(),
}
if r.maxPages == 0 {
r.maxPages = defaultEventsMaxPages
}
if r.pageSize == 0 {
r.pageSize = defaultEventsPageSize
}
if r.pollInterval == 0 {
r.pollInterval = defaultPollInterval
}
return r
}
func (er *eventsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error {
er.logger.Debug("Starting up events receiver")
cancelCtx, cancel := context.WithCancel(ctx)
er.cancel = cancel
er.storageClient = storageClient
er.loadCheckpoint(cancelCtx)
return er.startPolling(cancelCtx)
}
func (er *eventsReceiver) Shutdown(ctx context.Context) error {
er.logger.Debug("Shutting down events receiver")
er.cancel()
er.wg.Wait()
var errs []error
errs = append(errs, er.client.Shutdown())
errs = append(errs, er.checkpoint(ctx))
return errors.Join(errs...)
}
func (er *eventsReceiver) startPolling(ctx context.Context) error {
t := time.NewTicker(er.pollInterval)
er.wg.Add(1)
go func() {
defer er.wg.Done()
for {
select {
case <-t.C:
if err := er.pollEvents(ctx); err != nil {
er.logger.Error("error while polling for events", zap.Error(err))
}
case <-ctx.Done():
return
}
}
}()
return nil
}
func (er *eventsReceiver) pollEvents(ctx context.Context) error {
st := pcommon.NewTimestampFromTime(time.Now().Add(-er.pollInterval)).AsTime()
if er.record.NextStartTime != nil {
st = *er.record.NextStartTime
}
et := time.Now()
for _, pc := range er.cfg.Events.Projects {
project, err := er.client.GetProject(ctx, pc.Name)
if err != nil {
er.logger.Error("error retrieving project information for "+pc.Name+":", zap.Error(err))
return err
}
er.pollProject(ctx, project, pc, st, et)
}
for _, pc := range er.cfg.Events.Organizations {
org, err := er.client.GetOrganization(ctx, pc.ID)
if err != nil {
er.logger.Error("error retrieving org information for "+pc.ID+":", zap.Error(err))
return err
}
er.pollOrg(ctx, org, pc, st, et)
}
er.record.NextStartTime = &et
return er.checkpoint(ctx)
}
func (er *eventsReceiver) pollProject(ctx context.Context, project *mongodbatlas.Project, p *ProjectConfig, startTime, now time.Time) {
for pageN := 1; pageN <= er.maxPages; pageN++ {
opts := &internal.GetEventsOptions{
PageNum: pageN,
EventTypes: er.cfg.Events.Types,
MaxDate: now,
MinDate: startTime,
}
projectEvents, hasNext, err := er.client.GetProjectEvents(ctx, project.ID, opts)
if err != nil {
er.logger.Error("unable to get events for project", zap.Error(err), zap.String("project", p.Name))
break
}
nowTS := pcommon.NewTimestampFromTime(now)
logs := er.transformProjectEvents(nowTS, projectEvents, project)
if logs.LogRecordCount() > 0 {
if err = er.consumer.ConsumeLogs(ctx, logs); err != nil {
er.logger.Error("error consuming project events", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (er *eventsReceiver) pollOrg(ctx context.Context, org *mongodbatlas.Organization, p *OrgConfig, startTime, now time.Time) {
for pageN := 1; pageN <= er.maxPages; pageN++ {
opts := &internal.GetEventsOptions{
PageNum: pageN,
EventTypes: er.cfg.Events.Types,
MaxDate: now,
MinDate: startTime,
}
organizationEvents, hasNext, err := er.client.GetOrganizationEvents(ctx, org.ID, opts)
if err != nil {
er.logger.Error("unable to get events for organization", zap.Error(err), zap.String("organization", p.ID))
break
}
nowTS := pcommon.NewTimestampFromTime(now)
logs := er.transformOrgEvents(nowTS, organizationEvents, org)
if logs.LogRecordCount() > 0 {
if err = er.consumer.ConsumeLogs(ctx, logs); err != nil {
er.logger.Error("error consuming organization events", zap.Error(err))
break
}
}
if !hasNext {
break
}
}
}
func (er *eventsReceiver) transformProjectEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, p *mongodbatlas.Project) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.project.name", p.Name)
ra.PutStr("mongodbatlas.org.id", p.OrgID)
er.transformEvents(now, events, &resourceLogs)
return logs
}
func (er *eventsReceiver) transformOrgEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, o *mongodbatlas.Organization) plog.Logs {
logs := plog.NewLogs()
resourceLogs := logs.ResourceLogs().AppendEmpty()
ra := resourceLogs.Resource().Attributes()
ra.PutStr("mongodbatlas.org.id", o.ID)
er.transformEvents(now, events, &resourceLogs)
return logs
}
func (er *eventsReceiver) transformEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, resourceLogs *plog.ResourceLogs) {
for _, event := range events {
logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
bodyBytes, err := json.Marshal(event)
if err != nil {
er.logger.Error("unable to unmarshal event into body string", zap.Error(err))
continue
}
logRecord.Body().SetStr(string(bodyBytes))
// ISO-8601 formatted
ts, err := time.Parse(time.RFC3339, event.Created)
if err != nil {
er.logger.Warn("unable to interpret when an event was created, expecting a RFC3339 timestamp", zap.String("timestamp", event.Created), zap.String("event", event.ID))
logRecord.SetTimestamp(now)
} else {
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts))
}
logRecord.SetObservedTimestamp(now)
attrs := logRecord.Attributes()
// always present attributes
attrs.PutStr("event.domain", "mongodbatlas")
attrs.PutStr("type", event.EventTypeName)
attrs.PutStr("id", event.ID)
attrs.PutStr("group.id", event.GroupID)
parseOptionalAttributes(&attrs, event)
}
}
func (er *eventsReceiver) checkpoint(ctx context.Context) error {
marshalBytes, err := json.Marshal(er.record)
if err != nil {
return fmt.Errorf("unable to write checkpoint: %w", err)
}
return er.storageClient.Set(ctx, eventStorageKey, marshalBytes)
}
func (er *eventsReceiver) loadCheckpoint(ctx context.Context) {
cBytes, err := er.storageClient.Get(ctx, eventStorageKey)
if err != nil {
er.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err))
er.record = &eventRecord{}
return
}
if cBytes == nil {
er.record = &eventRecord{}
return
}
var record eventRecord
if err = json.Unmarshal(cBytes, &record); err != nil {
er.logger.Error("unable to decode stored record for events, continuing without a checkpoint", zap.Error(err))
er.record = &eventRecord{}
return
}
er.record = &record
}
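// For illustration: eventRecord carries only a mapstructure tag, so json.Marshal falls
// back to the Go field name, and the value stored under "last_recorded_event" looks
// like (hypothetical time):
//
//	{"NextStartTime":"2023-04-26T02:38:56Z"}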
func parseOptionalAttributes(m *pcommon.Map, event *mongodbatlas.Event) {
if event.AlertID != "" {
m.PutStr("alert.id", event.AlertID)
}
if event.AlertConfigID != "" {
m.PutStr("alert.config.id", event.AlertConfigID)
}
if event.Collection != "" {
m.PutStr("collection", event.Collection)
}
if event.Database != "" {
m.PutStr("database", event.Database)
}
if event.Hostname != "" {
m.PutStr("net.peer.name", event.Hostname)
}
if event.Port != 0 {
m.PutInt("net.peer.port", int64(event.Port))
}
if event.InvoiceID != "" {
m.PutStr("invoice.id", event.InvoiceID)
}
if event.Username != "" {
m.PutStr("user.name", event.Username)
}
if event.TargetUsername != "" {
m.PutStr("target.user.name", event.TargetUsername)
}
if event.UserID != "" {
m.PutStr("user.id", event.UserID)
}
if event.TeamID != "" {
m.PutStr("team.id", event.TeamID)
}
if event.RemoteAddress != "" {
m.PutStr("remote.ip", event.RemoteAddress)
}
if event.MetricName != "" {
m.PutStr("metric.name", event.MetricName)
}
if event.OpType != "" {
m.PutStr("event.op_type", event.OpType)
}
if event.PaymentID != "" {
m.PutStr("payment.id", event.PaymentID)
}
if event.ReplicaSetName != "" {
m.PutStr("replica_set.name", event.ReplicaSetName)
}
if event.CurrentValue != nil {
m.PutDouble("metric.value", *event.CurrentValue.Number)
m.PutStr("metric.units", event.CurrentValue.Units)
}
if event.ShardName != "" {
m.PutStr("shard.name", event.ShardName)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"errors"
"fmt"
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configretry"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/scraper/scraperhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
const (
defaultGranularity = "PT1M" // 1-minute, as per https://docs.atlas.mongodb.com/reference/api/process-measurements/
defaultAlertsEnabled = false
defaultLogsEnabled = false
)
// NewFactory creates a factory for the MongoDB Atlas receiver
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createCombinedLogReceiver, metadata.LogsStability))
}
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
cfg := rConf.(*Config)
recv := newMongoDBAtlasReceiver(params, cfg)
ms, err := newMongoDBAtlasScraper(recv)
if err != nil {
return nil, fmt.Errorf("unable to create a MongoDB Atlas Scraper instance: %w", err)
}
return scraperhelper.NewMetricsController(&cfg.ControllerConfig, params, consumer, scraperhelper.AddScraper(metadata.Type, ms))
}
func createCombinedLogReceiver(
_ context.Context,
params receiver.Settings,
rConf component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
cfg := rConf.(*Config)
if !cfg.Alerts.Enabled && !cfg.Logs.Enabled && cfg.Events == nil {
return nil, errors.New("one of 'alerts', 'events', or 'logs' must be enabled")
}
var err error
recv := &combinedLogsReceiver{
id: params.ID,
storageID: cfg.StorageID,
}
if cfg.Alerts.Enabled {
recv.alerts, err = newAlertsReceiver(params, cfg, consumer)
if err != nil {
return nil, fmt.Errorf("unable to create a MongoDB Atlas Alerts Receiver instance: %w", err)
}
}
if cfg.Logs.Enabled {
recv.logs = newMongoDBAtlasLogsReceiver(params, cfg, consumer)
// Confirm at least one project is enabled for access logs before adding
for _, project := range cfg.Logs.Projects {
if project.AccessLogs != nil && project.AccessLogs.IsEnabled() {
recv.accessLogs = newAccessLogsReceiver(params, cfg, consumer)
break
}
}
}
if cfg.Events != nil {
recv.events = newEventsReceiver(params, cfg, consumer)
}
return recv, nil
}
func createDefaultConfig() component.Config {
c := &Config{
ControllerConfig: scraperhelper.NewDefaultControllerConfig(),
Granularity: defaultGranularity,
BackOffConfig: configretry.NewDefaultBackOffConfig(),
MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(),
Alerts: AlertConfig{
Enabled: defaultAlertsEnabled,
Mode: alertModeListen,
PollInterval: defaultAlertsPollInterval,
PageSize: defaultAlertsPageSize,
MaxPages: defaultAlertsMaxPages,
},
Logs: LogConfig{
Enabled: defaultLogsEnabled,
Projects: []*LogsProjectConfig{},
},
}
// reset the default collection interval of 1 minute to 3 minutes in order to avoid null
// values for metrics that are not published more frequently
c.ControllerConfig.CollectionInterval = 3 * time.Minute
return c
}
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/collector/filter"
)
// MetricConfig provides common config for a particular metric.
type MetricConfig struct {
Enabled bool `mapstructure:"enabled"`
enabledSetByUser bool
}
func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error {
if parser == nil {
return nil
}
err := parser.Unmarshal(ms)
if err != nil {
return err
}
ms.enabledSetByUser = parser.IsSet("enabled")
return nil
}
// MetricsConfig provides config for mongodbatlas metrics.
type MetricsConfig struct {
MongodbatlasDbCounts MetricConfig `mapstructure:"mongodbatlas.db.counts"`
MongodbatlasDbSize MetricConfig `mapstructure:"mongodbatlas.db.size"`
MongodbatlasDiskPartitionIopsAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.average"`
MongodbatlasDiskPartitionIopsMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.max"`
MongodbatlasDiskPartitionLatencyAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.average"`
MongodbatlasDiskPartitionLatencyMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.max"`
MongodbatlasDiskPartitionQueueDepth MetricConfig `mapstructure:"mongodbatlas.disk.partition.queue.depth"`
MongodbatlasDiskPartitionSpaceAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.average"`
MongodbatlasDiskPartitionSpaceMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.max"`
MongodbatlasDiskPartitionThroughput MetricConfig `mapstructure:"mongodbatlas.disk.partition.throughput"`
MongodbatlasDiskPartitionUsageAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.average"`
MongodbatlasDiskPartitionUsageMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.max"`
MongodbatlasDiskPartitionUtilizationAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.average"`
MongodbatlasDiskPartitionUtilizationMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.max"`
MongodbatlasProcessAsserts MetricConfig `mapstructure:"mongodbatlas.process.asserts"`
MongodbatlasProcessBackgroundFlush MetricConfig `mapstructure:"mongodbatlas.process.background_flush"`
MongodbatlasProcessCacheIo MetricConfig `mapstructure:"mongodbatlas.process.cache.io"`
MongodbatlasProcessCacheRatio MetricConfig `mapstructure:"mongodbatlas.process.cache.ratio"`
MongodbatlasProcessCacheSize MetricConfig `mapstructure:"mongodbatlas.process.cache.size"`
MongodbatlasProcessConnections MetricConfig `mapstructure:"mongodbatlas.process.connections"`
MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"`
MongodbatlasProcessCPUChildrenNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.max"`
MongodbatlasProcessCPUChildrenUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.usage.average"`
MongodbatlasProcessCPUChildrenUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.usage.max"`
MongodbatlasProcessCPUNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.normalized.usage.average"`
MongodbatlasProcessCPUNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.normalized.usage.max"`
MongodbatlasProcessCPUUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.usage.average"`
MongodbatlasProcessCPUUsageMax MetricConfig `mapstructure:"mongodbatlas.process.cpu.usage.max"`
MongodbatlasProcessCursors MetricConfig `mapstructure:"mongodbatlas.process.cursors"`
MongodbatlasProcessDbDocumentRate MetricConfig `mapstructure:"mongodbatlas.process.db.document.rate"`
MongodbatlasProcessDbOperationsRate MetricConfig `mapstructure:"mongodbatlas.process.db.operations.rate"`
MongodbatlasProcessDbOperationsTime MetricConfig `mapstructure:"mongodbatlas.process.db.operations.time"`
MongodbatlasProcessDbQueryExecutorScanned MetricConfig `mapstructure:"mongodbatlas.process.db.query_executor.scanned"`
MongodbatlasProcessDbQueryTargetingScannedPerReturned MetricConfig `mapstructure:"mongodbatlas.process.db.query_targeting.scanned_per_returned"`
MongodbatlasProcessDbStorage MetricConfig `mapstructure:"mongodbatlas.process.db.storage"`
MongodbatlasProcessGlobalLock MetricConfig `mapstructure:"mongodbatlas.process.global_lock"`
MongodbatlasProcessIndexBtreeMissRatio MetricConfig `mapstructure:"mongodbatlas.process.index.btree_miss_ratio"`
MongodbatlasProcessIndexCounters MetricConfig `mapstructure:"mongodbatlas.process.index.counters"`
MongodbatlasProcessJournalingCommits MetricConfig `mapstructure:"mongodbatlas.process.journaling.commits"`
MongodbatlasProcessJournalingDataFiles MetricConfig `mapstructure:"mongodbatlas.process.journaling.data_files"`
MongodbatlasProcessJournalingWritten MetricConfig `mapstructure:"mongodbatlas.process.journaling.written"`
MongodbatlasProcessMemoryUsage MetricConfig `mapstructure:"mongodbatlas.process.memory.usage"`
MongodbatlasProcessNetworkIo MetricConfig `mapstructure:"mongodbatlas.process.network.io"`
MongodbatlasProcessNetworkRequests MetricConfig `mapstructure:"mongodbatlas.process.network.requests"`
MongodbatlasProcessOplogRate MetricConfig `mapstructure:"mongodbatlas.process.oplog.rate"`
MongodbatlasProcessOplogTime MetricConfig `mapstructure:"mongodbatlas.process.oplog.time"`
MongodbatlasProcessPageFaults MetricConfig `mapstructure:"mongodbatlas.process.page_faults"`
MongodbatlasProcessRestarts MetricConfig `mapstructure:"mongodbatlas.process.restarts"`
MongodbatlasProcessTickets MetricConfig `mapstructure:"mongodbatlas.process.tickets"`
MongodbatlasSystemCPUNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.cpu.normalized.usage.average"`
MongodbatlasSystemCPUNormalizedUsageMax MetricConfig `mapstructure:"mongodbatlas.system.cpu.normalized.usage.max"`
MongodbatlasSystemCPUUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.cpu.usage.average"`
MongodbatlasSystemCPUUsageMax MetricConfig `mapstructure:"mongodbatlas.system.cpu.usage.max"`
MongodbatlasSystemFtsCPUNormalizedUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.cpu.normalized.usage"`
MongodbatlasSystemFtsCPUUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.cpu.usage"`
MongodbatlasSystemFtsDiskUsed MetricConfig `mapstructure:"mongodbatlas.system.fts.disk.used"`
MongodbatlasSystemFtsMemoryUsage MetricConfig `mapstructure:"mongodbatlas.system.fts.memory.usage"`
MongodbatlasSystemMemoryUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.memory.usage.average"`
MongodbatlasSystemMemoryUsageMax MetricConfig `mapstructure:"mongodbatlas.system.memory.usage.max"`
MongodbatlasSystemNetworkIoAverage MetricConfig `mapstructure:"mongodbatlas.system.network.io.average"`
MongodbatlasSystemNetworkIoMax MetricConfig `mapstructure:"mongodbatlas.system.network.io.max"`
MongodbatlasSystemPagingIoAverage MetricConfig `mapstructure:"mongodbatlas.system.paging.io.average"`
MongodbatlasSystemPagingIoMax MetricConfig `mapstructure:"mongodbatlas.system.paging.io.max"`
MongodbatlasSystemPagingUsageAverage MetricConfig `mapstructure:"mongodbatlas.system.paging.usage.average"`
MongodbatlasSystemPagingUsageMax MetricConfig `mapstructure:"mongodbatlas.system.paging.usage.max"`
}
func DefaultMetricsConfig() MetricsConfig {
return MetricsConfig{
MongodbatlasDbCounts: MetricConfig{
Enabled: true,
},
MongodbatlasDbSize: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionIopsAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionIopsMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionLatencyAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionLatencyMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionQueueDepth: MetricConfig{
Enabled: false,
},
MongodbatlasDiskPartitionSpaceAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionSpaceMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionThroughput: MetricConfig{
Enabled: false,
},
MongodbatlasDiskPartitionUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{
Enabled: true,
},
MongodbatlasDiskPartitionUtilizationMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessAsserts: MetricConfig{
Enabled: true,
},
MongodbatlasProcessBackgroundFlush: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCacheIo: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCacheRatio: MetricConfig{
Enabled: false,
},
MongodbatlasProcessCacheSize: MetricConfig{
Enabled: true,
},
MongodbatlasProcessConnections: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUChildrenUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCPUUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasProcessCursors: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbDocumentRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbOperationsRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbOperationsTime: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbQueryExecutorScanned: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbQueryTargetingScannedPerReturned: MetricConfig{
Enabled: true,
},
MongodbatlasProcessDbStorage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessGlobalLock: MetricConfig{
Enabled: true,
},
MongodbatlasProcessIndexBtreeMissRatio: MetricConfig{
Enabled: true,
},
MongodbatlasProcessIndexCounters: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingCommits: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingDataFiles: MetricConfig{
Enabled: true,
},
MongodbatlasProcessJournalingWritten: MetricConfig{
Enabled: true,
},
MongodbatlasProcessMemoryUsage: MetricConfig{
Enabled: true,
},
MongodbatlasProcessNetworkIo: MetricConfig{
Enabled: true,
},
MongodbatlasProcessNetworkRequests: MetricConfig{
Enabled: true,
},
MongodbatlasProcessOplogRate: MetricConfig{
Enabled: true,
},
MongodbatlasProcessOplogTime: MetricConfig{
Enabled: true,
},
MongodbatlasProcessPageFaults: MetricConfig{
Enabled: true,
},
MongodbatlasProcessRestarts: MetricConfig{
Enabled: true,
},
MongodbatlasProcessTickets: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUNormalizedUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUNormalizedUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemCPUUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsCPUNormalizedUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsCPUUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsDiskUsed: MetricConfig{
Enabled: true,
},
MongodbatlasSystemFtsMemoryUsage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemMemoryUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemMemoryUsageMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemNetworkIoAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemNetworkIoMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingIoAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingIoMax: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingUsageAverage: MetricConfig{
Enabled: true,
},
MongodbatlasSystemPagingUsageMax: MetricConfig{
Enabled: true,
},
}
}
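// Illustrative sketch (hypothetical literals): layering a user override on
// top of the defaults. Because MetricConfig implements confmap's Unmarshaler
// interface, the nested Unmarshal above runs for each overridden metric.
func exampleMetricsConfigOverride() {
	cfg := DefaultMetricsConfig()
	override := confmap.NewFromStringMap(map[string]any{
		"mongodbatlas.disk.partition.throughput": map[string]any{"enabled": true},
	})
	_ = override.Unmarshal(&cfg)
	// cfg.MongodbatlasDiskPartitionThroughput.Enabled is now true; every
	// other metric keeps its default.
}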
// ResourceAttributeConfig provides common config for a particular resource attribute.
type ResourceAttributeConfig struct {
Enabled bool `mapstructure:"enabled"`
// Experimental: MetricsInclude defines a list of filters for attribute values.
// If the list is not empty, only metrics with matching resource attribute values will be emitted.
MetricsInclude []filter.Config `mapstructure:"metrics_include"`
// Experimental: MetricsExclude defines a list of filters for attribute values.
// If the list is not empty, metrics with matching resource attribute values will not be emitted.
// MetricsInclude has higher priority than MetricsExclude.
MetricsExclude []filter.Config `mapstructure:"metrics_exclude"`
enabledSetByUser bool
}
func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error {
if parser == nil {
return nil
}
err := parser.Unmarshal(rac)
if err != nil {
return err
}
rac.enabledSetByUser = parser.IsSet("enabled")
return nil
}
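// Illustrative YAML sketch (hypothetical values; the filter keys are assumed
// from the collector's filter package and not confirmed here): emitting
// metrics only for resources whose project name matches a pattern, via the
// experimental include filter declared above.
//
//	resource_attributes:
//	  mongodb_atlas.project.name:
//	    enabled: true
//	    metrics_include:
//	      - regexp: "prod-.*"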
// ResourceAttributesConfig provides config for mongodbatlas resource attributes.
type ResourceAttributesConfig struct {
MongodbAtlasClusterName ResourceAttributeConfig `mapstructure:"mongodb_atlas.cluster.name"`
MongodbAtlasDbName ResourceAttributeConfig `mapstructure:"mongodb_atlas.db.name"`
MongodbAtlasDiskPartition ResourceAttributeConfig `mapstructure:"mongodb_atlas.disk.partition"`
MongodbAtlasHostName ResourceAttributeConfig `mapstructure:"mongodb_atlas.host.name"`
MongodbAtlasOrgName ResourceAttributeConfig `mapstructure:"mongodb_atlas.org_name"`
MongodbAtlasProcessID ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.id"`
MongodbAtlasProcessPort ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.port"`
MongodbAtlasProcessTypeName ResourceAttributeConfig `mapstructure:"mongodb_atlas.process.type_name"`
MongodbAtlasProjectID ResourceAttributeConfig `mapstructure:"mongodb_atlas.project.id"`
MongodbAtlasProjectName ResourceAttributeConfig `mapstructure:"mongodb_atlas.project.name"`
MongodbAtlasProviderName ResourceAttributeConfig `mapstructure:"mongodb_atlas.provider.name"`
MongodbAtlasRegionName ResourceAttributeConfig `mapstructure:"mongodb_atlas.region.name"`
MongodbAtlasUserAlias ResourceAttributeConfig `mapstructure:"mongodb_atlas.user.alias"`
}
func DefaultResourceAttributesConfig() ResourceAttributesConfig {
return ResourceAttributesConfig{
MongodbAtlasClusterName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasDbName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasDiskPartition: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasHostName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasOrgName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessID: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessPort: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProcessTypeName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProjectID: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProjectName: ResourceAttributeConfig{
Enabled: true,
},
MongodbAtlasProviderName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasRegionName: ResourceAttributeConfig{
Enabled: false,
},
MongodbAtlasUserAlias: ResourceAttributeConfig{
Enabled: false,
},
}
}
// MetricsBuilderConfig is a configuration for mongodbatlas metrics builder.
type MetricsBuilderConfig struct {
Metrics MetricsConfig `mapstructure:"metrics"`
ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"`
}
func DefaultMetricsBuilderConfig() MetricsBuilderConfig {
return MetricsBuilderConfig{
Metrics: DefaultMetricsConfig(),
ResourceAttributes: DefaultResourceAttributesConfig(),
}
}
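// Illustrative sketch (function name hypothetical): programmatic overrides on
// top of the default builder config, e.g. in tests.
func exampleMetricsBuilderConfig() MetricsBuilderConfig {
	mbc := DefaultMetricsBuilderConfig()
	mbc.Metrics.MongodbatlasDiskPartitionQueueDepth.Enabled = true
	mbc.ResourceAttributes.MongodbAtlasClusterName.Enabled = true
	return mbc
}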
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"time"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/filter"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
)
// AttributeAssertType specifies the value assert_type attribute.
type AttributeAssertType int
const (
_ AttributeAssertType = iota
AttributeAssertTypeRegular
AttributeAssertTypeWarning
AttributeAssertTypeMsg
AttributeAssertTypeUser
)
// String returns the string representation of the AttributeAssertType.
func (av AttributeAssertType) String() string {
switch av {
case AttributeAssertTypeRegular:
return "regular"
case AttributeAssertTypeWarning:
return "warning"
case AttributeAssertTypeMsg:
return "msg"
case AttributeAssertTypeUser:
return "user"
}
return ""
}
// MapAttributeAssertType is a helper map of string to AttributeAssertType attribute value.
var MapAttributeAssertType = map[string]AttributeAssertType{
"regular": AttributeAssertTypeRegular,
"warning": AttributeAssertTypeWarning,
"msg": AttributeAssertTypeMsg,
"user": AttributeAssertTypeUser,
}
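// Illustrative sketch (function name hypothetical): round-tripping between
// the API's string form and the typed attribute value. Unknown strings miss
// the map and yield the zero value, whose String() is "". The same pattern
// applies to every Map/String pair below.
func exampleAssertTypeRoundTrip(s string) string {
	if at, ok := MapAttributeAssertType[s]; ok {
		return at.String() // e.g. "warning" -> AttributeAssertTypeWarning -> "warning"
	}
	return ""
}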
// AttributeBtreeCounterType specifies the value btree_counter_type attribute.
type AttributeBtreeCounterType int
const (
_ AttributeBtreeCounterType = iota
AttributeBtreeCounterTypeAccesses
AttributeBtreeCounterTypeHits
AttributeBtreeCounterTypeMisses
)
// String returns the string representation of the AttributeBtreeCounterType.
func (av AttributeBtreeCounterType) String() string {
switch av {
case AttributeBtreeCounterTypeAccesses:
return "accesses"
case AttributeBtreeCounterTypeHits:
return "hits"
case AttributeBtreeCounterTypeMisses:
return "misses"
}
return ""
}
// MapAttributeBtreeCounterType is a helper map of string to AttributeBtreeCounterType attribute value.
var MapAttributeBtreeCounterType = map[string]AttributeBtreeCounterType{
"accesses": AttributeBtreeCounterTypeAccesses,
"hits": AttributeBtreeCounterTypeHits,
"misses": AttributeBtreeCounterTypeMisses,
}
// AttributeCacheDirection specifies the value cache_direction attribute.
type AttributeCacheDirection int
const (
_ AttributeCacheDirection = iota
AttributeCacheDirectionReadInto
AttributeCacheDirectionWrittenFrom
)
// String returns the string representation of the AttributeCacheDirection.
func (av AttributeCacheDirection) String() string {
switch av {
case AttributeCacheDirectionReadInto:
return "read_into"
case AttributeCacheDirectionWrittenFrom:
return "written_from"
}
return ""
}
// MapAttributeCacheDirection is a helper map of string to AttributeCacheDirection attribute value.
var MapAttributeCacheDirection = map[string]AttributeCacheDirection{
"read_into": AttributeCacheDirectionReadInto,
"written_from": AttributeCacheDirectionWrittenFrom,
}
// AttributeCacheRatioType specifies the value cache_ratio_type attribute.
type AttributeCacheRatioType int
const (
_ AttributeCacheRatioType = iota
AttributeCacheRatioTypeCacheFill
AttributeCacheRatioTypeDirtyFill
)
// String returns the string representation of the AttributeCacheRatioType.
func (av AttributeCacheRatioType) String() string {
switch av {
case AttributeCacheRatioTypeCacheFill:
return "cache_fill"
case AttributeCacheRatioTypeDirtyFill:
return "dirty_fill"
}
return ""
}
// MapAttributeCacheRatioType is a helper map of string to AttributeCacheRatioType attribute value.
var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{
"cache_fill": AttributeCacheRatioTypeCacheFill,
"dirty_fill": AttributeCacheRatioTypeDirtyFill,
}
// AttributeCacheStatus specifies the value cache_status attribute.
type AttributeCacheStatus int
const (
_ AttributeCacheStatus = iota
AttributeCacheStatusDirty
AttributeCacheStatusUsed
)
// String returns the string representation of the AttributeCacheStatus.
func (av AttributeCacheStatus) String() string {
switch av {
case AttributeCacheStatusDirty:
return "dirty"
case AttributeCacheStatusUsed:
return "used"
}
return ""
}
// MapAttributeCacheStatus is a helper map of string to AttributeCacheStatus attribute value.
var MapAttributeCacheStatus = map[string]AttributeCacheStatus{
"dirty": AttributeCacheStatusDirty,
"used": AttributeCacheStatusUsed,
}
// AttributeClusterRole specifies the value cluster_role attribute.
type AttributeClusterRole int
const (
_ AttributeClusterRole = iota
AttributeClusterRolePrimary
AttributeClusterRoleReplica
)
// String returns the string representation of the AttributeClusterRole.
func (av AttributeClusterRole) String() string {
switch av {
case AttributeClusterRolePrimary:
return "primary"
case AttributeClusterRoleReplica:
return "replica"
}
return ""
}
// MapAttributeClusterRole is a helper map of string to AttributeClusterRole attribute value.
var MapAttributeClusterRole = map[string]AttributeClusterRole{
"primary": AttributeClusterRolePrimary,
"replica": AttributeClusterRoleReplica,
}
// AttributeCPUState specifies the value cpu_state attribute.
type AttributeCPUState int
const (
_ AttributeCPUState = iota
AttributeCPUStateKernel
AttributeCPUStateUser
AttributeCPUStateNice
AttributeCPUStateIowait
AttributeCPUStateIrq
AttributeCPUStateSoftirq
AttributeCPUStateGuest
AttributeCPUStateSteal
)
// String returns the string representation of the AttributeCPUState.
func (av AttributeCPUState) String() string {
switch av {
case AttributeCPUStateKernel:
return "kernel"
case AttributeCPUStateUser:
return "user"
case AttributeCPUStateNice:
return "nice"
case AttributeCPUStateIowait:
return "iowait"
case AttributeCPUStateIrq:
return "irq"
case AttributeCPUStateSoftirq:
return "softirq"
case AttributeCPUStateGuest:
return "guest"
case AttributeCPUStateSteal:
return "steal"
}
return ""
}
// MapAttributeCPUState is a helper map of string to AttributeCPUState attribute value.
var MapAttributeCPUState = map[string]AttributeCPUState{
"kernel": AttributeCPUStateKernel,
"user": AttributeCPUStateUser,
"nice": AttributeCPUStateNice,
"iowait": AttributeCPUStateIowait,
"irq": AttributeCPUStateIrq,
"softirq": AttributeCPUStateSoftirq,
"guest": AttributeCPUStateGuest,
"steal": AttributeCPUStateSteal,
}
// AttributeCursorState specifies the value cursor_state attribute.
type AttributeCursorState int
const (
_ AttributeCursorState = iota
AttributeCursorStateTimedOut
AttributeCursorStateOpen
)
// String returns the string representation of the AttributeCursorState.
func (av AttributeCursorState) String() string {
switch av {
case AttributeCursorStateTimedOut:
return "timed_out"
case AttributeCursorStateOpen:
return "open"
}
return ""
}
// MapAttributeCursorState is a helper map of string to AttributeCursorState attribute value.
var MapAttributeCursorState = map[string]AttributeCursorState{
"timed_out": AttributeCursorStateTimedOut,
"open": AttributeCursorStateOpen,
}
// AttributeDirection specifies the value direction attribute.
type AttributeDirection int
const (
_ AttributeDirection = iota
AttributeDirectionReceive
AttributeDirectionTransmit
)
// String returns the string representation of the AttributeDirection.
func (av AttributeDirection) String() string {
switch av {
case AttributeDirectionReceive:
return "receive"
case AttributeDirectionTransmit:
return "transmit"
}
return ""
}
// MapAttributeDirection is a helper map of string to AttributeDirection attribute value.
var MapAttributeDirection = map[string]AttributeDirection{
"receive": AttributeDirectionReceive,
"transmit": AttributeDirectionTransmit,
}
// AttributeDiskDirection specifies the value disk_direction attribute.
type AttributeDiskDirection int
const (
_ AttributeDiskDirection = iota
AttributeDiskDirectionRead
AttributeDiskDirectionWrite
AttributeDiskDirectionTotal
)
// String returns the string representation of the AttributeDiskDirection.
func (av AttributeDiskDirection) String() string {
switch av {
case AttributeDiskDirectionRead:
return "read"
case AttributeDiskDirectionWrite:
return "write"
case AttributeDiskDirectionTotal:
return "total"
}
return ""
}
// MapAttributeDiskDirection is a helper map of string to AttributeDiskDirection attribute value.
var MapAttributeDiskDirection = map[string]AttributeDiskDirection{
"read": AttributeDiskDirectionRead,
"write": AttributeDiskDirectionWrite,
"total": AttributeDiskDirectionTotal,
}
// AttributeDiskStatus specifies the value disk_status attribute.
type AttributeDiskStatus int
const (
_ AttributeDiskStatus = iota
AttributeDiskStatusFree
AttributeDiskStatusUsed
)
// String returns the string representation of the AttributeDiskStatus.
func (av AttributeDiskStatus) String() string {
switch av {
case AttributeDiskStatusFree:
return "free"
case AttributeDiskStatusUsed:
return "used"
}
return ""
}
// MapAttributeDiskStatus is a helper map of string to AttributeDiskStatus attribute value.
var MapAttributeDiskStatus = map[string]AttributeDiskStatus{
"free": AttributeDiskStatusFree,
"used": AttributeDiskStatusUsed,
}
// AttributeDocumentStatus specifies the value document_status attribute.
type AttributeDocumentStatus int
const (
_ AttributeDocumentStatus = iota
AttributeDocumentStatusReturned
AttributeDocumentStatusInserted
AttributeDocumentStatusUpdated
AttributeDocumentStatusDeleted
)
// String returns the string representation of the AttributeDocumentStatus.
func (av AttributeDocumentStatus) String() string {
switch av {
case AttributeDocumentStatusReturned:
return "returned"
case AttributeDocumentStatusInserted:
return "inserted"
case AttributeDocumentStatusUpdated:
return "updated"
case AttributeDocumentStatusDeleted:
return "deleted"
}
return ""
}
// MapAttributeDocumentStatus is a helper map of string to AttributeDocumentStatus attribute value.
var MapAttributeDocumentStatus = map[string]AttributeDocumentStatus{
"returned": AttributeDocumentStatusReturned,
"inserted": AttributeDocumentStatusInserted,
"updated": AttributeDocumentStatusUpdated,
"deleted": AttributeDocumentStatusDeleted,
}
// AttributeExecutionType specifies the value execution_type attribute.
type AttributeExecutionType int
const (
_ AttributeExecutionType = iota
AttributeExecutionTypeReads
AttributeExecutionTypeWrites
AttributeExecutionTypeCommands
)
// String returns the string representation of the AttributeExecutionType.
func (av AttributeExecutionType) String() string {
switch av {
case AttributeExecutionTypeReads:
return "reads"
case AttributeExecutionTypeWrites:
return "writes"
case AttributeExecutionTypeCommands:
return "commands"
}
return ""
}
// MapAttributeExecutionType is a helper map of string to AttributeExecutionType attribute value.
var MapAttributeExecutionType = map[string]AttributeExecutionType{
"reads": AttributeExecutionTypeReads,
"writes": AttributeExecutionTypeWrites,
"commands": AttributeExecutionTypeCommands,
}
// AttributeGlobalLockState specifies the value global_lock_state attribute.
type AttributeGlobalLockState int
const (
_ AttributeGlobalLockState = iota
AttributeGlobalLockStateCurrentQueueTotal
AttributeGlobalLockStateCurrentQueueReaders
AttributeGlobalLockStateCurrentQueueWriters
)
// String returns the string representation of the AttributeGlobalLockState.
func (av AttributeGlobalLockState) String() string {
switch av {
case AttributeGlobalLockStateCurrentQueueTotal:
return "current_queue_total"
case AttributeGlobalLockStateCurrentQueueReaders:
return "current_queue_readers"
case AttributeGlobalLockStateCurrentQueueWriters:
return "current_queue_writers"
}
return ""
}
// MapAttributeGlobalLockState is a helper map of string to AttributeGlobalLockState attribute value.
var MapAttributeGlobalLockState = map[string]AttributeGlobalLockState{
"current_queue_total": AttributeGlobalLockStateCurrentQueueTotal,
"current_queue_readers": AttributeGlobalLockStateCurrentQueueReaders,
"current_queue_writers": AttributeGlobalLockStateCurrentQueueWriters,
}
// AttributeMemoryIssueType specifies the value memory_issue_type attribute.
type AttributeMemoryIssueType int
const (
_ AttributeMemoryIssueType = iota
AttributeMemoryIssueTypeExtraInfo
AttributeMemoryIssueTypeGlobalAccessesNotInMemory
AttributeMemoryIssueTypeExceptionsThrown
)
// String returns the string representation of the AttributeMemoryIssueType.
func (av AttributeMemoryIssueType) String() string {
switch av {
case AttributeMemoryIssueTypeExtraInfo:
return "extra_info"
case AttributeMemoryIssueTypeGlobalAccessesNotInMemory:
return "global_accesses_not_in_memory"
case AttributeMemoryIssueTypeExceptionsThrown:
return "exceptions_thrown"
}
return ""
}
// MapAttributeMemoryIssueType is a helper map of string to AttributeMemoryIssueType attribute value.
var MapAttributeMemoryIssueType = map[string]AttributeMemoryIssueType{
"extra_info": AttributeMemoryIssueTypeExtraInfo,
"global_accesses_not_in_memory": AttributeMemoryIssueTypeGlobalAccessesNotInMemory,
"exceptions_thrown": AttributeMemoryIssueTypeExceptionsThrown,
}
// AttributeMemoryState specifies the value memory_state attribute.
type AttributeMemoryState int
const (
_ AttributeMemoryState = iota
AttributeMemoryStateResident
AttributeMemoryStateVirtual
AttributeMemoryStateMapped
AttributeMemoryStateComputed
AttributeMemoryStateShared
AttributeMemoryStateFree
AttributeMemoryStateUsed
)
// String returns the string representation of the AttributeMemoryState.
func (av AttributeMemoryState) String() string {
switch av {
case AttributeMemoryStateResident:
return "resident"
case AttributeMemoryStateVirtual:
return "virtual"
case AttributeMemoryStateMapped:
return "mapped"
case AttributeMemoryStateComputed:
return "computed"
case AttributeMemoryStateShared:
return "shared"
case AttributeMemoryStateFree:
return "free"
case AttributeMemoryStateUsed:
return "used"
}
return ""
}
// MapAttributeMemoryState is a helper map of string to AttributeMemoryState attribute value.
var MapAttributeMemoryState = map[string]AttributeMemoryState{
"resident": AttributeMemoryStateResident,
"virtual": AttributeMemoryStateVirtual,
"mapped": AttributeMemoryStateMapped,
"computed": AttributeMemoryStateComputed,
"shared": AttributeMemoryStateShared,
"free": AttributeMemoryStateFree,
"used": AttributeMemoryStateUsed,
}
// AttributeMemoryStatus specifies the value memory_status attribute.
type AttributeMemoryStatus int
const (
_ AttributeMemoryStatus = iota
AttributeMemoryStatusAvailable
AttributeMemoryStatusBuffers
AttributeMemoryStatusCached
AttributeMemoryStatusFree
AttributeMemoryStatusShared
AttributeMemoryStatusUsed
)
// String returns the string representation of the AttributeMemoryStatus.
func (av AttributeMemoryStatus) String() string {
switch av {
case AttributeMemoryStatusAvailable:
return "available"
case AttributeMemoryStatusBuffers:
return "buffers"
case AttributeMemoryStatusCached:
return "cached"
case AttributeMemoryStatusFree:
return "free"
case AttributeMemoryStatusShared:
return "shared"
case AttributeMemoryStatusUsed:
return "used"
}
return ""
}
// MapAttributeMemoryStatus is a helper map of string to AttributeMemoryStatus attribute value.
var MapAttributeMemoryStatus = map[string]AttributeMemoryStatus{
"available": AttributeMemoryStatusAvailable,
"buffers": AttributeMemoryStatusBuffers,
"cached": AttributeMemoryStatusCached,
"free": AttributeMemoryStatusFree,
"shared": AttributeMemoryStatusShared,
"used": AttributeMemoryStatusUsed,
}
// AttributeObjectType specifies the value object_type attribute.
type AttributeObjectType int
const (
_ AttributeObjectType = iota
AttributeObjectTypeCollection
AttributeObjectTypeIndex
AttributeObjectTypeExtent
AttributeObjectTypeObject
AttributeObjectTypeView
AttributeObjectTypeStorage
AttributeObjectTypeData
)
// String returns the string representation of the AttributeObjectType.
func (av AttributeObjectType) String() string {
switch av {
case AttributeObjectTypeCollection:
return "collection"
case AttributeObjectTypeIndex:
return "index"
case AttributeObjectTypeExtent:
return "extent"
case AttributeObjectTypeObject:
return "object"
case AttributeObjectTypeView:
return "view"
case AttributeObjectTypeStorage:
return "storage"
case AttributeObjectTypeData:
return "data"
}
return ""
}
// MapAttributeObjectType is a helper map of string to AttributeObjectType attribute value.
var MapAttributeObjectType = map[string]AttributeObjectType{
"collection": AttributeObjectTypeCollection,
"index": AttributeObjectTypeIndex,
"extent": AttributeObjectTypeExtent,
"object": AttributeObjectTypeObject,
"view": AttributeObjectTypeView,
"storage": AttributeObjectTypeStorage,
"data": AttributeObjectTypeData,
}
// AttributeOperation specifies the value operation attribute.
type AttributeOperation int
const (
_ AttributeOperation = iota
AttributeOperationCmd
AttributeOperationQuery
AttributeOperationUpdate
AttributeOperationDelete
AttributeOperationGetmore
AttributeOperationInsert
AttributeOperationScanAndOrder
AttributeOperationTTLDeleted
)
// String returns the string representation of the AttributeOperation.
func (av AttributeOperation) String() string {
switch av {
case AttributeOperationCmd:
return "cmd"
case AttributeOperationQuery:
return "query"
case AttributeOperationUpdate:
return "update"
case AttributeOperationDelete:
return "delete"
case AttributeOperationGetmore:
return "getmore"
case AttributeOperationInsert:
return "insert"
case AttributeOperationScanAndOrder:
return "scan_and_order"
case AttributeOperationTTLDeleted:
return "ttl_deleted"
}
return ""
}
// MapAttributeOperation is a helper map of string to AttributeOperation attribute value.
var MapAttributeOperation = map[string]AttributeOperation{
"cmd": AttributeOperationCmd,
"query": AttributeOperationQuery,
"update": AttributeOperationUpdate,
"delete": AttributeOperationDelete,
"getmore": AttributeOperationGetmore,
"insert": AttributeOperationInsert,
"scan_and_order": AttributeOperationScanAndOrder,
"ttl_deleted": AttributeOperationTTLDeleted,
}
// AttributeOplogType specifies the value oplog_type attribute.
type AttributeOplogType int
const (
_ AttributeOplogType = iota
AttributeOplogTypeSlaveLagMasterTime
AttributeOplogTypeMasterTime
AttributeOplogTypeMasterLagTimeDiff
)
// String returns the string representation of the AttributeOplogType.
func (av AttributeOplogType) String() string {
switch av {
case AttributeOplogTypeSlaveLagMasterTime:
return "slave_lag_master_time"
case AttributeOplogTypeMasterTime:
return "master_time"
case AttributeOplogTypeMasterLagTimeDiff:
return "master_lag_time_diff"
}
return ""
}
// MapAttributeOplogType is a helper map of string to AttributeOplogType attribute value.
var MapAttributeOplogType = map[string]AttributeOplogType{
"slave_lag_master_time": AttributeOplogTypeSlaveLagMasterTime,
"master_time": AttributeOplogTypeMasterTime,
"master_lag_time_diff": AttributeOplogTypeMasterLagTimeDiff,
}
// AttributeScannedType specifies the value scanned_type attribute.
type AttributeScannedType int
const (
_ AttributeScannedType = iota
AttributeScannedTypeIndexItems
AttributeScannedTypeObjects
)
// String returns the string representation of the AttributeScannedType.
func (av AttributeScannedType) String() string {
switch av {
case AttributeScannedTypeIndexItems:
return "index_items"
case AttributeScannedTypeObjects:
return "objects"
}
return ""
}
// MapAttributeScannedType is a helper map of string to AttributeScannedType attribute value.
var MapAttributeScannedType = map[string]AttributeScannedType{
"index_items": AttributeScannedTypeIndexItems,
"objects": AttributeScannedTypeObjects,
}
// AttributeStorageStatus specifies the value storage_status attribute.
type AttributeStorageStatus int
const (
_ AttributeStorageStatus = iota
AttributeStorageStatusTotal
AttributeStorageStatusDataSize
AttributeStorageStatusIndexSize
AttributeStorageStatusDataSizeWoSystem
)
// String returns the string representation of the AttributeStorageStatus.
func (av AttributeStorageStatus) String() string {
switch av {
case AttributeStorageStatusTotal:
return "total"
case AttributeStorageStatusDataSize:
return "data_size"
case AttributeStorageStatusIndexSize:
return "index_size"
case AttributeStorageStatusDataSizeWoSystem:
return "data_size_wo_system"
}
return ""
}
// MapAttributeStorageStatus is a helper map of string to AttributeStorageStatus attribute value.
var MapAttributeStorageStatus = map[string]AttributeStorageStatus{
"total": AttributeStorageStatusTotal,
"data_size": AttributeStorageStatusDataSize,
"index_size": AttributeStorageStatusIndexSize,
"data_size_wo_system": AttributeStorageStatusDataSizeWoSystem,
}
// AttributeTicketType specifies the value ticket_type attribute.
type AttributeTicketType int
const (
_ AttributeTicketType = iota
AttributeTicketTypeAvailableReads
AttributeTicketTypeAvailableWrites
)
// String returns the string representation of the AttributeTicketType.
func (av AttributeTicketType) String() string {
switch av {
case AttributeTicketTypeAvailableReads:
return "available_reads"
case AttributeTicketTypeAvailableWrites:
return "available_writes"
}
return ""
}
// MapAttributeTicketType is a helper map of string to AttributeTicketType attribute value.
var MapAttributeTicketType = map[string]AttributeTicketType{
"available_reads": AttributeTicketTypeAvailableReads,
"available_writes": AttributeTicketTypeAvailableWrites,
}
type metricMongodbatlasDbCounts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.db.counts metric with initial data.
func (m *metricMongodbatlasDbCounts) init() {
m.data.SetName("mongodbatlas.db.counts")
m.data.SetDescription("Database feature size")
m.data.SetUnit("{objects}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDbCounts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("object_type", objectTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDbCounts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDbCounts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDbCounts(cfg MetricConfig) metricMongodbatlasDbCounts {
m := metricMongodbatlasDbCounts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
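// Illustrative sketch (function name hypothetical) of the buffer/emit
// lifecycle shared by all generated metrics: recordDataPoint buffers points,
// emit moves the buffered metric into the output slice, records the
// high-water mark via updateCapacity, and re-inits for the next scrape.
func exampleDbCountsLifecycle() {
	m := newMetricMongodbatlasDbCounts(MetricConfig{Enabled: true})
	now := pcommon.NewTimestampFromTime(time.Now())
	m.recordDataPoint(now, now, 42, AttributeObjectTypeCollection.String())

	out := pmetric.NewMetricSlice()
	m.emit(out)
	// out now holds one gauge with one data point; m is reset and ready for
	// the next collection cycle.
}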
type metricMongodbatlasDbSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.db.size metric with initial data.
func (m *metricMongodbatlasDbSize) init() {
m.data.SetName("mongodbatlas.db.size")
m.data.SetDescription("Database feature size")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("object_type", objectTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDbSize) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDbSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDbSize(cfg MetricConfig) metricMongodbatlasDbSize {
m := metricMongodbatlasDbSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionIopsAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.iops.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionIopsAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.iops.average")
m.data.SetDescription("Disk partition iops")
m.data.SetUnit("{ops}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionIopsAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionIopsAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionIopsAverage {
m := metricMongodbatlasDiskPartitionIopsAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionIopsMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.iops.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionIopsMax) init() {
m.data.SetName("mongodbatlas.disk.partition.iops.max")
m.data.SetDescription("Disk partition iops")
m.data.SetUnit("{ops}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionIopsMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionIopsMax(cfg MetricConfig) metricMongodbatlasDiskPartitionIopsMax {
m := metricMongodbatlasDiskPartitionIopsMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionLatencyAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.latency.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.latency.average")
m.data.SetDescription("Disk partition latency")
m.data.SetUnit("ms")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionLatencyAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionLatencyAverage {
m := metricMongodbatlasDiskPartitionLatencyAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionLatencyMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.latency.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionLatencyMax) init() {
m.data.SetName("mongodbatlas.disk.partition.latency.max")
m.data.SetDescription("Disk partition latency")
m.data.SetUnit("ms")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionLatencyMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionLatencyMax(cfg MetricConfig) metricMongodbatlasDiskPartitionLatencyMax {
m := metricMongodbatlasDiskPartitionLatencyMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionQueueDepth struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.queue.depth metric with initial data.
func (m *metricMongodbatlasDiskPartitionQueueDepth) init() {
m.data.SetName("mongodbatlas.disk.partition.queue.depth")
m.data.SetDescription("Disk queue depth")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
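// Note: unlike the attribute-bearing metrics above, this init omits the
// EnsureCapacity call; the generator appears to pre-size data-point slices
// only for metrics that carry attributes (the utilization.average and
// utilization.max metrics below follow the same pattern).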
func (m *metricMongodbatlasDiskPartitionQueueDepth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionQueueDepth) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionQueueDepth) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionQueueDepth(cfg MetricConfig) metricMongodbatlasDiskPartitionQueueDepth {
m := metricMongodbatlasDiskPartitionQueueDepth{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionSpaceAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.space.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.space.average")
m.data.SetDescription("Disk partition space")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionSpaceAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionSpaceAverage {
m := metricMongodbatlasDiskPartitionSpaceAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionSpaceMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.space.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionSpaceMax) init() {
m.data.SetName("mongodbatlas.disk.partition.space.max")
m.data.SetDescription("Disk partition space")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionSpaceMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionSpaceMax(cfg MetricConfig) metricMongodbatlasDiskPartitionSpaceMax {
m := metricMongodbatlasDiskPartitionSpaceMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionThroughput struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.throughput metric with initial data.
func (m *metricMongodbatlasDiskPartitionThroughput) init() {
m.data.SetName("mongodbatlas.disk.partition.throughput")
m.data.SetDescription("Disk throughput")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionThroughput) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionThroughput) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionThroughput(cfg MetricConfig) metricMongodbatlasDiskPartitionThroughput {
m := metricMongodbatlasDiskPartitionThroughput{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.usage.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionUsageAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.usage.average")
m.data.SetDescription("Disk partition usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUsageAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionUsageAverage {
m := metricMongodbatlasDiskPartitionUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.usage.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionUsageMax) init() {
m.data.SetName("mongodbatlas.disk.partition.usage.max")
m.data.SetDescription("Disk partition usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("disk_status", diskStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUsageMax(cfg MetricConfig) metricMongodbatlasDiskPartitionUsageMax {
m := metricMongodbatlasDiskPartitionUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasDiskPartitionUtilizationAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.utilization.average metric with initial data.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) init() {
m.data.SetName("mongodbatlas.disk.partition.utilization.average")
m.data.SetDescription("The percentage of time during which requests are being issued to and serviced by the partition.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUtilizationAverage(cfg MetricConfig) metricMongodbatlasDiskPartitionUtilizationAverage {
m := metricMongodbatlasDiskPartitionUtilizationAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
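// Note: builders for metrics without data point attributes, such as
// mongodbatlas.disk.partition.utilization.average above, omit the
// EnsureCapacity call in init; only builders whose recordDataPoint accepts
// attribute values pre-size the data point slice with the capacity observed
// by updateCapacity.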
type metricMongodbatlasDiskPartitionUtilizationMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.disk.partition.utilization.max metric with initial data.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) init() {
m.data.SetName("mongodbatlas.disk.partition.utilization.max")
m.data.SetDescription("The maximum percentage of time during which requests are being issued to and serviced by the partition.")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasDiskPartitionUtilizationMax(cfg MetricConfig) metricMongodbatlasDiskPartitionUtilizationMax {
m := metricMongodbatlasDiskPartitionUtilizationMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessAsserts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.asserts metric with initial data.
func (m *metricMongodbatlasProcessAsserts) init() {
m.data.SetName("mongodbatlas.process.asserts")
m.data.SetDescription("Number of assertions per second")
m.data.SetUnit("{assertions}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, assertTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("assert_type", assertTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessAsserts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessAsserts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessAsserts(cfg MetricConfig) metricMongodbatlasProcessAsserts {
m := metricMongodbatlasProcessAsserts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessBackgroundFlush struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.background_flush metric with initial data.
func (m *metricMongodbatlasProcessBackgroundFlush) init() {
m.data.SetName("mongodbatlas.process.background_flush")
m.data.SetDescription("Amount of data flushed in the background")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessBackgroundFlush) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessBackgroundFlush) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessBackgroundFlush(cfg MetricConfig) metricMongodbatlasProcessBackgroundFlush {
m := metricMongodbatlasProcessBackgroundFlush{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCacheIo struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.io metric with initial data.
func (m *metricMongodbatlasProcessCacheIo) init() {
m.data.SetName("mongodbatlas.process.cache.io")
m.data.SetDescription("Cache throughput (per second)")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_direction", cacheDirectionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheIo) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheIo) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheIo(cfg MetricConfig) metricMongodbatlasProcessCacheIo {
m := metricMongodbatlasProcessCacheIo{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCacheRatio struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.ratio metric with initial data.
func (m *metricMongodbatlasProcessCacheRatio) init() {
m.data.SetName("mongodbatlas.process.cache.ratio")
m.data.SetDescription("Cache ratios represented as (%)")
m.data.SetUnit("%")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_ratio_type", cacheRatioTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheRatio) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheRatio) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheRatio(cfg MetricConfig) metricMongodbatlasProcessCacheRatio {
m := metricMongodbatlasProcessCacheRatio{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCacheSize struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cache.size metric with initial data.
func (m *metricMongodbatlasProcessCacheSize) init() {
m.data.SetName("mongodbatlas.process.cache.size")
m.data.SetDescription("Cache sizes")
m.data.SetUnit("By")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cache_status", cacheStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCacheSize) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCacheSize) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCacheSize(cfg MetricConfig) metricMongodbatlasProcessCacheSize {
m := metricMongodbatlasProcessCacheSize{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
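// mongodbatlas.process.cache.size is modeled as a non-monotonic cumulative
// Sum rather than a Gauge, so recordDataPoint, updateCapacity, and emit all
// operate on Sum().DataPoints(). An illustrative sketch (the value and the
// "dirty" cache_status are hypothetical examples):
//
//	m := newMetricMongodbatlasProcessCacheSize(MetricConfig{Enabled: true})
//	ts := pcommon.Timestamp(1700000000000000000)
//	m.recordDataPoint(ts, ts, 1.5e9, "dirty") // appended to the Sum's data points
//	slice := pmetric.NewMetricSlice()
//	m.emit(slice)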
type metricMongodbatlasProcessConnections struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.connections metric with initial data.
func (m *metricMongodbatlasProcessConnections) init() {
m.data.SetName("mongodbatlas.process.connections")
m.data.SetDescription("Number of current connections")
m.data.SetUnit("{connections}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(false)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricMongodbatlasProcessConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessConnections) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessConnections) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessConnections(cfg MetricConfig) metricMongodbatlasProcessConnections {
m := metricMongodbatlasProcessConnections{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.average")
m.data.SetDescription("CPU Usage for child processes, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage {
m := metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.max")
m.data.SetDescription("CPU Usage for child processes, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenNormalizedUsageMax {
m := metricMongodbatlasProcessCPUChildrenNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.children.usage.average")
m.data.SetDescription("CPU Usage for child processes (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenUsageAverage {
m := metricMongodbatlasProcessCPUChildrenUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUChildrenUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.children.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.children.usage.max")
m.data.SetDescription("CPU Usage for child processes (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUChildrenUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUChildrenUsageMax {
m := metricMongodbatlasProcessCPUChildrenUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.normalized.usage.average")
m.data.SetDescription("CPU Usage, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUNormalizedUsageAverage {
m := metricMongodbatlasProcessCPUNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.normalized.usage.max")
m.data.SetDescription("CPU Usage, normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUNormalizedUsageMax {
m := metricMongodbatlasProcessCPUNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.usage.average metric with initial data.
func (m *metricMongodbatlasProcessCPUUsageAverage) init() {
m.data.SetName("mongodbatlas.process.cpu.usage.average")
m.data.SetDescription("CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUUsageAverage(cfg MetricConfig) metricMongodbatlasProcessCPUUsageAverage {
m := metricMongodbatlasProcessCPUUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCPUUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cpu.usage.max metric with initial data.
func (m *metricMongodbatlasProcessCPUUsageMax) init() {
m.data.SetName("mongodbatlas.process.cpu.usage.max")
m.data.SetDescription("CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCPUUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCPUUsageMax(cfg MetricConfig) metricMongodbatlasProcessCPUUsageMax {
m := metricMongodbatlasProcessCPUUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessCursors struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.cursors metric with initial data.
func (m *metricMongodbatlasProcessCursors) init() {
m.data.SetName("mongodbatlas.process.cursors")
m.data.SetDescription("Number of cursors")
m.data.SetUnit("{cursors}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cursorStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cursor_state", cursorStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessCursors) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessCursors) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessCursors(cfg MetricConfig) metricMongodbatlasProcessCursors {
m := metricMongodbatlasProcessCursors{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbDocumentRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.document.rate metric with initial data.
func (m *metricMongodbatlasProcessDbDocumentRate) init() {
m.data.SetName("mongodbatlas.process.db.document.rate")
m.data.SetDescription("Document access rates")
m.data.SetUnit("{documents}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, documentStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("document_status", documentStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbDocumentRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbDocumentRate(cfg MetricConfig) metricMongodbatlasProcessDbDocumentRate {
m := metricMongodbatlasProcessDbDocumentRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbOperationsRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.operations.rate metric with initial data.
func (m *metricMongodbatlasProcessDbOperationsRate) init() {
m.data.SetName("mongodbatlas.process.db.operations.rate")
m.data.SetDescription("DB Operation Rates")
m.data.SetUnit("{operations}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("operation", operationAttributeValue)
dp.Attributes().PutStr("cluster_role", clusterRoleAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbOperationsRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbOperationsRate(cfg MetricConfig) metricMongodbatlasProcessDbOperationsRate {
m := metricMongodbatlasProcessDbOperationsRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
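// recordDataPoint for mongodbatlas.process.db.operations.rate takes two
// attribute values, tagging each data point with both operation and
// cluster_role. A minimal sketch (attribute values are hypothetical examples):
//
//	m := newMetricMongodbatlasProcessDbOperationsRate(MetricConfig{Enabled: true})
//	ts := pcommon.Timestamp(1700000000000000000)
//	m.recordDataPoint(ts, ts, 12.0, "cmd", "primary")
//	m.recordDataPoint(ts, ts, 3.0, "query", "replica")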
type metricMongodbatlasProcessDbOperationsTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.operations.time metric with initial data.
func (m *metricMongodbatlasProcessDbOperationsTime) init() {
m.data.SetName("mongodbatlas.process.db.operations.time")
m.data.SetDescription("DB Operation Times")
m.data.SetUnit("ms")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, executionTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("execution_type", executionTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbOperationsTime) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbOperationsTime(cfg MetricConfig) metricMongodbatlasProcessDbOperationsTime {
m := metricMongodbatlasProcessDbOperationsTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
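// Unlike the non-monotonic cache.size Sum above,
// mongodbatlas.process.db.operations.time is a monotonic cumulative Sum: under
// OTLP semantics the value recorded for a given execution_type attribute set
// should never decrease between collections. Sketch with a hypothetical
// cumulative millisecond total (the "reads" attribute value is an example
// only):
//
//	m := newMetricMongodbatlasProcessDbOperationsTime(MetricConfig{Enabled: true})
//	ts := pcommon.Timestamp(1700000000000000000)
//	m.recordDataPoint(ts, ts, 250.0, "reads") // cumulative ms since the start timestamp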
type metricMongodbatlasProcessDbQueryExecutorScanned struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.query_executor.scanned metric with initial data.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) init() {
m.data.SetName("mongodbatlas.process.db.query_executor.scanned")
m.data.SetDescription("Scanned objects")
m.data.SetUnit("{objects}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("scanned_type", scannedTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbQueryExecutorScanned(cfg MetricConfig) metricMongodbatlasProcessDbQueryExecutorScanned {
m := metricMongodbatlasProcessDbQueryExecutorScanned{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbQueryTargetingScannedPerReturned struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.query_targeting.scanned_per_returned metric with initial data.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) init() {
m.data.SetName("mongodbatlas.process.db.query_targeting.scanned_per_returned")
m.data.SetDescription("Scanned objects per returned")
m.data.SetUnit("{scanned}/{returned}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("scanned_type", scannedTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(cfg MetricConfig) metricMongodbatlasProcessDbQueryTargetingScannedPerReturned {
m := metricMongodbatlasProcessDbQueryTargetingScannedPerReturned{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessDbStorage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.db.storage metric with initial data.
func (m *metricMongodbatlasProcessDbStorage) init() {
m.data.SetName("mongodbatlas.process.db.storage")
m.data.SetDescription("Storage used by the database")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, storageStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("storage_status", storageStatusAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessDbStorage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessDbStorage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessDbStorage(cfg MetricConfig) metricMongodbatlasProcessDbStorage {
m := metricMongodbatlasProcessDbStorage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessGlobalLock struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.global_lock metric with initial data.
func (m *metricMongodbatlasProcessGlobalLock) init() {
m.data.SetName("mongodbatlas.process.global_lock")
m.data.SetDescription("Number and status of locks")
m.data.SetUnit("{locks}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, globalLockStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("global_lock_state", globalLockStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessGlobalLock) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessGlobalLock(cfg MetricConfig) metricMongodbatlasProcessGlobalLock {
m := metricMongodbatlasProcessGlobalLock{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessIndexBtreeMissRatio struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.index.btree_miss_ratio metric with initial data.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) init() {
m.data.SetName("mongodbatlas.process.index.btree_miss_ratio")
m.data.SetDescription("Index miss ratio (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessIndexBtreeMissRatio(cfg MetricConfig) metricMongodbatlasProcessIndexBtreeMissRatio {
m := metricMongodbatlasProcessIndexBtreeMissRatio{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessIndexCounters struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.index.counters metric with initial data.
func (m *metricMongodbatlasProcessIndexCounters) init() {
m.data.SetName("mongodbatlas.process.index.counters")
m.data.SetDescription("Indexes")
m.data.SetUnit("{indexes}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("btree_counter_type", btreeCounterTypeAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessIndexCounters) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessIndexCounters(cfg MetricConfig) metricMongodbatlasProcessIndexCounters {
m := metricMongodbatlasProcessIndexCounters{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingCommits struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.commits metric with initial data.
func (m *metricMongodbatlasProcessJournalingCommits) init() {
m.data.SetName("mongodbatlas.process.journaling.commits")
m.data.SetDescription("Journaling commits")
m.data.SetUnit("{commits}")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingCommits) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingCommits(cfg MetricConfig) metricMongodbatlasProcessJournalingCommits {
m := metricMongodbatlasProcessJournalingCommits{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingDataFiles struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.data_files metric with initial data.
func (m *metricMongodbatlasProcessJournalingDataFiles) init() {
m.data.SetName("mongodbatlas.process.journaling.data_files")
m.data.SetDescription("Data file sizes")
m.data.SetUnit("MiBy")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingDataFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingDataFiles) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingDataFiles(cfg MetricConfig) metricMongodbatlasProcessJournalingDataFiles {
m := metricMongodbatlasProcessJournalingDataFiles{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessJournalingWritten struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.journaling.written metric with initial data.
func (m *metricMongodbatlasProcessJournalingWritten) init() {
m.data.SetName("mongodbatlas.process.journaling.written")
m.data.SetDescription("Journals written")
m.data.SetUnit("MiBy")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessJournalingWritten) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessJournalingWritten) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessJournalingWritten(cfg MetricConfig) metricMongodbatlasProcessJournalingWritten {
m := metricMongodbatlasProcessJournalingWritten{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessMemoryUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.memory.usage metric with initial data.
func (m *metricMongodbatlasProcessMemoryUsage) init() {
m.data.SetName("mongodbatlas.process.memory.usage")
m.data.SetDescription("Memory Usage")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessMemoryUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessMemoryUsage(cfg MetricConfig) metricMongodbatlasProcessMemoryUsage {
m := metricMongodbatlasProcessMemoryUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessNetworkIo struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.network.io metric with initial data.
func (m *metricMongodbatlasProcessNetworkIo) init() {
m.data.SetName("mongodbatlas.process.network.io")
m.data.SetDescription("Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessNetworkIo) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessNetworkIo(cfg MetricConfig) metricMongodbatlasProcessNetworkIo {
m := metricMongodbatlasProcessNetworkIo{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessNetworkRequests struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.network.requests metric with initial data.
func (m *metricMongodbatlasProcessNetworkRequests) init() {
m.data.SetName("mongodbatlas.process.network.requests")
m.data.SetDescription("Network requests")
m.data.SetUnit("{requests}")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
}
func (m *metricMongodbatlasProcessNetworkRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricMongodbatlasProcessNetworkRequests) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessNetworkRequests(cfg MetricConfig) metricMongodbatlasProcessNetworkRequests {
m := metricMongodbatlasProcessNetworkRequests{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
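// Because emit re-initializes the buffer after moving the metric out, a
// builder can be reused across collection cycles. Illustrative sketch for the
// attribute-less mongodbatlas.process.network.requests Sum (values are
// hypothetical):
//
//	m := newMetricMongodbatlasProcessNetworkRequests(MetricConfig{Enabled: true})
//	ts := pcommon.Timestamp(1700000000000000000)
//	slice1, slice2 := pmetric.NewMetricSlice(), pmetric.NewMetricSlice()
//	m.recordDataPoint(ts, ts, 10)
//	m.emit(slice1) // slice1 receives the metric; the buffer resets
//	m.recordDataPoint(ts, ts, 25)
//	m.emit(slice2) // slice2 gets a fresh metric holding only the second point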
type metricMongodbatlasProcessOplogRate struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.oplog.rate metric with initial data.
func (m *metricMongodbatlasProcessOplogRate) init() {
m.data.SetName("mongodbatlas.process.oplog.rate")
m.data.SetDescription("Execution rate by operation")
m.data.SetUnit("GiBy/h")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessOplogRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasProcessOplogRate) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessOplogRate) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessOplogRate(cfg MetricConfig) metricMongodbatlasProcessOplogRate {
m := metricMongodbatlasProcessOplogRate{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessOplogTime struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.oplog.time metric with initial data.
func (m *metricMongodbatlasProcessOplogTime) init() {
m.data.SetName("mongodbatlas.process.oplog.time")
m.data.SetDescription("Execution time by operation")
m.data.SetUnit("s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, oplogTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("oplog_type", oplogTypeAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasProcessOplogTime) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessOplogTime) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessOplogTime(cfg MetricConfig) metricMongodbatlasProcessOplogTime {
m := metricMongodbatlasProcessOplogTime{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessPageFaults struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.page_faults metric with initial data.
func (m *metricMongodbatlasProcessPageFaults) init() {
m.data.SetName("mongodbatlas.process.page_faults")
m.data.SetDescription("Page faults")
m.data.SetUnit("{faults}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_issue_type", memoryIssueTypeAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasProcessPageFaults) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessPageFaults) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessPageFaults(cfg MetricConfig) metricMongodbatlasProcessPageFaults {
m := metricMongodbatlasProcessPageFaults{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessRestarts struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.restarts metric with initial data.
func (m *metricMongodbatlasProcessRestarts) init() {
m.data.SetName("mongodbatlas.process.restarts")
m.data.SetDescription("Restarts in last hour")
m.data.SetUnit("{restarts}/h")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasProcessRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasProcessRestarts) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessRestarts) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessRestarts(cfg MetricConfig) metricMongodbatlasProcessRestarts {
m := metricMongodbatlasProcessRestarts{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasProcessTickets struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.process.tickets metric with initial data.
func (m *metricMongodbatlasProcessTickets) init() {
m.data.SetName("mongodbatlas.process.tickets")
m.data.SetDescription("Tickets")
m.data.SetUnit("{tickets}")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, ticketTypeAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("ticket_type", ticketTypeAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasProcessTickets) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasProcessTickets) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasProcessTickets(cfg MetricConfig) metricMongodbatlasProcessTickets {
m := metricMongodbatlasProcessTickets{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUNormalizedUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.normalized.usage.average metric with initial data.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) init() {
m.data.SetName("mongodbatlas.system.cpu.normalized.usage.average")
m.data.SetDescription("System CPU Normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUNormalizedUsageAverage(cfg MetricConfig) metricMongodbatlasSystemCPUNormalizedUsageAverage {
m := metricMongodbatlasSystemCPUNormalizedUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUNormalizedUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.normalized.usage.max metric with initial data.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) init() {
m.data.SetName("mongodbatlas.system.cpu.normalized.usage.max")
m.data.SetDescription("System CPU Normalized to pct")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUNormalizedUsageMax(cfg MetricConfig) metricMongodbatlasSystemCPUNormalizedUsageMax {
m := metricMongodbatlasSystemCPUNormalizedUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.usage.average metric with initial data.
func (m *metricMongodbatlasSystemCPUUsageAverage) init() {
m.data.SetName("mongodbatlas.system.cpu.usage.average")
m.data.SetDescription("System CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemCPUUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUUsageAverage(cfg MetricConfig) metricMongodbatlasSystemCPUUsageAverage {
m := metricMongodbatlasSystemCPUUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemCPUUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.cpu.usage.max metric with initial data.
func (m *metricMongodbatlasSystemCPUUsageMax) init() {
m.data.SetName("mongodbatlas.system.cpu.usage.max")
m.data.SetDescription("System CPU Usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemCPUUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemCPUUsageMax(cfg MetricConfig) metricMongodbatlasSystemCPUUsageMax {
m := metricMongodbatlasSystemCPUUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsCPUNormalizedUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.cpu.normalized.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) init() {
m.data.SetName("mongodbatlas.system.fts.cpu.normalized.usage")
m.data.SetDescription("Full text search disk usage (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsCPUNormalizedUsage(cfg MetricConfig) metricMongodbatlasSystemFtsCPUNormalizedUsage {
m := metricMongodbatlasSystemFtsCPUNormalizedUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsCPUUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.cpu.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsCPUUsage) init() {
m.data.SetName("mongodbatlas.system.fts.cpu.usage")
m.data.SetDescription("Full-text search (%)")
m.data.SetUnit("1")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("cpu_state", cpuStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemFtsCPUUsage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsCPUUsage(cfg MetricConfig) metricMongodbatlasSystemFtsCPUUsage {
m := metricMongodbatlasSystemFtsCPUUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsDiskUsed struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.disk.used metric with initial data.
func (m *metricMongodbatlasSystemFtsDiskUsed) init() {
m.data.SetName("mongodbatlas.system.fts.disk.used")
m.data.SetDescription("Full text search disk usage")
m.data.SetUnit("By")
m.data.SetEmptyGauge()
}
func (m *metricMongodbatlasSystemFtsDiskUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemFtsDiskUsed) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsDiskUsed(cfg MetricConfig) metricMongodbatlasSystemFtsDiskUsed {
m := metricMongodbatlasSystemFtsDiskUsed{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemFtsMemoryUsage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.fts.memory.usage metric with initial data.
func (m *metricMongodbatlasSystemFtsMemoryUsage) init() {
m.data.SetName("mongodbatlas.system.fts.memory.usage")
m.data.SetDescription("Full-text search")
m.data.SetUnit("MiBy")
m.data.SetEmptySum()
m.data.Sum().SetIsMonotonic(true)
m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Sum().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemFtsMemoryUsage) updateCapacity() {
if m.data.Sum().DataPoints().Len() > m.capacity {
m.capacity = m.data.Sum().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemFtsMemoryUsage(cfg MetricConfig) metricMongodbatlasSystemFtsMemoryUsage {
m := metricMongodbatlasSystemFtsMemoryUsage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemMemoryUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.memory.usage.average metric with initial data.
func (m *metricMongodbatlasSystemMemoryUsageAverage) init() {
m.data.SetName("mongodbatlas.system.memory.usage.average")
m.data.SetDescription("System Memory Usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_status", memoryStatusAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemMemoryUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemMemoryUsageAverage(cfg MetricConfig) metricMongodbatlasSystemMemoryUsageAverage {
m := metricMongodbatlasSystemMemoryUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemMemoryUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.memory.usage.max metric with initial data.
func (m *metricMongodbatlasSystemMemoryUsageMax) init() {
m.data.SetName("mongodbatlas.system.memory.usage.max")
m.data.SetDescription("System Memory Usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_status", memoryStatusAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemMemoryUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemMemoryUsageMax(cfg MetricConfig) metricMongodbatlasSystemMemoryUsageMax {
m := metricMongodbatlasSystemMemoryUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemNetworkIoAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.network.io.average metric with initial data.
func (m *metricMongodbatlasSystemNetworkIoAverage) init() {
m.data.SetName("mongodbatlas.system.network.io.average")
m.data.SetDescription("System Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemNetworkIoAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemNetworkIoAverage(cfg MetricConfig) metricMongodbatlasSystemNetworkIoAverage {
m := metricMongodbatlasSystemNetworkIoAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemNetworkIoMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.network.io.max metric with initial data.
func (m *metricMongodbatlasSystemNetworkIoMax) init() {
m.data.SetName("mongodbatlas.system.network.io.max")
m.data.SetDescription("System Network IO")
m.data.SetUnit("By/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemNetworkIoMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemNetworkIoMax(cfg MetricConfig) metricMongodbatlasSystemNetworkIoMax {
m := metricMongodbatlasSystemNetworkIoMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingIoAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.io.average metric with initial data.
func (m *metricMongodbatlasSystemPagingIoAverage) init() {
m.data.SetName("mongodbatlas.system.paging.io.average")
m.data.SetDescription("Swap IO")
m.data.SetUnit("{pages}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemPagingIoAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingIoAverage(cfg MetricConfig) metricMongodbatlasSystemPagingIoAverage {
m := metricMongodbatlasSystemPagingIoAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingIoMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.io.max metric with initial data.
func (m *metricMongodbatlasSystemPagingIoMax) init() {
m.data.SetName("mongodbatlas.system.paging.io.max")
m.data.SetDescription("Swap IO")
m.data.SetUnit("{pages}/s")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("direction", directionAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemPagingIoMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingIoMax(cfg MetricConfig) metricMongodbatlasSystemPagingIoMax {
m := metricMongodbatlasSystemPagingIoMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingUsageAverage struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.usage.average metric with initial data.
func (m *metricMongodbatlasSystemPagingUsageAverage) init() {
m.data.SetName("mongodbatlas.system.paging.usage.average")
m.data.SetDescription("Swap usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemPagingUsageAverage) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingUsageAverage(cfg MetricConfig) metricMongodbatlasSystemPagingUsageAverage {
m := metricMongodbatlasSystemPagingUsageAverage{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
type metricMongodbatlasSystemPagingUsageMax struct {
data pmetric.Metric // data buffer for generated metric.
config MetricConfig // metric config provided by user.
capacity int // max observed number of data points added to the metric.
}
// init fills mongodbatlas.system.paging.usage.max metric with initial data.
func (m *metricMongodbatlasSystemPagingUsageMax) init() {
m.data.SetName("mongodbatlas.system.paging.usage.max")
m.data.SetDescription("Swap usage")
m.data.SetUnit("KiBy")
m.data.SetEmptyGauge()
m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}
func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) {
if !m.config.Enabled {
return
}
dp := m.data.Gauge().DataPoints().AppendEmpty()
dp.SetStartTimestamp(start)
dp.SetTimestamp(ts)
dp.SetDoubleValue(val)
dp.Attributes().PutStr("memory_state", memoryStateAttributeValue)
}
// updateCapacity saves the maximum observed length of the data point slice, to be used as the slice capacity.
func (m *metricMongodbatlasSystemPagingUsageMax) updateCapacity() {
if m.data.Gauge().DataPoints().Len() > m.capacity {
m.capacity = m.data.Gauge().DataPoints().Len()
}
}
// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pmetric.MetricSlice) {
if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
m.updateCapacity()
m.data.MoveTo(metrics.AppendEmpty())
m.init()
}
}
func newMetricMongodbatlasSystemPagingUsageMax(cfg MetricConfig) metricMongodbatlasSystemPagingUsageMax {
m := metricMongodbatlasSystemPagingUsageMax{config: cfg}
if cfg.Enabled {
m.data = pmetric.NewMetric()
m.init()
}
return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce the metric representation defined in the metadata and user config.
type MetricsBuilder struct {
config MetricsBuilderConfig // config of the metrics builder.
startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
metricsCapacity int // maximum observed number of metrics per resource.
metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
buildInfo component.BuildInfo // contains version information.
resourceAttributeIncludeFilter map[string]filter.Filter
resourceAttributeExcludeFilter map[string]filter.Filter
metricMongodbatlasDbCounts metricMongodbatlasDbCounts
metricMongodbatlasDbSize metricMongodbatlasDbSize
metricMongodbatlasDiskPartitionIopsAverage metricMongodbatlasDiskPartitionIopsAverage
metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionIopsMax
metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyAverage
metricMongodbatlasDiskPartitionLatencyMax metricMongodbatlasDiskPartitionLatencyMax
metricMongodbatlasDiskPartitionQueueDepth metricMongodbatlasDiskPartitionQueueDepth
metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceAverage
metricMongodbatlasDiskPartitionSpaceMax metricMongodbatlasDiskPartitionSpaceMax
metricMongodbatlasDiskPartitionThroughput metricMongodbatlasDiskPartitionThroughput
metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageAverage
metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUsageMax
metricMongodbatlasDiskPartitionUtilizationAverage metricMongodbatlasDiskPartitionUtilizationAverage
metricMongodbatlasDiskPartitionUtilizationMax metricMongodbatlasDiskPartitionUtilizationMax
metricMongodbatlasProcessAsserts metricMongodbatlasProcessAsserts
metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessBackgroundFlush
metricMongodbatlasProcessCacheIo metricMongodbatlasProcessCacheIo
metricMongodbatlasProcessCacheRatio metricMongodbatlasProcessCacheRatio
metricMongodbatlasProcessCacheSize metricMongodbatlasProcessCacheSize
metricMongodbatlasProcessConnections metricMongodbatlasProcessConnections
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage
metricMongodbatlasProcessCPUChildrenNormalizedUsageMax metricMongodbatlasProcessCPUChildrenNormalizedUsageMax
metricMongodbatlasProcessCPUChildrenUsageAverage metricMongodbatlasProcessCPUChildrenUsageAverage
metricMongodbatlasProcessCPUChildrenUsageMax metricMongodbatlasProcessCPUChildrenUsageMax
metricMongodbatlasProcessCPUNormalizedUsageAverage metricMongodbatlasProcessCPUNormalizedUsageAverage
metricMongodbatlasProcessCPUNormalizedUsageMax metricMongodbatlasProcessCPUNormalizedUsageMax
metricMongodbatlasProcessCPUUsageAverage metricMongodbatlasProcessCPUUsageAverage
metricMongodbatlasProcessCPUUsageMax metricMongodbatlasProcessCPUUsageMax
metricMongodbatlasProcessCursors metricMongodbatlasProcessCursors
metricMongodbatlasProcessDbDocumentRate metricMongodbatlasProcessDbDocumentRate
metricMongodbatlasProcessDbOperationsRate metricMongodbatlasProcessDbOperationsRate
metricMongodbatlasProcessDbOperationsTime metricMongodbatlasProcessDbOperationsTime
metricMongodbatlasProcessDbQueryExecutorScanned metricMongodbatlasProcessDbQueryExecutorScanned
metricMongodbatlasProcessDbQueryTargetingScannedPerReturned metricMongodbatlasProcessDbQueryTargetingScannedPerReturned
metricMongodbatlasProcessDbStorage metricMongodbatlasProcessDbStorage
metricMongodbatlasProcessGlobalLock metricMongodbatlasProcessGlobalLock
metricMongodbatlasProcessIndexBtreeMissRatio metricMongodbatlasProcessIndexBtreeMissRatio
metricMongodbatlasProcessIndexCounters metricMongodbatlasProcessIndexCounters
metricMongodbatlasProcessJournalingCommits metricMongodbatlasProcessJournalingCommits
metricMongodbatlasProcessJournalingDataFiles metricMongodbatlasProcessJournalingDataFiles
metricMongodbatlasProcessJournalingWritten metricMongodbatlasProcessJournalingWritten
metricMongodbatlasProcessMemoryUsage metricMongodbatlasProcessMemoryUsage
metricMongodbatlasProcessNetworkIo metricMongodbatlasProcessNetworkIo
metricMongodbatlasProcessNetworkRequests metricMongodbatlasProcessNetworkRequests
metricMongodbatlasProcessOplogRate metricMongodbatlasProcessOplogRate
metricMongodbatlasProcessOplogTime metricMongodbatlasProcessOplogTime
metricMongodbatlasProcessPageFaults metricMongodbatlasProcessPageFaults
metricMongodbatlasProcessRestarts metricMongodbatlasProcessRestarts
metricMongodbatlasProcessTickets metricMongodbatlasProcessTickets
metricMongodbatlasSystemCPUNormalizedUsageAverage metricMongodbatlasSystemCPUNormalizedUsageAverage
metricMongodbatlasSystemCPUNormalizedUsageMax metricMongodbatlasSystemCPUNormalizedUsageMax
metricMongodbatlasSystemCPUUsageAverage metricMongodbatlasSystemCPUUsageAverage
metricMongodbatlasSystemCPUUsageMax metricMongodbatlasSystemCPUUsageMax
metricMongodbatlasSystemFtsCPUNormalizedUsage metricMongodbatlasSystemFtsCPUNormalizedUsage
metricMongodbatlasSystemFtsCPUUsage metricMongodbatlasSystemFtsCPUUsage
metricMongodbatlasSystemFtsDiskUsed metricMongodbatlasSystemFtsDiskUsed
metricMongodbatlasSystemFtsMemoryUsage metricMongodbatlasSystemFtsMemoryUsage
metricMongodbatlasSystemMemoryUsageAverage metricMongodbatlasSystemMemoryUsageAverage
metricMongodbatlasSystemMemoryUsageMax metricMongodbatlasSystemMemoryUsageMax
metricMongodbatlasSystemNetworkIoAverage metricMongodbatlasSystemNetworkIoAverage
metricMongodbatlasSystemNetworkIoMax metricMongodbatlasSystemNetworkIoMax
metricMongodbatlasSystemPagingIoAverage metricMongodbatlasSystemPagingIoAverage
metricMongodbatlasSystemPagingIoMax metricMongodbatlasSystemPagingIoMax
metricMongodbatlasSystemPagingUsageAverage metricMongodbatlasSystemPagingUsageAverage
metricMongodbatlasSystemPagingUsageMax metricMongodbatlasSystemPagingUsageMax
}
// MetricBuilderOption applies changes to the default metrics builder.
type MetricBuilderOption interface {
apply(*MetricsBuilder)
}
type metricBuilderOptionFunc func(mb *MetricsBuilder)
func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) {
mbof(mb)
}
// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
return metricBuilderOptionFunc(func(mb *MetricsBuilder) {
mb.startTime = startTime
})
}
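// Usage sketch: options are applied in order at the end of NewMetricsBuilder,
// so a caller can pin the start time instead of defaulting to time.Now().
// scrapeStart below is an illustrative variable, not part of this file:
//
//	mb := NewMetricsBuilder(mbc, settings, WithStartTime(pcommon.NewTimestampFromTime(scrapeStart)))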
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
config: mbc,
startTime: pcommon.NewTimestampFromTime(time.Now()),
metricsBuffer: pmetric.NewMetrics(),
buildInfo: settings.BuildInfo,
metricMongodbatlasDbCounts: newMetricMongodbatlasDbCounts(mbc.Metrics.MongodbatlasDbCounts),
metricMongodbatlasDbSize: newMetricMongodbatlasDbSize(mbc.Metrics.MongodbatlasDbSize),
metricMongodbatlasDiskPartitionIopsAverage: newMetricMongodbatlasDiskPartitionIopsAverage(mbc.Metrics.MongodbatlasDiskPartitionIopsAverage),
metricMongodbatlasDiskPartitionIopsMax: newMetricMongodbatlasDiskPartitionIopsMax(mbc.Metrics.MongodbatlasDiskPartitionIopsMax),
metricMongodbatlasDiskPartitionLatencyAverage: newMetricMongodbatlasDiskPartitionLatencyAverage(mbc.Metrics.MongodbatlasDiskPartitionLatencyAverage),
metricMongodbatlasDiskPartitionLatencyMax: newMetricMongodbatlasDiskPartitionLatencyMax(mbc.Metrics.MongodbatlasDiskPartitionLatencyMax),
metricMongodbatlasDiskPartitionQueueDepth: newMetricMongodbatlasDiskPartitionQueueDepth(mbc.Metrics.MongodbatlasDiskPartitionQueueDepth),
metricMongodbatlasDiskPartitionSpaceAverage: newMetricMongodbatlasDiskPartitionSpaceAverage(mbc.Metrics.MongodbatlasDiskPartitionSpaceAverage),
metricMongodbatlasDiskPartitionSpaceMax: newMetricMongodbatlasDiskPartitionSpaceMax(mbc.Metrics.MongodbatlasDiskPartitionSpaceMax),
metricMongodbatlasDiskPartitionThroughput: newMetricMongodbatlasDiskPartitionThroughput(mbc.Metrics.MongodbatlasDiskPartitionThroughput),
metricMongodbatlasDiskPartitionUsageAverage: newMetricMongodbatlasDiskPartitionUsageAverage(mbc.Metrics.MongodbatlasDiskPartitionUsageAverage),
metricMongodbatlasDiskPartitionUsageMax: newMetricMongodbatlasDiskPartitionUsageMax(mbc.Metrics.MongodbatlasDiskPartitionUsageMax),
metricMongodbatlasDiskPartitionUtilizationAverage: newMetricMongodbatlasDiskPartitionUtilizationAverage(mbc.Metrics.MongodbatlasDiskPartitionUtilizationAverage),
metricMongodbatlasDiskPartitionUtilizationMax: newMetricMongodbatlasDiskPartitionUtilizationMax(mbc.Metrics.MongodbatlasDiskPartitionUtilizationMax),
metricMongodbatlasProcessAsserts: newMetricMongodbatlasProcessAsserts(mbc.Metrics.MongodbatlasProcessAsserts),
metricMongodbatlasProcessBackgroundFlush: newMetricMongodbatlasProcessBackgroundFlush(mbc.Metrics.MongodbatlasProcessBackgroundFlush),
metricMongodbatlasProcessCacheIo: newMetricMongodbatlasProcessCacheIo(mbc.Metrics.MongodbatlasProcessCacheIo),
metricMongodbatlasProcessCacheRatio: newMetricMongodbatlasProcessCacheRatio(mbc.Metrics.MongodbatlasProcessCacheRatio),
metricMongodbatlasProcessCacheSize: newMetricMongodbatlasProcessCacheSize(mbc.Metrics.MongodbatlasProcessCacheSize),
metricMongodbatlasProcessConnections: newMetricMongodbatlasProcessConnections(mbc.Metrics.MongodbatlasProcessConnections),
metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage),
metricMongodbatlasProcessCPUChildrenNormalizedUsageMax: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageMax),
metricMongodbatlasProcessCPUChildrenUsageAverage: newMetricMongodbatlasProcessCPUChildrenUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenUsageAverage),
metricMongodbatlasProcessCPUChildrenUsageMax: newMetricMongodbatlasProcessCPUChildrenUsageMax(mbc.Metrics.MongodbatlasProcessCPUChildrenUsageMax),
metricMongodbatlasProcessCPUNormalizedUsageAverage: newMetricMongodbatlasProcessCPUNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUNormalizedUsageAverage),
metricMongodbatlasProcessCPUNormalizedUsageMax: newMetricMongodbatlasProcessCPUNormalizedUsageMax(mbc.Metrics.MongodbatlasProcessCPUNormalizedUsageMax),
metricMongodbatlasProcessCPUUsageAverage: newMetricMongodbatlasProcessCPUUsageAverage(mbc.Metrics.MongodbatlasProcessCPUUsageAverage),
metricMongodbatlasProcessCPUUsageMax: newMetricMongodbatlasProcessCPUUsageMax(mbc.Metrics.MongodbatlasProcessCPUUsageMax),
metricMongodbatlasProcessCursors: newMetricMongodbatlasProcessCursors(mbc.Metrics.MongodbatlasProcessCursors),
metricMongodbatlasProcessDbDocumentRate: newMetricMongodbatlasProcessDbDocumentRate(mbc.Metrics.MongodbatlasProcessDbDocumentRate),
metricMongodbatlasProcessDbOperationsRate: newMetricMongodbatlasProcessDbOperationsRate(mbc.Metrics.MongodbatlasProcessDbOperationsRate),
metricMongodbatlasProcessDbOperationsTime: newMetricMongodbatlasProcessDbOperationsTime(mbc.Metrics.MongodbatlasProcessDbOperationsTime),
metricMongodbatlasProcessDbQueryExecutorScanned: newMetricMongodbatlasProcessDbQueryExecutorScanned(mbc.Metrics.MongodbatlasProcessDbQueryExecutorScanned),
metricMongodbatlasProcessDbQueryTargetingScannedPerReturned: newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(mbc.Metrics.MongodbatlasProcessDbQueryTargetingScannedPerReturned),
metricMongodbatlasProcessDbStorage: newMetricMongodbatlasProcessDbStorage(mbc.Metrics.MongodbatlasProcessDbStorage),
metricMongodbatlasProcessGlobalLock: newMetricMongodbatlasProcessGlobalLock(mbc.Metrics.MongodbatlasProcessGlobalLock),
metricMongodbatlasProcessIndexBtreeMissRatio: newMetricMongodbatlasProcessIndexBtreeMissRatio(mbc.Metrics.MongodbatlasProcessIndexBtreeMissRatio),
metricMongodbatlasProcessIndexCounters: newMetricMongodbatlasProcessIndexCounters(mbc.Metrics.MongodbatlasProcessIndexCounters),
metricMongodbatlasProcessJournalingCommits: newMetricMongodbatlasProcessJournalingCommits(mbc.Metrics.MongodbatlasProcessJournalingCommits),
metricMongodbatlasProcessJournalingDataFiles: newMetricMongodbatlasProcessJournalingDataFiles(mbc.Metrics.MongodbatlasProcessJournalingDataFiles),
metricMongodbatlasProcessJournalingWritten: newMetricMongodbatlasProcessJournalingWritten(mbc.Metrics.MongodbatlasProcessJournalingWritten),
metricMongodbatlasProcessMemoryUsage: newMetricMongodbatlasProcessMemoryUsage(mbc.Metrics.MongodbatlasProcessMemoryUsage),
metricMongodbatlasProcessNetworkIo: newMetricMongodbatlasProcessNetworkIo(mbc.Metrics.MongodbatlasProcessNetworkIo),
metricMongodbatlasProcessNetworkRequests: newMetricMongodbatlasProcessNetworkRequests(mbc.Metrics.MongodbatlasProcessNetworkRequests),
metricMongodbatlasProcessOplogRate: newMetricMongodbatlasProcessOplogRate(mbc.Metrics.MongodbatlasProcessOplogRate),
metricMongodbatlasProcessOplogTime: newMetricMongodbatlasProcessOplogTime(mbc.Metrics.MongodbatlasProcessOplogTime),
metricMongodbatlasProcessPageFaults: newMetricMongodbatlasProcessPageFaults(mbc.Metrics.MongodbatlasProcessPageFaults),
metricMongodbatlasProcessRestarts: newMetricMongodbatlasProcessRestarts(mbc.Metrics.MongodbatlasProcessRestarts),
metricMongodbatlasProcessTickets: newMetricMongodbatlasProcessTickets(mbc.Metrics.MongodbatlasProcessTickets),
metricMongodbatlasSystemCPUNormalizedUsageAverage: newMetricMongodbatlasSystemCPUNormalizedUsageAverage(mbc.Metrics.MongodbatlasSystemCPUNormalizedUsageAverage),
metricMongodbatlasSystemCPUNormalizedUsageMax: newMetricMongodbatlasSystemCPUNormalizedUsageMax(mbc.Metrics.MongodbatlasSystemCPUNormalizedUsageMax),
metricMongodbatlasSystemCPUUsageAverage: newMetricMongodbatlasSystemCPUUsageAverage(mbc.Metrics.MongodbatlasSystemCPUUsageAverage),
metricMongodbatlasSystemCPUUsageMax: newMetricMongodbatlasSystemCPUUsageMax(mbc.Metrics.MongodbatlasSystemCPUUsageMax),
metricMongodbatlasSystemFtsCPUNormalizedUsage: newMetricMongodbatlasSystemFtsCPUNormalizedUsage(mbc.Metrics.MongodbatlasSystemFtsCPUNormalizedUsage),
metricMongodbatlasSystemFtsCPUUsage: newMetricMongodbatlasSystemFtsCPUUsage(mbc.Metrics.MongodbatlasSystemFtsCPUUsage),
metricMongodbatlasSystemFtsDiskUsed: newMetricMongodbatlasSystemFtsDiskUsed(mbc.Metrics.MongodbatlasSystemFtsDiskUsed),
metricMongodbatlasSystemFtsMemoryUsage: newMetricMongodbatlasSystemFtsMemoryUsage(mbc.Metrics.MongodbatlasSystemFtsMemoryUsage),
metricMongodbatlasSystemMemoryUsageAverage: newMetricMongodbatlasSystemMemoryUsageAverage(mbc.Metrics.MongodbatlasSystemMemoryUsageAverage),
metricMongodbatlasSystemMemoryUsageMax: newMetricMongodbatlasSystemMemoryUsageMax(mbc.Metrics.MongodbatlasSystemMemoryUsageMax),
metricMongodbatlasSystemNetworkIoAverage: newMetricMongodbatlasSystemNetworkIoAverage(mbc.Metrics.MongodbatlasSystemNetworkIoAverage),
metricMongodbatlasSystemNetworkIoMax: newMetricMongodbatlasSystemNetworkIoMax(mbc.Metrics.MongodbatlasSystemNetworkIoMax),
metricMongodbatlasSystemPagingIoAverage: newMetricMongodbatlasSystemPagingIoAverage(mbc.Metrics.MongodbatlasSystemPagingIoAverage),
metricMongodbatlasSystemPagingIoMax: newMetricMongodbatlasSystemPagingIoMax(mbc.Metrics.MongodbatlasSystemPagingIoMax),
metricMongodbatlasSystemPagingUsageAverage: newMetricMongodbatlasSystemPagingUsageAverage(mbc.Metrics.MongodbatlasSystemPagingUsageAverage),
metricMongodbatlasSystemPagingUsageMax: newMetricMongodbatlasSystemPagingUsageMax(mbc.Metrics.MongodbatlasSystemPagingUsageMax),
resourceAttributeIncludeFilter: make(map[string]filter.Filter),
resourceAttributeExcludeFilter: make(map[string]filter.Filter),
}
if mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.cluster.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasClusterName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasDbName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.db.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDbName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasDbName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.db.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDbName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.disk.partition"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.disk.partition"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasDiskPartition.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasHostName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.host.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasHostName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasHostName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.host.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasHostName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.org_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.org_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasOrgName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessID.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.port"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.port"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessPort.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.process.type_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.process.type_name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProcessTypeName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.project.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.project.id"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectID.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.project.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.project.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProjectName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.provider.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.provider.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasProviderName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.region.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.region.name"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasRegionName.MetricsExclude)
}
if mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsInclude != nil {
mb.resourceAttributeIncludeFilter["mongodb_atlas.user.alias"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsInclude)
}
if mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsExclude != nil {
mb.resourceAttributeExcludeFilter["mongodb_atlas.user.alias"] = filter.CreateFilter(mbc.ResourceAttributes.MongodbAtlasUserAlias.MetricsExclude)
}
for _, op := range options {
op.apply(mb)
}
return mb
}
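// The include/exclude maps populated above drive per-resource filtering in
// EmitForResource. As an illustrative sketch (the YAML shape follows
// mdatagen's filter config and is an assumption, not taken from this file),
// keeping only metrics from projects whose name starts with "prod-" might
// look like:
//
//	mongodbatlas:
//	  resource_attributes:
//	    mongodb_atlas.project.name:
//	      enabled: true
//	      metrics_include:
//	        - regexp: "^prod-.*"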
// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with the emitted metrics.
func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder {
return NewResourceBuilder(mb.config.ResourceAttributes)
}
// updateCapacity tracks the maximum number of metrics seen so far, which is used as the slice capacity for subsequent resources.
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
}
}
// ResourceMetricsOption applies changes to provided resource metrics.
type ResourceMetricsOption interface {
apply(pmetric.ResourceMetrics)
}
type resourceMetricsOptionFunc func(pmetric.ResourceMetrics)
func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) {
rmof(rm)
}
// WithResource sets the provided resource on the emitted ResourceMetrics.
// It's recommended to use ResourceBuilder to create the resource.
func WithResource(res pcommon.Resource) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
res.CopyTo(rm.Resource())
})
}
// WithStartTimeOverride overrides the start time for all the resource metrics data points.
// This option should only be used if a different start time has to be set on metrics coming from different resources.
func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption {
return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) {
metrics := rm.ScopeMetrics().At(0).Metrics()
for i := 0; i < metrics.Len(); i++ {
var dps pmetric.NumberDataPointSlice
switch metrics.At(i).Type() {
case pmetric.MetricTypeGauge:
dps = metrics.At(i).Gauge().DataPoints()
case pmetric.MetricTypeSum:
dps = metrics.At(i).Sum().DataPoints()
default:
// Skip metric types without number data points instead of reusing
// the slice from a previous iteration.
continue
}
for j := 0; j < dps.Len(); j++ {
dps.At(j).SetStartTimestamp(start)
}
}
})
}
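// Illustrative sketch (not part of the generated code): overriding the start
// time for the data points of one resource. restartTime is a hypothetical
// time.Time captured by the caller.
//
//	start := pcommon.NewTimestampFromTime(restartTime)
//	mb.EmitForResource(WithStartTimeOverride(start))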
// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
// recording another set of data points as part of another resource. This function can be helpful when one scraper
// needs to emit metrics from several resources. Otherwise, calling this function is not required;
// calling `Emit` alone is sufficient.
// Resource attributes should be provided as ResourceMetricsOption arguments.
func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) {
rm := pmetric.NewResourceMetrics()
ils := rm.ScopeMetrics().AppendEmpty()
ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver")
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricMongodbatlasDbCounts.emit(ils.Metrics())
mb.metricMongodbatlasDbSize.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionIopsAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionQueueDepth.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionThroughput.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics())
mb.metricMongodbatlasDiskPartitionUtilizationMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics())
mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheRatio.emit(ils.Metrics())
mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics())
mb.metricMongodbatlasProcessConnections.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUChildrenUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasProcessCPUUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasProcessCursors.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbDocumentRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbOperationsRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbOperationsTime.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbQueryExecutorScanned.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.emit(ils.Metrics())
mb.metricMongodbatlasProcessDbStorage.emit(ils.Metrics())
mb.metricMongodbatlasProcessGlobalLock.emit(ils.Metrics())
mb.metricMongodbatlasProcessIndexBtreeMissRatio.emit(ils.Metrics())
mb.metricMongodbatlasProcessIndexCounters.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingCommits.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingDataFiles.emit(ils.Metrics())
mb.metricMongodbatlasProcessJournalingWritten.emit(ils.Metrics())
mb.metricMongodbatlasProcessMemoryUsage.emit(ils.Metrics())
mb.metricMongodbatlasProcessNetworkIo.emit(ils.Metrics())
mb.metricMongodbatlasProcessNetworkRequests.emit(ils.Metrics())
mb.metricMongodbatlasProcessOplogRate.emit(ils.Metrics())
mb.metricMongodbatlasProcessOplogTime.emit(ils.Metrics())
mb.metricMongodbatlasProcessPageFaults.emit(ils.Metrics())
mb.metricMongodbatlasProcessRestarts.emit(ils.Metrics())
mb.metricMongodbatlasProcessTickets.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUNormalizedUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemCPUUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsCPUUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsDiskUsed.emit(ils.Metrics())
mb.metricMongodbatlasSystemFtsMemoryUsage.emit(ils.Metrics())
mb.metricMongodbatlasSystemMemoryUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemMemoryUsageMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemNetworkIoAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemNetworkIoMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingIoAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingIoMax.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingUsageAverage.emit(ils.Metrics())
mb.metricMongodbatlasSystemPagingUsageMax.emit(ils.Metrics())
for _, op := range options {
op.apply(rm)
}
for attr, filter := range mb.resourceAttributeIncludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) {
return
}
}
for attr, filter := range mb.resourceAttributeExcludeFilter {
if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) {
return
}
}
if ils.Metrics().Len() > 0 {
mb.updateCapacity(rm)
rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
}
}
// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
// recording another set of metrics. This function is responsible for applying all the transformations required to
// produce the metric representation defined in the metadata and user config, e.g. delta or cumulative.
func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics {
mb.EmitForResource(options...)
metrics := mb.metricsBuffer
mb.metricsBuffer = pmetric.NewMetrics()
return metrics
}
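// Illustrative sketch (not part of the generated code): a typical scrape
// cycle records data points, emits them under a per-project resource, and
// finally flushes the buffer. scrapeProject, projectName, and now are
// hypothetical names.
//
//	func scrapeProject(mb *MetricsBuilder, projectName string, now pcommon.Timestamp) pmetric.Metrics {
//		mb.RecordMongodbatlasProcessConnectionsDataPoint(now, 42)
//		rb := mb.NewResourceBuilder()
//		rb.SetMongodbAtlasProjectName(projectName)
//		mb.EmitForResource(WithResource(rb.Emit()))
//		return mb.Emit()
//	}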
// RecordMongodbatlasDbCountsDataPoint adds a data point to mongodbatlas.db.counts metric.
func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDbSizeDataPoint adds a data point to mongodbatlas.db.size metric.
func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue AttributeObjectType) {
mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsAverageDataPoint adds a data point to mongodbatlas.disk.partition.iops.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionIopsMaxDataPoint adds a data point to mongodbatlas.disk.partition.iops.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyAverageDataPoint adds a data point to mongodbatlas.disk.partition.latency.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionLatencyMaxDataPoint adds a data point to mongodbatlas.disk.partition.latency.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionQueueDepthDataPoint adds a data point to mongodbatlas.disk.partition.queue.depth metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionQueueDepth.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionSpaceMaxDataPoint adds a data point to mongodbatlas.disk.partition.space.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionThroughputDataPoint adds a data point to mongodbatlas.disk.partition.throughput metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionThroughputDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) {
mb.metricMongodbatlasDiskPartitionThroughput.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUsageMaxDataPoint adds a data point to mongodbatlas.disk.partition.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) {
mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String())
}
// RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint adds a data point to mongodbatlas.disk.partition.utilization.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint adds a data point to mongodbatlas.disk.partition.utilization.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessAssertsDataPoint adds a data point to mongodbatlas.process.asserts metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pcommon.Timestamp, val float64, assertTypeAttributeValue AttributeAssertType) {
mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue.String())
}
// RecordMongodbatlasProcessBackgroundFlushDataPoint adds a data point to mongodbatlas.process.background_flush metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessBackgroundFlush.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessCacheIoDataPoint adds a data point to mongodbatlas.process.cache.io metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue AttributeCacheDirection) {
mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String())
}
// RecordMongodbatlasProcessCacheRatioDataPoint adds a data point to mongodbatlas.process.cache.ratio metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheRatioDataPoint(ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue AttributeCacheRatioType) {
mb.metricMongodbatlasProcessCacheRatio.recordDataPoint(mb.startTime, ts, val, cacheRatioTypeAttributeValue.String())
}
// RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) {
mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String())
}
// RecordMongodbatlasProcessConnectionsDataPoint adds a data point to mongodbatlas.process.connections metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessConnections.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCPUUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasProcessCursorsDataPoint adds a data point to mongodbatlas.process.cursors metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pcommon.Timestamp, val float64, cursorStateAttributeValue AttributeCursorState) {
mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue.String())
}
// RecordMongodbatlasProcessDbDocumentRateDataPoint adds a data point to mongodbatlas.process.db.document.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pcommon.Timestamp, val float64, documentStatusAttributeValue AttributeDocumentStatus) {
mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsRateDataPoint adds a data point to mongodbatlas.process.db.operations.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue AttributeOperation, clusterRoleAttributeValue AttributeClusterRole) {
mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String(), clusterRoleAttributeValue.String())
}
// RecordMongodbatlasProcessDbOperationsTimeDataPoint adds a data point to mongodbatlas.process.db.operations.time metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pcommon.Timestamp, val float64, executionTypeAttributeValue AttributeExecutionType) {
mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint adds a data point to mongodbatlas.process.db.query_executor.scanned metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint adds a data point to mongodbatlas.process.db.query_targeting.scanned_per_returned metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue AttributeScannedType) {
mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue.String())
}
// RecordMongodbatlasProcessDbStorageDataPoint adds a data point to mongodbatlas.process.db.storage metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pcommon.Timestamp, val float64, storageStatusAttributeValue AttributeStorageStatus) {
mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue.String())
}
// RecordMongodbatlasProcessGlobalLockDataPoint adds a data point to mongodbatlas.process.global_lock metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pcommon.Timestamp, val float64, globalLockStateAttributeValue AttributeGlobalLockState) {
mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue.String())
}
// RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint adds a data point to mongodbatlas.process.index.btree_miss_ratio metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessIndexBtreeMissRatio.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessIndexCountersDataPoint adds a data point to mongodbatlas.process.index.counters metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue AttributeBtreeCounterType) {
mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue.String())
}
// RecordMongodbatlasProcessJournalingCommitsDataPoint adds a data point to mongodbatlas.process.journaling.commits metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingCommitsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingCommits.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessJournalingDataFilesDataPoint adds a data point to mongodbatlas.process.journaling.data_files metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingDataFiles.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessJournalingWrittenDataPoint adds a data point to mongodbatlas.process.journaling.written metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessJournalingWritten.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessMemoryUsageDataPoint adds a data point to mongodbatlas.process.memory.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkIoDataPoint adds a data point to mongodbatlas.process.network.io metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasProcessNetworkRequestsDataPoint adds a data point to mongodbatlas.process.network.requests metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkRequestsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessNetworkRequests.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessOplogRateDataPoint adds a data point to mongodbatlas.process.oplog.rate metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessOplogRate.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessOplogTimeDataPoint adds a data point to mongodbatlas.process.oplog.time metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pcommon.Timestamp, val float64, oplogTypeAttributeValue AttributeOplogType) {
mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue.String())
}
// RecordMongodbatlasProcessPageFaultsDataPoint adds a data point to mongodbatlas.process.page_faults metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue AttributeMemoryIssueType) {
mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue.String())
}
// RecordMongodbatlasProcessRestartsDataPoint adds a data point to mongodbatlas.process.restarts metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasProcessRestarts.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasProcessTicketsDataPoint adds a data point to mongodbatlas.process.tickets metric.
func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pcommon.Timestamp, val float64, ticketTypeAttributeValue AttributeTicketType) {
mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemCPUUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.normalized.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsCPUUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue AttributeCPUState) {
mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue.String())
}
// RecordMongodbatlasSystemFtsDiskUsedDataPoint adds a data point to mongodbatlas.system.fts.disk.used metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pcommon.Timestamp, val float64) {
mb.metricMongodbatlasSystemFtsDiskUsed.recordDataPoint(mb.startTime, ts, val)
}
// RecordMongodbatlasSystemFtsMemoryUsageDataPoint adds a data point to mongodbatlas.system.fts.memory.usage metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageAverageDataPoint adds a data point to mongodbatlas.system.memory.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemMemoryUsageMaxDataPoint adds a data point to mongodbatlas.system.memory.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue AttributeMemoryStatus) {
mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoAverageDataPoint adds a data point to mongodbatlas.system.network.io.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemNetworkIoMaxDataPoint adds a data point to mongodbatlas.system.network.io.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoAverageDataPoint adds a data point to mongodbatlas.system.paging.io.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingIoMaxDataPoint adds a data point to mongodbatlas.system.paging.io.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue AttributeDirection) {
mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageAverageDataPoint adds a data point to mongodbatlas.system.paging.usage.average metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// RecordMongodbatlasSystemPagingUsageMaxDataPoint adds a data point to mongodbatlas.system.paging.usage.max metric.
func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue AttributeMemoryState) {
mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue.String())
}
// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is restarted,
// so the metrics builder can update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) {
mb.startTime = pcommon.NewTimestampFromTime(time.Now())
for _, op := range options {
op.apply(mb)
}
}
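// Illustrative sketch (not part of the generated code): resetting the builder
// after detecting that the monitored source restarted, so cumulative metrics
// get a fresh start time. sourceRestarted is a hypothetical condition.
//
//	if sourceRestarted {
//		mb.Reset()
//	}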
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml.
// The ResourceBuilder is not thread-safe and must not be used from multiple goroutines.
type ResourceBuilder struct {
config ResourceAttributesConfig
res pcommon.Resource
}
// NewResourceBuilder creates a new ResourceBuilder. This method should be called at the start of the application.
func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder {
return &ResourceBuilder{
config: rac,
res: pcommon.NewResource(),
}
}
// SetMongodbAtlasClusterName sets provided value as "mongodb_atlas.cluster.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasClusterName(val string) {
if rb.config.MongodbAtlasClusterName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.cluster.name", val)
}
}
// SetMongodbAtlasDbName sets provided value as "mongodb_atlas.db.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasDbName(val string) {
if rb.config.MongodbAtlasDbName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.db.name", val)
}
}
// SetMongodbAtlasDiskPartition sets provided value as "mongodb_atlas.disk.partition" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasDiskPartition(val string) {
if rb.config.MongodbAtlasDiskPartition.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.disk.partition", val)
}
}
// SetMongodbAtlasHostName sets provided value as "mongodb_atlas.host.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasHostName(val string) {
if rb.config.MongodbAtlasHostName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.host.name", val)
}
}
// SetMongodbAtlasOrgName sets provided value as "mongodb_atlas.org_name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasOrgName(val string) {
if rb.config.MongodbAtlasOrgName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.org_name", val)
}
}
// SetMongodbAtlasProcessID sets provided value as "mongodb_atlas.process.id" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessID(val string) {
if rb.config.MongodbAtlasProcessID.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.id", val)
}
}
// SetMongodbAtlasProcessPort sets provided value as "mongodb_atlas.process.port" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessPort(val string) {
if rb.config.MongodbAtlasProcessPort.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.port", val)
}
}
// SetMongodbAtlasProcessTypeName sets provided value as "mongodb_atlas.process.type_name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProcessTypeName(val string) {
if rb.config.MongodbAtlasProcessTypeName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.process.type_name", val)
}
}
// SetMongodbAtlasProjectID sets provided value as "mongodb_atlas.project.id" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProjectID(val string) {
if rb.config.MongodbAtlasProjectID.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.project.id", val)
}
}
// SetMongodbAtlasProjectName sets provided value as "mongodb_atlas.project.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProjectName(val string) {
if rb.config.MongodbAtlasProjectName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.project.name", val)
}
}
// SetMongodbAtlasProviderName sets provided value as "mongodb_atlas.provider.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasProviderName(val string) {
if rb.config.MongodbAtlasProviderName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.provider.name", val)
}
}
// SetMongodbAtlasRegionName sets provided value as "mongodb_atlas.region.name" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasRegionName(val string) {
if rb.config.MongodbAtlasRegionName.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.region.name", val)
}
}
// SetMongodbAtlasUserAlias sets provided value as "mongodb_atlas.user.alias" attribute.
func (rb *ResourceBuilder) SetMongodbAtlasUserAlias(val string) {
if rb.config.MongodbAtlasUserAlias.Enabled {
rb.res.Attributes().PutStr("mongodb_atlas.user.alias", val)
}
}
// Emit returns the built resource and resets the internal builder state.
func (rb *ResourceBuilder) Emit() pcommon.Resource {
r := rb.res
rb.res = pcommon.NewResource()
return r
}
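// Illustrative sketch (not part of the generated code): building a resource
// for a specific Atlas process. All attribute values are hypothetical, and
// cfg is an assumed holder of the ResourceAttributesConfig.
//
//	rb := NewResourceBuilder(cfg.ResourceAttributes)
//	rb.SetMongodbAtlasProjectName("my-project")
//	rb.SetMongodbAtlasHostName("cluster0-shard-00-00.example.net")
//	rb.SetMongodbAtlasProcessPort("27017")
//	res := rb.Emit() // the builder resets itself and can be reused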
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
import (
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// metricRecordFunc records a data point to the metric builder at the supplied timestamp.
type metricRecordFunc func(*MetricsBuilder, *mongodbatlas.DataPoints, pcommon.Timestamp)
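// Illustrative sketch (not part of the generated code): a caller might look up
// the record function for an Atlas measurement and apply it to each data
// point, skipping gaps. measurement (*mongodbatlas.Measurements) and parseTS
// (a string-to-pcommon.Timestamp helper) are hypothetical.
//
//	if record := getRecordFunc(measurement.Name); record != nil {
//		for _, dp := range measurement.DataPoints {
//			if dp.Value == nil { // Atlas reports gaps as nil values
//				continue
//			}
//			record(mb, dp, parseTS(dp.Timestamp))
//		}
//	}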
// getRecordFunc returns the metricRecordFunc that matches the metric name, or nil if none is found.
func getRecordFunc(metricName string) metricRecordFunc {
switch metricName {
// MongoDB CPU usage. For hosts with more than one CPU core, these values can exceed 100%.
case "PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// MongoDB CPU usage scaled to a range of 0% to 100%. Atlas computes this value by dividing by the number of CPU cores.
case "PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Rate of asserts for a MongoDB process found in the asserts document that the serverStatus command generates.
case "ASSERT_REGULAR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeRegular)
}
case "ASSERT_WARNING":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeWarning)
}
case "ASSERT_MSG":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeMsg)
}
case "ASSERT_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertTypeUser)
}
// Amount of data flushed in the background.
case "BACKGROUND_FLUSH_AVG":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessBackgroundFlushDataPoint(ts, float64(*dp.Value))
}
// Amount of bytes in the WiredTiger storage engine cache and tickets found in the wiredTiger.cache and wiredTiger.concurrentTransactions documents that the serverStatus command generates.
case "CACHE_BYTES_READ_INTO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionReadInto)
}
case "CACHE_BYTES_WRITTEN_FROM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirectionWrittenFrom)
}
case "CACHE_DIRTY_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusDirty)
}
case "CACHE_USED_BYTES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed)
}
case "CACHE_FILL_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeCacheFill)
}
case "DIRTY_FILL_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeDirtyFill)
}
case "TICKETS_AVAILABLE_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads)
}
case "TICKETS_AVAILABLE_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableWrites)
}
// Number of connections to a MongoDB process found in the connections document that the serverStatus command generates.
case "CONNECTIONS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessConnectionsDataPoint(ts, float64(*dp.Value))
}
// Number of cursors for a MongoDB process found in the metrics.cursor document that the serverStatus command generates.
case "CURSORS_TOTAL_OPEN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateOpen)
}
case "CURSORS_TOTAL_TIMED_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorStateTimedOut)
}
// Numbers of Memory Issues and Page Faults for a MongoDB process.
case "EXTRA_INFO_PAGE_FAULTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExtraInfo)
}
case "GLOBAL_ACCESSES_NOT_IN_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeGlobalAccessesNotInMemory)
}
case "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueTypeExceptionsThrown)
}
// Number of operations waiting on locks for the MongoDB process that the serverStatus command generates. Cloud Manager computes these values based on the type of storage engine.
case "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueTotal)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_READERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueReaders)
}
case "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockStateCurrentQueueWriters)
}
// Number of index btree operations.
case "INDEX_COUNTERS_BTREE_ACCESSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeAccesses)
}
case "INDEX_COUNTERS_BTREE_HITS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeHits)
}
case "INDEX_COUNTERS_BTREE_MISSES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterTypeMisses)
}
case "INDEX_COUNTERS_BTREE_MISS_RATIO":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts, float64(*dp.Value))
}
// Number of journaling operations.
case "JOURNALING_COMMITS_IN_WRITE_LOCK":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingCommitsDataPoint(ts, float64(*dp.Value))
}
case "JOURNALING_MB":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingWrittenDataPoint(ts, float64(*dp.Value))
}
case "JOURNALING_WRITE_DATA_FILES_MB":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts, float64(*dp.Value))
}
// Amount of memory for a MongoDB process found in the mem document that the serverStatus command collects.
case "MEMORY_RESIDENT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "MEMORY_VIRTUAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
case "COMPUTED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateComputed)
}
// Amount of throughput for MongoDB process found in the network document that the serverStatus command collects.
case "NETWORK_BYTES_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "NETWORK_BYTES_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "NETWORK_NUM_REQUESTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessNetworkRequestsDataPoint(ts, float64(*dp.Value))
}
// Durations and throughput of the MongoDB process' oplog.
case "OPLOG_SLAVE_LAG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeSlaveLagMasterTime)
}
case "OPLOG_MASTER_TIME":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterTime)
}
case "OPLOG_MASTER_LAG_TIME_DIFF":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogTypeMasterLagTimeDiff)
}
case "OPLOG_RATE_GB_PER_HOUR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessOplogRateDataPoint(ts, float64(*dp.Value))
}
// Number of database operations on a MongoDB process since the process last started.
case "DB_STORAGE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusTotal)
}
case "DB_DATA_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSize)
}
case "DB_INDEX_SIZE_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusIndexSize)
}
case "DB_DATA_SIZE_TOTAL_WO_SYSTEM":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatusDataSizeWoSystem)
}
// Rate of database operations on a MongoDB process since the process last started found in the opcounters document that the serverStatus command collects.
case "OPCOUNTER_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRolePrimary)
}
case "OPCOUNTER_QUERY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationQuery, AttributeClusterRolePrimary)
}
case "OPCOUNTER_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRolePrimary)
}
case "OPCOUNTER_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRolePrimary)
}
case "OPCOUNTER_GETMORE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationGetmore, AttributeClusterRolePrimary)
}
case "OPCOUNTER_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary)
}
case "OPCOUNTER_TTL_DELETED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationTTLDeleted, AttributeClusterRolePrimary)
}
// Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects.
case "OPCOUNTER_REPL_CMD":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationCmd, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_UPDATE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationUpdate, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_DELETE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationDelete, AttributeClusterRoleReplica)
}
case "OPCOUNTER_REPL_INSERT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRoleReplica)
}
// Average rate of documents returned, inserted, updated, or deleted per second during a selected time period.
case "DOCUMENT_METRICS_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusReturned)
}
case "DOCUMENT_METRICS_INSERTED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusInserted)
}
case "DOCUMENT_METRICS_UPDATED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusUpdated)
}
case "DOCUMENT_METRICS_DELETED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatusDeleted)
}
// Average rate for operations per second during a selected time period that perform a sort but cannot perform the sort using an index.
case "OPERATIONS_SCAN_AND_ORDER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationScanAndOrder, AttributeClusterRolePrimary)
}
// Average execution time in milliseconds per read, write, or command operation during a selected time period.
case "OP_EXECUTION_TIME_READS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeReads)
}
case "OP_EXECUTION_TIME_WRITES":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeWrites)
}
case "OP_EXECUTION_TIME_COMMANDS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionTypeCommands)
}
// Number of times the host restarted within the previous hour.
case "RESTARTS_IN_LAST_HOUR":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessRestartsDataPoint(ts, float64(*dp.Value))
}
// Average rate per second to scan index items during queries and query-plan evaluations found in the value of totalKeysExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Average rate of documents scanned per second during queries and query-plan evaluations found in the value of totalDocsExamined from the explain command.
case "QUERY_EXECUTOR_SCANNED_OBJECTS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// Ratio of the number of index items scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeIndexItems)
}
// Ratio of the number of documents scanned to the number of documents returned.
case "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedTypeObjects)
}
// CPU usage of processes on the host. For hosts with more than one CPU core, this value can exceed 100%.
case "SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "MAX_SYSTEM_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// CPU usage of processes on the host scaled to a range of 0 to 100% by dividing by the number of CPU cores.
case "SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "MAX_SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "MAX_SYSTEM_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "SYSTEM_NORMALIZED_CPU_NICE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateNice)
}
case "SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIowait)
}
case "SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_IRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateIrq)
}
case "SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSoftirq)
}
case "SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "MAX_SYSTEM_NORMALIZED_CPU_GUEST":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateGuest)
}
case "SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
case "MAX_SYSTEM_NORMALIZED_CPU_STEAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUStateSteal)
}
// Physical memory, in bytes, that the host uses.
case "SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "MAX_SYSTEM_MEMORY_AVAILABLE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusAvailable)
}
case "SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "MAX_SYSTEM_MEMORY_BUFFERS":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusBuffers)
}
case "SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "MAX_SYSTEM_MEMORY_CACHED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusCached)
}
case "SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "MAX_SYSTEM_MEMORY_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusFree)
}
case "SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "MAX_SYSTEM_MEMORY_SHARED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusShared)
}
case "SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
case "MAX_SYSTEM_MEMORY_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatusUsed)
}
// Average rate of physical bytes per second that the eth0 network interface received and transmitted.
case "SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SYSTEM_NETWORK_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SYSTEM_NETWORK_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Total amount of memory that swap uses.
case "SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "MAX_SWAP_USAGE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateUsed)
}
case "SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
case "MAX_SWAP_USAGE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStateFree)
}
// Total amount of memory written to and read from swap.
case "SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "MAX_SWAP_IO_IN":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionReceive)
}
case "SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
case "MAX_SWAP_IO_OUT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirectionTransmit)
}
// Memory usage, in bytes, that Atlas Search processes use.
case "FTS_PROCESS_RESIDENT_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateResident)
}
case "FTS_PROCESS_VIRTUAL_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateVirtual)
}
case "FTS_PROCESS_SHARED_MEMORY":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateShared)
}
case "FTS_MEMORY_MAPPED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryStateMapped)
}
// Disk space, in bytes, that Atlas Search indexes use.
// FTS_DISK_UTILIZATION is the documented field name, but FTS_DISK_USAGE is what is returned from the API.
// Including both so if the API changes to match the documentation this metric is still collected.
case "FTS_DISK_USAGE", "FTS_DISK_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts, float64(*dp.Value))
}
// Percentage of CPU that Atlas Search processes use.
case "FTS_PROCESS_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
case "FTS_PROCESS_NORMALIZED_CPU_USER":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateUser)
}
case "FTS_PROCESS_NORMALIZED_CPU_KERNEL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUStateKernel)
}
// Process Disk Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/)
// Measures throughput of I/O operations for the disk partition used for MongoDB.
case "DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_IOPS_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_IOPS_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
case "MAX_DISK_PARTITION_IOPS_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
// Measures throughput of data read and written to the disk partition (not cache) used by MongoDB.
case "DISK_PARTITION_THROUGHPUT_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_THROUGHPUT_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
// This is a calculated metric that is the sum of the read and write throughput.
case "DISK_PARTITION_THROUGHPUT_TOTAL":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal)
}
// Measures the queue depth of the disk partition used by MongoDB.
case "DISK_QUEUE_DEPTH":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, float64(*dp.Value))
}
// Measures latency per operation type of the disk partition used by MongoDB.
case "DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "MAX_DISK_PARTITION_LATENCY_READ":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead)
}
case "DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
case "MAX_DISK_PARTITION_LATENCY_WRITE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite)
}
// The percentage of time during which requests are being issued to and serviced by the partition.
// This includes requests from any process, not just MongoDB processes.
case "DISK_PARTITION_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value))
}
case "MAX_DISK_PARTITION_UTILIZATION":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value))
}
// Measures the free disk space and used disk space on the disk partition used by MongoDB.
case "DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_FREE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusFree)
}
case "DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
case "MAX_DISK_PARTITION_SPACE_PERCENT_USED":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatusUsed)
}
// Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-databases-measurements/)
case "DATABASE_COLLECTION_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeCollection)
}
case "DATABASE_INDEX_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_EXTENT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeExtent)
}
case "DATABASE_OBJECT_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_VIEW_COUNT":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectTypeView)
}
case "DATABASE_AVERAGE_OBJECT_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeObject)
}
case "DATABASE_STORAGE_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeStorage)
}
case "DATABASE_INDEX_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeIndex)
}
case "DATABASE_DATA_SIZE":
return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) {
mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectTypeData)
}
default:
return nil
}
}
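// MeasurementsToMetric records the data points of a single Atlas measurement
// using the mapping defined by getRecordFunc. Measurement names without a
// mapping are skipped rather than treated as errors.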
func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements) error {
recordFunc := getRecordFunc(meas.Name)
if recordFunc == nil {
return nil
}
return addDataPoint(mb, meas, recordFunc)
}
func addDataPoint(mb *MetricsBuilder, meas *mongodbatlas.Measurements, recordFunc metricRecordFunc) error {
for _, point := range meas.DataPoints {
if point.Value != nil {
curTime, err := time.Parse(time.RFC3339, point.Timestamp)
if err != nil {
return err
}
recordFunc(mb, point, pcommon.NewTimestampFromTime(curTime))
}
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
import (
"fmt"
"go.mongodb.org/atlas/mongodbatlas"
"go.uber.org/multierr"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
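// processMeasurements records each measurement through the generated metadata
// mapping and then derives any calculated totals. Per-measurement failures are
// accumulated with multierr so a single bad measurement does not drop the batch.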
func processMeasurements(
mb *metadata.MetricsBuilder,
measurements []*mongodbatlas.Measurements,
) error {
var errs error
for _, meas := range measurements {
err := metadata.MeasurementsToMetric(mb, meas)
if err != nil {
errs = multierr.Append(errs, err)
}
}
err := calculateTotalMetrics(mb, measurements)
if err != nil {
errs = multierr.Append(errs, err)
}
if errs != nil {
return fmt.Errorf("errors occurred while processing measurements: %w", errs)
}
return nil
}
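// calculateTotalMetrics synthesizes a DISK_PARTITION_THROUGHPUT_TOTAL
// measurement by summing the read and write throughput series at matching
// timestamps. For illustration (hypothetical values): READ {t1: 3, t2: 5}
// combined with WRITE {t1: 2, t2: 4} yields TOTAL {t1: 5, t2: 9}.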
func calculateTotalMetrics(
mb *metadata.MetricsBuilder,
measurements []*mongodbatlas.Measurements,
) error {
var err error
dptTotalMeasCombined := false
var dptTotalMeas *mongodbatlas.Measurements
for _, meas := range measurements {
switch meas.Name {
case "DISK_PARTITION_THROUGHPUT_READ", "DISK_PARTITION_THROUGHPUT_WRITE":
if dptTotalMeas == nil {
dptTotalMeas = cloneMeasurement(meas)
dptTotalMeas.Name = "DISK_PARTITION_THROUGHPUT_TOTAL"
continue
}
// Combine data point values with matching timestamps
for j, totalMeas := range dptTotalMeas.DataPoints {
if totalMeas.Timestamp != meas.DataPoints[j].Timestamp ||
(totalMeas.Value == nil && meas.DataPoints[j].Value == nil) {
continue
}
if totalMeas.Value == nil {
totalMeas.Value = new(float32)
}
// Dereference only after the nil check; a missing value counts as zero.
addValue := float32(0)
if meas.DataPoints[j].Value != nil {
addValue = *meas.DataPoints[j].Value
}
*totalMeas.Value += addValue
dptTotalMeasCombined = true
}
default:
}
}
if dptTotalMeasCombined {
err = metadata.MeasurementsToMetric(mb, dptTotalMeas)
}
return err
}
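// cloneMeasurement copies a measurement and its data points so the synthesized
// total can be accumulated without mutating the source measurement.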
func cloneMeasurement(meas *mongodbatlas.Measurements) *mongodbatlas.Measurements {
clone := &mongodbatlas.Measurements{
Name: meas.Name,
Units: meas.Units,
DataPoints: make([]*mongodbatlas.DataPoints, len(meas.DataPoints)),
}
for i, dp := range meas.DataPoints {
if dp != nil {
newDP := *dp
// Deep-copy the value pointer so summing into the clone cannot
// mutate the source measurement's data points.
if dp.Value != nil {
v := *dp.Value
newDP.Value = &v
}
clone.DataPoints[i] = &newDP
}
}
return clone
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package model // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
import (
"go.opentelemetry.io/collector/pdata/pcommon"
)
// LogEntry represents a MongoDB Atlas JSON log entry
type LogEntry struct {
Timestamp LogTimestamp `json:"t"`
Severity string `json:"s"`
Component string `json:"c"`
ID int64 `json:"id"`
Context string `json:"ctx"`
Message string `json:"msg"`
Attributes map[string]any `json:"attr"`
// Raw is the original log line. It is not a part of the payload, but transient data added during decoding.
Raw string `json:"-"`
}
// AuditLog represents a MongoDB Atlas JSON audit log entry
type AuditLog struct {
Type string `json:"atype"`
Timestamp LogTimestamp `json:"ts"`
ID *ID `json:"uuid,omitempty"`
Local Address `json:"local"`
Remote Address `json:"remote"`
Users []AuditUser `json:"users"`
Roles []AuditRole `json:"roles"`
Result int `json:"result"`
Param map[string]any `json:"param"`
// Raw is the original log line. It is not a part of the payload, but transient data added during decoding.
Raw string `json:"-"`
}
// LogTimestamp represents the timestamp structure of a MongoDB Atlas JSON log entry
type LogTimestamp struct {
Date string `json:"$date"`
}
type ID struct {
Binary string `json:"$binary"`
Type string `json:"$type"`
}
type Address struct {
IP *string `json:"ip,omitempty"`
Port *int `json:"port,omitempty"`
SystemUser *bool `json:"isSystemUser,omitempty"`
UnixSocket *string `json:"unix,omitempty"`
}
type AuditRole struct {
Role string `json:"role"`
Database string `json:"db"`
}
func (ar AuditRole) Pdata() pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(2)
m.PutStr("role", ar.Role)
m.PutStr("db", ar.Database)
return m
}
type AuditUser struct {
User string `json:"user"`
Database string `json:"db"`
}
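// Pdata converts the audit user to its pcommon.Map representation.
// For illustration (hypothetical values): AuditUser{User: "admin", Database: "admin"}
// maps to {"user": "admin", "db": "admin"}.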
func (ar AuditUser) Pdata() pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(2)
m.PutStr("user", ar.User)
m.PutStr("db", ar.Database)
return m
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"strconv"
"sync"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/mongodb-forks/digest"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/config/configretry"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
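// clientRoundTripper wraps an http.RoundTripper so that requests rejected by
// Atlas with 429 Too Many Requests are retried with exponential backoff, and
// so that in-flight retries can be interrupted on receiver shutdown.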
type clientRoundTripper struct {
originalTransport http.RoundTripper
log *zap.Logger
backoffConfig configretry.BackOffConfig
stopped bool
mutex sync.Mutex
shutdownChan chan struct{}
}
func newClientRoundTripper(
originalTransport http.RoundTripper,
log *zap.Logger,
backoffConfig configretry.BackOffConfig,
) *clientRoundTripper {
return &clientRoundTripper{
originalTransport: originalTransport,
log: log,
backoffConfig: backoffConfig,
shutdownChan: make(chan struct{}, 1),
}
}
func (rt *clientRoundTripper) isStopped() bool {
rt.mutex.Lock()
defer rt.mutex.Unlock()
return rt.stopped
}
func (rt *clientRoundTripper) stop() {
rt.mutex.Lock()
defer rt.mutex.Unlock()
rt.stopped = true
}
func (rt *clientRoundTripper) Shutdown() error {
if rt.isStopped() {
return nil
}
rt.stop()
rt.shutdownChan <- struct{}{}
close(rt.shutdownChan)
return nil
}
func (rt *clientRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
if rt.isStopped() {
return nil, errors.New("request cancelled due to shutdown")
}
resp, err := rt.originalTransport.RoundTrip(r)
if err != nil {
return nil, err // Can't do anything
}
if resp.StatusCode == http.StatusTooManyRequests {
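// Atlas rate-limited the request. Retry with exponential backoff until the
// response is no longer a 429, the backoff gives up (MaxElapsedTime), the
// request context is done, or the receiver shuts down.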
expBackoff := &backoff.ExponentialBackOff{
InitialInterval: rt.backoffConfig.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: rt.backoffConfig.MaxInterval,
MaxElapsedTime: rt.backoffConfig.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
expBackoff.Reset()
attempts := 0
for {
attempts++
delay := expBackoff.NextBackOff()
if delay == backoff.Stop {
return resp, err
}
rt.log.Warn("server busy, retrying request",
zap.Int("attempts", attempts),
zap.Duration("delay", delay))
select {
case <-r.Context().Done():
return resp, errors.New("request was cancelled or timed out")
case <-rt.shutdownChan:
return resp, errors.New("request is cancelled due to server shutdown")
case <-time.After(delay):
}
resp, err = rt.originalTransport.RoundTrip(r)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusTooManyRequests {
break
}
}
}
return resp, err
}
// MongoDBAtlasClient wraps the official MongoDB Atlas client to manage pagination
// and map responses to OpenTelemetry metric and log structures.
type MongoDBAtlasClient struct {
log *zap.Logger
client *mongodbatlas.Client
transport *http.Transport
roundTripper *clientRoundTripper
}
// NewMongoDBAtlasClient creates a new MongoDB Atlas client wrapper
func NewMongoDBAtlasClient(
publicKey string,
privateKey string,
backoffConfig configretry.BackOffConfig,
log *zap.Logger,
) *MongoDBAtlasClient {
defaultTransporter := http.DefaultTransport.(*http.Transport)
t := digest.NewTransportWithHTTPTransport(publicKey, privateKey, defaultTransporter)
roundTripper := newClientRoundTripper(t, log, backoffConfig)
tc := &http.Client{Transport: roundTripper}
client := mongodbatlas.NewClient(tc)
return &MongoDBAtlasClient{
log,
client,
defaultTransporter,
roundTripper,
}
}
func (s *MongoDBAtlasClient) Shutdown() error {
s.transport.CloseIdleConnections()
return s.roundTripper.Shutdown()
}
// Check both the returned error and the status of the HTTP response
func checkMongoDBClientErr(err error, response *mongodbatlas.Response) error {
if err != nil {
return err
}
if response != nil {
return response.CheckResponse(response.Body)
}
return nil
}
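// hasNext reports whether a paginated Atlas response has another page.
// For illustration: Atlas responses carry pagination links such as
// {"rel": "next", "href": "..."}; a page is the last one exactly when no link
// with Rel == "next" is present.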
func hasNext(links []*mongodbatlas.Link) bool {
for _, link := range links {
if link.Rel == "next" {
return true
}
}
return false
}
// Organizations returns a list of all organizations available with the supplied credentials
func (s *MongoDBAtlasClient) Organizations(ctx context.Context) ([]*mongodbatlas.Organization, error) {
var allOrgs []*mongodbatlas.Organization
page := 1
for {
orgs, hasNext, err := s.getOrganizationsPage(ctx, page)
page++
if err != nil {
// TODO: Add error to a metric
// Stop, returning what we have (probably empty slice)
return allOrgs, fmt.Errorf("error retrieving organizations from MongoDB Atlas API: %w", err)
}
allOrgs = append(allOrgs, orgs...)
if !hasNext {
break
}
}
return allOrgs, nil
}
func (s *MongoDBAtlasClient) getOrganizationsPage(
ctx context.Context,
pageNum int,
) ([]*mongodbatlas.Organization, bool, error) {
orgs, response, err := s.client.Organizations.List(ctx, &mongodbatlas.OrganizationsListOptions{
ListOptions: mongodbatlas.ListOptions{
PageNum: pageNum,
},
})
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, fmt.Errorf("error in retrieving organizations: %w", err)
}
return orgs.Results, hasNext(orgs.Links), nil
}
// GetOrganization retrieves a single organization specified by orgID
func (s *MongoDBAtlasClient) GetOrganization(ctx context.Context, orgID string) (*mongodbatlas.Organization, error) {
org, response, err := s.client.Organizations.Get(ctx, orgID)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving project page: %w", err)
}
return org, nil
}
// Projects returns a list of projects accessible within the provided organization
func (s *MongoDBAtlasClient) Projects(
ctx context.Context,
orgID string,
) ([]*mongodbatlas.Project, error) {
var allProjects []*mongodbatlas.Project
page := 1
for {
projects, hasNext, err := s.getProjectsPage(ctx, orgID, page)
page++
if err != nil {
return allProjects, fmt.Errorf("error retrieving list of projects from MongoDB Atlas API: %w", err)
}
allProjects = append(allProjects, projects...)
if !hasNext {
break
}
}
return allProjects, nil
}
// GetProject returns a single project specified by projectName
func (s *MongoDBAtlasClient) GetProject(ctx context.Context, projectName string) (*mongodbatlas.Project, error) {
project, response, err := s.client.Projects.GetOneProjectByName(ctx, projectName)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving project page: %w", err)
}
return project, nil
}
func (s *MongoDBAtlasClient) getProjectsPage(
ctx context.Context,
orgID string,
pageNum int,
) ([]*mongodbatlas.Project, bool, error) {
projects, response, err := s.client.Organizations.Projects(
ctx,
orgID,
&mongodbatlas.ProjectsListOptions{
ListOptions: mongodbatlas.ListOptions{PageNum: pageNum},
},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, fmt.Errorf("error retrieving project page: %w", err)
}
return projects.Results, hasNext(projects.Links), nil
}
// Processes returns the list of processes running for a given project.
func (s *MongoDBAtlasClient) Processes(
ctx context.Context,
projectID string,
) ([]*mongodbatlas.Process, error) {
// A paginated API, but the MongoDB client just returns the values from the first page
// Note: MongoDB Atlas also has the idea of a Cluster; we can retrieve a list of clusters from
// the Project, but a Cluster does not have a link to its Process list and a Process does not
// have a link to its Cluster (save through the hostname, which is not a documented relationship).
processes, response, err := s.client.Processes.List(
ctx,
projectID,
&mongodbatlas.ProcessesListOptions{
ListOptions: mongodbatlas.ListOptions{
PageNum: 0,
ItemsPerPage: 0,
IncludeCount: true,
},
},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, fmt.Errorf("error retrieving processes from MongoDB Atlas API: %w", err)
}
return processes, nil
}
func (s *MongoDBAtlasClient) getProcessDatabasesPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
) ([]*mongodbatlas.ProcessDatabase, bool, error) {
databases, response, err := s.client.ProcessDatabases.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ListOptions{PageNum: pageNum},
)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return databases.Results, hasNext(databases.Links), nil
}
// ProcessDatabases lists databases that are running in a given MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDatabases(
ctx context.Context,
projectID string,
host string,
port int,
) ([]*mongodbatlas.ProcessDatabase, error) {
var allProcessDatabases []*mongodbatlas.ProcessDatabase
pageNum := 1
for {
processes, hasMore, err := s.getProcessDatabasesPage(ctx, projectID, host, port, pageNum)
pageNum++
if err != nil {
return allProcessDatabases, err
}
allProcessDatabases = append(allProcessDatabases, processes...)
if !hasMore {
break
}
}
return allProcessDatabases, nil
}
// ProcessMetrics returns a set of metrics associated with the specified running process.
func (s *MongoDBAtlasClient) ProcessMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.getProcessMeasurementsPage(
ctx,
projectID,
host,
port,
pageNum,
start,
end,
resolution,
)
if err != nil {
s.log.Debug("Error retrieving process metrics from MongoDB Atlas API", zap.Error(err))
break // Return partial results
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) getProcessMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessMeasurements.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// ProcessDatabaseMetrics returns metrics about a particular database running within a MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDatabaseMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
dbname string,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.getProcessDatabaseMeasurementsPage(
ctx,
projectID,
host,
port,
dbname,
pageNum,
start,
end,
resolution,
)
if err != nil {
return err
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) getProcessDatabaseMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
dbname string,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessDatabaseMeasurements.List(
ctx,
projectID,
host,
port,
dbname,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// ProcessDisks enumerates the disks accessible to a specified MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDisks(
ctx context.Context,
projectID string,
host string,
port int,
) []*mongodbatlas.ProcessDisk {
var allDisks []*mongodbatlas.ProcessDisk
pageNum := 1
for {
disks, hasMore, err := s.getProcessDisksPage(ctx, projectID, host, port, pageNum)
if err != nil {
s.log.Debug("Error retrieving disk metrics from MongoDB Atlas API", zap.Error(err))
break // Return partial results
}
pageNum++
allDisks = append(allDisks, disks...)
if !hasMore {
break
}
}
return allDisks
}
func (s *MongoDBAtlasClient) getProcessDisksPage(
ctx context.Context,
projectID string,
host string,
port int,
pageNum int,
) ([]*mongodbatlas.ProcessDisk, bool, error) {
disks, result, err := s.client.ProcessDisks.List(
ctx,
projectID,
host,
port,
&mongodbatlas.ListOptions{PageNum: pageNum},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return disks.Results, hasNext(disks.Links), nil
}
// ProcessDiskMetrics returns metrics supplied for a particular disk partition used by a MongoDB Atlas process
func (s *MongoDBAtlasClient) ProcessDiskMetrics(
ctx context.Context,
mb *metadata.MetricsBuilder,
projectID string,
host string,
port int,
partitionName string,
start string,
end string,
resolution string,
) error {
var allMeasurements []*mongodbatlas.Measurements
pageNum := 1
for {
measurements, hasMore, err := s.processDiskMeasurementsPage(
ctx,
projectID,
host,
port,
partitionName,
pageNum,
start,
end,
resolution,
)
if err != nil {
return err
}
pageNum++
allMeasurements = append(allMeasurements, measurements...)
if !hasMore {
break
}
}
return processMeasurements(mb, allMeasurements)
}
func (s *MongoDBAtlasClient) processDiskMeasurementsPage(
ctx context.Context,
projectID string,
host string,
port int,
partitionName string,
pageNum int,
start string,
end string,
resolution string,
) ([]*mongodbatlas.Measurements, bool, error) {
measurements, result, err := s.client.ProcessDiskMeasurements.List(
ctx,
projectID,
host,
port,
partitionName,
&mongodbatlas.ProcessMeasurementListOptions{
ListOptions: &mongodbatlas.ListOptions{PageNum: pageNum},
Granularity: resolution,
Start: start,
End: end,
},
)
err = checkMongoDBClientErr(err, result)
if err != nil {
return nil, false, err
}
return measurements.Measurements, hasNext(measurements.Links), nil
}
// GetLogs retrieves the logs from the MongoDB Atlas API: https://www.mongodb.com/docs/atlas/reference/api/logs/#syntax
func (s *MongoDBAtlasClient) GetLogs(ctx context.Context, groupID, hostname, logName string, start, end time.Time) (*bytes.Buffer, error) {
buf := bytes.NewBuffer([]byte{})
dateRange := &mongodbatlas.DateRangetOptions{StartDate: toUnixString(start), EndDate: toUnixString(end)}
resp, err := s.client.Logs.Get(ctx, groupID, hostname, logName, buf, dateRange)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("received status code: %d", resp.StatusCode)
}
return buf, nil
}
// GetClusters retrieves the clusters from the MongoDB Atlas API: https://www.mongodb.com/docs/atlas/reference/api/clusters-get-all/#request
func (s *MongoDBAtlasClient) GetClusters(ctx context.Context, groupID string) ([]mongodbatlas.Cluster, error) {
options := mongodbatlas.ListOptions{}
clusters, _, err := s.client.Clusters.List(ctx, groupID, &options)
if err != nil {
return nil, err
}
return clusters, nil
}
type AlertPollOptions struct {
PageNum int
PageSize int
}
// GetAlerts returns a page of alerts for the specified project
func (s *MongoDBAtlasClient) GetAlerts(ctx context.Context, groupID string, opts *AlertPollOptions) (ret []mongodbatlas.Alert, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.AlertsListOptions{ListOptions: lo}
alerts, response, err := s.client.Alerts.List(ctx, groupID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return alerts.Results, hasNext(response.Links), nil
}
// GetEventsOptions are the options to use for making a request to get Project Events
type GetEventsOptions struct {
// Which page of the paginated events
PageNum int
// How large the Pages will be
PageSize int
// The list of Event Types https://www.mongodb.com/docs/atlas/reference/api/events-projects-get-all/#event-type-values
// to grab from the API
EventTypes []string
// The oldest date to look back for the events
MinDate time.Time
// The newest time to accept events
MaxDate time.Time
}
// GetProjectEvents returns a page of events for the specified project
func (s *MongoDBAtlasClient) GetProjectEvents(ctx context.Context, groupID string, opts *GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.EventListOptions{
ListOptions: lo,
// Earliest Timestamp in ISO 8601 date and time format in UTC from when Atlas should return events.
MinDate: opts.MinDate.Format(time.RFC3339),
}
if len(opts.EventTypes) > 0 {
options.EventType = opts.EventTypes
}
events, response, err := s.client.Events.ListProjectEvents(ctx, groupID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return events.Results, hasNext(response.Links), nil
}
// GetOrganizationEvents returns a page of events for the specified organization
func (s *MongoDBAtlasClient) GetOrganizationEvents(ctx context.Context, orgID string, opts *GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) {
lo := mongodbatlas.ListOptions{
PageNum: opts.PageNum,
ItemsPerPage: opts.PageSize,
}
options := mongodbatlas.EventListOptions{
ListOptions: lo,
// Earliest Timestamp in ISO 8601 date and time format in UTC from when Atlas should return events.
MinDate: opts.MinDate.Format(time.RFC3339),
}
if len(opts.EventTypes) > 0 {
options.EventType = opts.EventTypes
}
events, response, err := s.client.Events.ListOrganizationEvents(ctx, orgID, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, false, err
}
return events.Results, hasNext(response.Links), nil
}
// GetAccessLogsOptions are the options to use for making a request to get Access Logs
type GetAccessLogsOptions struct {
// The oldest date to look back for the events
MinDate time.Time
// The newest time to accept events
MaxDate time.Time
// If true, only return successful access attempts; if false, only return failed access attempts
// If nil, return both successful and failed access attempts
AuthResult *bool
// Maximum number of entries to return
NLogs int
}
// GetAccessLogs returns the access logs specified for the cluster requested
func (s *MongoDBAtlasClient) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) {
options := mongodbatlas.AccessLogOptions{
// Earliest Timestamp in epoch milliseconds from when Atlas should return access log results
Start: strconv.FormatInt(opts.MinDate.UTC().UnixMilli(), 10),
// Latest Timestamp in epoch milliseconds until when Atlas should return access log results
End: strconv.FormatInt(opts.MaxDate.UTC().UnixMilli(), 10),
// If true, only return successful access attempts; if false, only return failed access attempts
// If nil, return both successful and failed access attempts
AuthResult: opts.AuthResult,
// Maximum number of entries to return (0-20000)
NLogs: opts.NLogs,
}
accessLogs, response, err := s.client.AccessTracking.ListByCluster(ctx, groupID, clusterName, &options)
err = checkMongoDBClientErr(err, response)
if err != nil {
return nil, err
}
return accessLogs.AccessLogs, nil
}
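// toUnixString formats a time as its Unix epoch seconds in decimal; for
// example, toUnixString(time.Unix(1609459200, 0)) returns "1609459200".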
func toUnixString(t time.Time) string {
return strconv.Itoa(int(t.Unix()))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"bufio"
"compress/gzip"
"encoding/json"
"io"
"regexp"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
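// decodeLogs selects a decoder based on the cluster's MongoDB major version:
// 4.2 clusters emit a plain-text console format, while all other versions
// emit one JSON document per line.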
func decodeLogs(logger *zap.Logger, clusterMajorVersion string, r io.Reader) ([]model.LogEntry, error) {
switch clusterMajorVersion {
case mongoDBMajorVersion4_2:
// 4.2 clusters use a console log format
return decode4_2(logger.Named("console_decoder"), r)
default:
// All other versions use JSON logging
return decodeJSON(logger.Named("json_decoder"), r)
}
}
func decodeJSON(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.LogEntry
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other IO error.
return entries, scanner.Err()
}
var entry model.LogEntry
if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil {
logger.Error("Failed to parse log entry as JSON", zap.String("entry", scanner.Text()))
continue
}
entry.Raw = scanner.Text()
entries = append(entries, entry)
}
}
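// mongo4_2LogRegex parses the plain-text console log format emitted by 4.2
// clusters. An illustrative (hypothetical) line it matches:
//   2021-07-01T00:00:00.000+0000 I NETWORK [conn123] end connection 192.0.2.1:27017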
var mongo4_2LogRegex = regexp.MustCompile(`^(?P<timestamp>\S+)\s+(?P<severity>\w+)\s+(?P<component>[\w-]+)\s+\[(?P<context>\S+)\]\s+(?P<message>.*)$`)
func decode4_2(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.LogEntry
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other IO error.
return entries, scanner.Err()
}
submatches := mongo4_2LogRegex.FindStringSubmatch(scanner.Text())
if len(submatches) != 6 {
// Match failed for this line; skip it and continue processing others.
logger.Error("Entry did not match regex", zap.String("entry", scanner.Text()))
continue
}
entry := model.LogEntry{
Timestamp: model.LogTimestamp{
Date: submatches[1],
},
Severity: submatches[2],
Component: submatches[3],
Context: submatches[4],
Message: submatches[5],
Raw: submatches[0],
}
entries = append(entries, entry)
}
}
func decodeAuditJSON(logger *zap.Logger, r io.Reader) ([]model.AuditLog, error) {
// Pass this into a gzip reader for decoding
gzipReader, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(gzipReader)
var entries []model.AuditLog
for {
if !scanner.Scan() {
// Scan failed; this might just be EOF, in which case Err will be nil, or it could be some other IO error.
return entries, scanner.Err()
}
var entry model.AuditLog
if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil {
logger.Error("Failed to parse audit log entry as JSON", zap.String("entry", scanner.Text()))
continue
}
entry.Raw = scanner.Text()
entries = append(entries, entry)
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"errors"
"io"
"net"
"strings"
"sync"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
rcvr "go.opentelemetry.io/collector/receiver"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
const mongoDBMajorVersion4_2 = "4.2"
type logsReceiver struct {
log *zap.Logger
cfg *Config
client *internal.MongoDBAtlasClient
consumer consumer.Logs
stopperChan chan struct{}
wg sync.WaitGroup
start time.Time
end time.Time
}
type ProjectContext struct {
Project mongodbatlas.Project
orgName string
}
// MongoDB Atlas Documentation recommends a polling interval of 5 minutes: https://www.mongodb.com/docs/atlas/reference/api/logs/#logs
const collectionInterval = time.Minute * 5
func newMongoDBAtlasLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *logsReceiver {
client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger)
for _, p := range cfg.Logs.Projects {
p.populateIncludesAndExcludes()
}
return &logsReceiver{
log: settings.Logger,
cfg: cfg,
client: client,
stopperChan: make(chan struct{}),
consumer: consumer,
}
}
// Log receiver logic
func (s *logsReceiver) Start(ctx context.Context, _ component.Host) error {
s.wg.Add(1)
go func() {
defer s.wg.Done()
s.start = time.Now().Add(-collectionInterval)
s.end = time.Now()
for {
s.collect(ctx)
// Wait for shutdown, context cancellation, or the next collection interval; on a tick, advance the collection window.
select {
case <-ctx.Done():
return
case <-s.stopperChan:
return
case <-time.After(collectionInterval):
s.start = s.end
s.end = time.Now()
}
}
}()
return nil
}
func (s *logsReceiver) Shutdown(_ context.Context) error {
close(s.stopperChan)
s.wg.Wait()
return s.client.Shutdown()
}
// parseHostNames parses hostnames out of the specified comma-separated cluster connection string
func parseHostNames(s string, logger *zap.Logger) []string {
var hostnames []string
if s == "" {
return []string{}
}
for _, t := range strings.Split(s, ",") {
// separate hostname from scheme and port
host, _, err := net.SplitHostPort(strings.TrimPrefix(t, "mongodb://"))
if err != nil {
logger.Error("Could not parse out hostname: " + host)
continue
}
hostnames = append(hostnames, host)
}
return hostnames
}
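// For example (hypothetical hosts):
//
//	parseHostNames("mongodb://host1.mongodb.net:27017,host2.mongodb.net:27017", logger)
//	// returns []string{"host1.mongodb.net", "host2.mongodb.net"}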
// collect fetches logs for each configured project over the current collection window
func (s *logsReceiver) collect(ctx context.Context) {
for _, projectCfg := range s.cfg.Logs.Projects {
project, err := s.client.GetProject(ctx, projectCfg.Name)
if err != nil {
s.log.Error("Error retrieving project "+projectCfg.Name+":", zap.Error(err))
continue
}
pc := ProjectContext{Project: *project}
org, err := s.client.GetOrganization(ctx, project.OrgID)
if err != nil {
s.log.Error("Error retrieving organization", zap.Error(err))
pc.orgName = "unknown"
} else {
pc.orgName = org.Name
}
// get clusters for each of the projects
clusters, err := s.processClusters(ctx, *projectCfg, project.ID)
if err != nil {
s.log.Error("Failure to process Clusters", zap.Error(err))
}
s.collectClusterLogs(clusters, *projectCfg, pc)
}
}
func (s *logsReceiver) processClusters(ctx context.Context, projectCfg LogsProjectConfig, projectID string) ([]mongodbatlas.Cluster, error) {
clusters, err := s.client.GetClusters(ctx, projectID)
if err != nil {
s.log.Error("Failure to collect clusters from project: %w", zap.Error(err))
return nil, err
}
return filterClusters(clusters, projectCfg.ProjectConfig)
}
type ClusterInfo struct {
ClusterName string
RegionName string
ProviderName string
MongoDBMajorVersion string
}
func (s *logsReceiver) collectClusterLogs(clusters []mongodbatlas.Cluster, projectCfg LogsProjectConfig, pc ProjectContext) {
for _, cluster := range clusters {
clusterInfo := ClusterInfo{
ClusterName: cluster.Name,
RegionName: cluster.ProviderSettings.RegionName,
ProviderName: cluster.ProviderSettings.ProviderName,
MongoDBMajorVersion: cluster.MongoDBMajorVersion,
}
hostnames := parseHostNames(cluster.ConnectionStrings.Standard, s.log)
for _, hostname := range hostnames {
// Defaults to true if not specified
if projectCfg.EnableHostLogs == nil || *projectCfg.EnableHostLogs {
s.log.Debug("Collecting logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name))
s.collectLogs(pc, hostname, "mongodb.gz", clusterInfo)
s.collectLogs(pc, hostname, "mongos.gz", clusterInfo)
}
// Defaults to false if not specified
if projectCfg.EnableAuditLogs {
s.log.Debug("Collecting audit logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name))
s.collectAuditLogs(pc, hostname, "mongodb-audit-log.gz", clusterInfo)
s.collectAuditLogs(pc, hostname, "mongos-audit-log.gz", clusterInfo)
}
}
}
}
func filterClusters(clusters []mongodbatlas.Cluster, projectCfg ProjectConfig) ([]mongodbatlas.Cluster, error) {
include, exclude := projectCfg.IncludeClusters, projectCfg.ExcludeClusters
var allowed bool
var clusterNameSet map[string]struct{}
// check to include or exclude clusters
switch {
// keep all clusters if include and exclude are not specified
case len(include) == 0 && len(exclude) == 0:
return clusters, nil
// include is initialized
case len(include) > 0 && len(exclude) == 0:
allowed = true
clusterNameSet = projectCfg.includesByClusterName
// exclude is initialized
case len(exclude) > 0 && len(include) == 0:
allowed = false
clusterNameSet = projectCfg.excludesByClusterName
// both are initialized
default:
return nil, errors.New("both Include and Exclude clusters configured")
}
var filtered []mongodbatlas.Cluster
for _, cluster := range clusters {
if _, ok := clusterNameSet[cluster.Name]; ok == allowed {
filtered = append(filtered, cluster)
}
}
return filtered, nil
}
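// Sketch of the include/exclude semantics implemented above (hypothetical
// cluster names):
//
//	IncludeClusters: ["a"]  -> only cluster "a" is kept
//	ExcludeClusters: ["b"]  -> every cluster except "b" is kept
//	both configured         -> an error is returned
//	neither configured      -> all clusters are kept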
func (s *logsReceiver) getHostLogs(groupID, hostname, logName string, clusterMajorVersion string) ([]model.LogEntry, error) {
// Get gzip bytes buffer from API
buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end)
if err != nil {
return nil, err
}
return decodeLogs(s.log, clusterMajorVersion, buf)
}
func (s *logsReceiver) getHostAuditLogs(groupID, hostname, logName string) ([]model.AuditLog, error) {
// Get gzip bytes buffer from API
buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end)
if err != nil {
return nil, err
}
return decodeAuditJSON(s.log, buf)
}
func (s *logsReceiver) collectLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) {
logs, err := s.getHostLogs(pc.Project.ID, hostname, logName, clusterInfo.MongoDBMajorVersion)
if err != nil && !errors.Is(err, io.EOF) {
s.log.Warn("Failed to retrieve host logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
if len(logs) == 0 {
s.log.Warn("Attempted to retrieve host logs but received 0 logs", zap.Error(err), zap.String("log", logName), zap.String("hostname", hostname), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
ld := mongodbEventToLogData(s.log,
logs,
pc,
hostname,
logName,
clusterInfo)
err = s.consumer.ConsumeLogs(context.Background(), ld)
if err != nil {
s.log.Error("Failed to consume logs", zap.Error(err))
}
}
func (s *logsReceiver) collectAuditLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) {
logs, err := s.getHostAuditLogs(
pc.Project.ID,
hostname,
logName,
)
if err != nil && !errors.Is(err, io.EOF) {
s.log.Warn("Failed to retrieve audit logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
if len(logs) == 0 {
s.log.Warn("Attempted to retrieve audit logs but received 0 logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end))
return
}
ld, err := mongodbAuditEventToLogData(s.log,
logs,
pc,
hostname,
logName,
clusterInfo)
if err != nil {
s.log.Warn("Failed to translate audit logs: "+logName, zap.Error(err))
return
}
err = s.consumer.ConsumeLogs(context.Background(), ld)
if err != nil {
s.log.Error("Failed to consume logs", zap.Error(err))
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/multierr"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model"
)
const (
// Number of log attributes to add to the plog.LogRecordSlice for host logs.
totalLogAttributes = 10
// Number of log attributes to add to the plog.LogRecordSlice for audit logs.
totalAuditLogAttributes = 16
// Number of resource attributes to add to the plog.ResourceLogs.
totalResourceAttributes = 6
)
// Timestamp layouts for MongoDB Atlas log entries: the JSON layout is the default; the console layout applies to 4.2 clusters.
const (
jsonTimestampLayout = "2006-01-02T15:04:05.000-07:00"
consoleTimestampLayout = "2006-01-02T15:04:05.000-0700"
)
// severityMap maps MongoDB Atlas log severity levels to plog severity numbers.
var severityMap = map[string]plog.SeverityNumber{
"F": plog.SeverityNumberFatal,
"E": plog.SeverityNumberError,
"W": plog.SeverityNumberWarn,
"I": plog.SeverityNumberInfo,
"D": plog.SeverityNumberDebug,
"D1": plog.SeverityNumberDebug,
"D2": plog.SeverityNumberDebug2,
"D3": plog.SeverityNumberDebug3,
"D4": plog.SeverityNumberDebug4,
"D5": plog.SeverityNumberDebug4,
}
// mongodbAuditEventToLogData converts model.AuditLog events to plog.Logs and adds the resource attributes.
func mongodbAuditEventToLogData(logger *zap.Logger, logs []model.AuditLog, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) (plog.Logs, error) {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
resourceAttrs := rl.Resource().Attributes()
resourceAttrs.EnsureCapacity(totalResourceAttributes)
// Attributes related to the object causing the event.
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName)
resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name)
resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName)
resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName)
resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName)
resourceAttrs.PutStr("mongodb_atlas.host.name", hostname)
var errs []error
logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion)
for _, log := range logs {
lr := sl.LogRecords().AppendEmpty()
t, err := time.Parse(logTsFormat, log.Timestamp.Date)
if err != nil {
logger.Warn("Time failed to parse correctly", zap.Error(err))
}
lr.SetTimestamp(pcommon.NewTimestampFromTime(t))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
// Insert Raw Log message into Body of LogRecord
lr.Body().SetStr(log.Raw)
// Since Audit Logs don't have a severity/level
// Set the "SeverityNumber" and "SeverityText" to INFO
lr.SetSeverityNumber(plog.SeverityNumberInfo)
lr.SetSeverityText("INFO")
attrs := lr.Attributes()
attrs.EnsureCapacity(totalAuditLogAttributes)
attrs.PutStr("atype", log.Type)
if log.Local.IP != nil {
attrs.PutStr("local.ip", *log.Local.IP)
}
if log.Local.Port != nil {
attrs.PutInt("local.port", int64(*log.Local.Port))
}
if log.Local.SystemUser != nil {
attrs.PutBool("local.isSystemUser", *log.Local.SystemUser)
}
if log.Local.UnixSocket != nil {
attrs.PutStr("local.unix", *log.Local.UnixSocket)
}
if log.Remote.IP != nil {
attrs.PutStr("remote.ip", *log.Remote.IP)
}
if log.Remote.Port != nil {
attrs.PutInt("remote.port", int64(*log.Remote.Port))
}
if log.Remote.SystemUser != nil {
attrs.PutBool("remote.isSystemUser", *log.Remote.SystemUser)
}
if log.Remote.UnixSocket != nil {
attrs.PutStr("remote.unix", *log.Remote.UnixSocket)
}
if log.ID != nil {
attrs.PutStr("uuid.binary", log.ID.Binary)
attrs.PutStr("uuid.type", log.ID.Type)
}
attrs.PutInt("result", int64(log.Result))
if err = attrs.PutEmptyMap("param").FromRaw(log.Param); err != nil {
errs = append(errs, err)
}
usersSlice := attrs.PutEmptySlice("users")
usersSlice.EnsureCapacity(len(log.Users))
for _, user := range log.Users {
user.Pdata().CopyTo(usersSlice.AppendEmpty().SetEmptyMap())
}
rolesSlice := attrs.PutEmptySlice("roles")
rolesSlice.EnsureCapacity(len(log.Roles))
for _, roles := range log.Roles {
roles.Pdata().CopyTo(rolesSlice.AppendEmpty().SetEmptyMap())
}
attrs.PutStr("log_name", logName)
}
return ld, multierr.Combine(errs...)
}
// mongodbEventToLogData converts model.LogEntry events to plog.Logs and adds the resource attributes.
func mongodbEventToLogData(logger *zap.Logger, logs []model.LogEntry, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) plog.Logs {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
resourceAttrs := rl.Resource().Attributes()
resourceAttrs.EnsureCapacity(totalResourceAttributes)
// Attributes related to the object causing the event.
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName)
resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name)
resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName)
resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName)
resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName)
resourceAttrs.PutStr("mongodb_atlas.host.name", hostname)
logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion)
for _, log := range logs {
lr := sl.LogRecords().AppendEmpty()
t, err := time.Parse(logTsFormat, log.Timestamp.Date)
if err != nil {
logger.Warn("Time failed to parse correctly", zap.Error(err))
}
lr.SetTimestamp(pcommon.NewTimestampFromTime(t))
lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
// Insert Raw Log message into Body of LogRecord
lr.Body().SetStr(log.Raw)
// Set the "SeverityNumber" and "SeverityText" if a known type of
// severity is found.
if severityNumber, ok := severityMap[log.Severity]; ok {
lr.SetSeverityNumber(severityNumber)
lr.SetSeverityText(log.Severity)
} else {
logger.Debug("unknown severity type", zap.String("type", log.Severity))
}
attrs := lr.Attributes()
attrs.EnsureCapacity(totalLogAttributes)
//nolint:errcheck
attrs.FromRaw(log.Attributes)
attrs.PutStr("message", log.Message)
attrs.PutStr("component", log.Component)
attrs.PutStr("context", log.Context)
// log ID is not present on MongoDB 4.2 systems
if clusterInfo.MongoDBMajorVersion != mongoDBMajorVersion4_2 {
attrs.PutInt("id", log.ID)
}
attrs.PutStr("log_name", logName)
}
return ld
}
func tsLayout(clusterVersion string) string {
switch clusterVersion {
case mongoDBMajorVersion4_2:
return consoleTimestampLayout
default:
return jsonTimestampLayout
}
}
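// A hedged sketch of how the selected layout is consumed:
//
//	t, err := time.Parse(tsLayout("4.4"), "2023-01-02T15:04:05.000-07:00") // JSON layout
//	t, err = time.Parse(tsLayout("4.2"), "2023-01-02T15:04:05.000-0700")  // console layout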
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go.mongodb.org/atlas/mongodbatlas"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/scraper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)
type mongodbatlasreceiver struct {
log *zap.Logger
cfg *Config
client *internal.MongoDBAtlasClient
lastRun time.Time
mb *metadata.MetricsBuilder
stopperChan chan struct{}
}
type timeconstraints struct {
start string
end string
resolution string
}
func newMongoDBAtlasReceiver(settings receiver.Settings, cfg *Config) *mongodbatlasreceiver {
client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger)
for _, p := range cfg.Projects {
p.populateIncludesAndExcludes()
}
return &mongodbatlasreceiver{
log: settings.Logger,
cfg: cfg,
client: client,
mb: metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings),
stopperChan: make(chan struct{}),
}
}
func newMongoDBAtlasScraper(recv *mongodbatlasreceiver) (scraper.Metrics, error) {
return scraper.NewMetrics(recv.scrape, scraper.WithShutdown(recv.shutdown))
}
func (s *mongodbatlasreceiver) scrape(ctx context.Context) (pmetric.Metrics, error) {
now := time.Now()
if err := s.poll(ctx, s.timeConstraints(now)); err != nil {
return pmetric.Metrics{}, err
}
s.lastRun = now
return s.mb.Emit(), nil
}
func (s *mongodbatlasreceiver) timeConstraints(now time.Time) timeconstraints {
var start time.Time
if s.lastRun.IsZero() {
start = now.Add(-s.cfg.CollectionInterval)
} else {
start = s.lastRun
}
return timeconstraints{
start.UTC().Format(time.RFC3339),
now.UTC().Format(time.RFC3339),
s.cfg.Granularity,
}
}
func (s *mongodbatlasreceiver) shutdown(context.Context) error {
return s.client.Shutdown()
}
// poll decides whether to poll all projects or a specific project based on the configuration.
func (s *mongodbatlasreceiver) poll(ctx context.Context, time timeconstraints) error {
if len(s.cfg.Projects) == 0 {
return s.pollAllProjects(ctx, time)
}
return s.pollProjects(ctx, time)
}
// pollAllProjects polls every project in each organization accessible to the client.
func (s *mongodbatlasreceiver) pollAllProjects(ctx context.Context, time timeconstraints) error {
orgs, err := s.client.Organizations(ctx)
if err != nil {
return fmt.Errorf("error retrieving organizations: %w", err)
}
for _, org := range orgs {
proj, err := s.client.Projects(ctx, org.ID)
if err != nil {
s.log.Error("error retrieving projects", zap.String("orgID", org.ID), zap.Error(err))
continue
}
for _, project := range proj {
// Since there is no specific ProjectConfig for these projects, pass nil.
if err := s.processProject(ctx, time, org.Name, project, nil); err != nil {
s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
}
}
}
return nil
}
// pollProjects polls only the projects specified in the configuration.
func (s *mongodbatlasreceiver) pollProjects(ctx context.Context, time timeconstraints) error {
for _, projectCfg := range s.cfg.Projects {
project, err := s.client.GetProject(ctx, projectCfg.Name)
if err != nil {
s.log.Error("error retrieving project", zap.String("projectName", projectCfg.Name), zap.Error(err))
continue
}
org, err := s.client.GetOrganization(ctx, project.OrgID)
if err != nil {
s.log.Error("error retrieving organization from project", zap.String("projectName", projectCfg.Name), zap.Error(err))
continue
}
if err := s.processProject(ctx, time, org.Name, project, projectCfg); err != nil {
s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
}
}
return nil
}
func (s *mongodbatlasreceiver) processProject(ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, projectCfg *ProjectConfig) error {
nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID)
if err != nil {
return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err)
}
processes, err := s.client.Processes(ctx, project.ID)
if err != nil {
return fmt.Errorf("error retrieving MongoDB Atlas processes for project %s: %w", project.ID, err)
}
for _, process := range processes {
clusterName := nodeClusterMap[process.UserAlias]
providerValues := providerMap[clusterName]
if !shouldProcessCluster(projectCfg, clusterName) {
// Skip processing for this cluster
continue
}
if err := s.extractProcessMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err)
}
}
return nil
}
// shouldProcessCluster checks whether a given cluster should be processed based on the project configuration.
func shouldProcessCluster(projectCfg *ProjectConfig, clusterName string) bool {
if projectCfg == nil {
// If there is no project config, process all clusters.
return true
}
_, isIncluded := projectCfg.includesByClusterName[clusterName]
_, isExcluded := projectCfg.excludesByClusterName[clusterName]
// Return false immediately if the cluster is excluded.
if isExcluded {
return false
}
// If IncludeClusters is empty, or the cluster is explicitly included, return true.
return len(projectCfg.IncludeClusters) == 0 || isIncluded
}
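// Behavior sketch (hypothetical cluster names):
//
//	shouldProcessCluster(nil, "any")                      // true: no config, process everything
//	cluster listed in ExcludeClusters                     // false, even if also included
//	IncludeClusters empty, or cluster explicitly included // true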
type providerValues struct {
RegionName string
ProviderName string
}
func (s *mongodbatlasreceiver) getNodeClusterNameMap(
ctx context.Context,
projectID string,
) (map[string]string, map[string]providerValues, error) {
providerMap := make(map[string]providerValues)
clusterMap := make(map[string]string)
clusters, err := s.client.GetClusters(ctx, projectID)
if err != nil {
return nil, nil, err
}
for _, cluster := range clusters {
// URI in the form mongodb://host1.mongodb.net:27017,host2.mongodb.net:27017,host3.mongodb.net:27017
nodes := strings.Split(strings.TrimPrefix(cluster.MongoURI, "mongodb://"), ",")
for _, node := range nodes {
// Remove the port from the node
n, _, _ := strings.Cut(node, ":")
clusterMap[n] = cluster.Name
}
providerMap[cluster.Name] = providerValues{
RegionName: cluster.ProviderSettings.RegionName,
ProviderName: cluster.ProviderSettings.ProviderName,
}
}
return clusterMap, providerMap, nil
}
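// For a cluster named "c1" (hypothetical) with MongoURI
// "mongodb://host1.mongodb.net:27017,host2.mongodb.net:27017", the maps
// returned above would contain:
//
//	clusterMap:  {"host1.mongodb.net": "c1", "host2.mongodb.net": "c1"}
//	providerMap: {"c1": providerValues{RegionName: ..., ProviderName: ...}}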
func (s *mongodbatlasreceiver) extractProcessMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
if err := s.client.ProcessMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling process metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
return nil
}
func (s *mongodbatlasreceiver) extractProcessDatabaseMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
processDatabases, err := s.client.ProcessDatabases(
ctx,
project.ID,
process.Hostname,
process.Port,
)
if err != nil {
return fmt.Errorf("error retrieving process databases: %w", err)
}
for _, db := range processDatabases {
if err := s.client.ProcessDatabaseMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
db.DatabaseName,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling database metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasDbName(db.DatabaseName)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
}
return nil
}
func (s *mongodbatlasreceiver) extractProcessDiskMetrics(
ctx context.Context,
time timeconstraints,
orgName string,
project *mongodbatlas.Project,
process *mongodbatlas.Process,
clusterName string,
providerValues providerValues,
) error {
for _, disk := range s.client.ProcessDisks(ctx, project.ID, process.Hostname, process.Port) {
if err := s.client.ProcessDiskMetrics(
ctx,
s.mb,
project.ID,
process.Hostname,
process.Port,
disk.PartitionName,
time.start,
time.end,
time.resolution,
); err != nil {
return fmt.Errorf("error when polling disk metrics from MongoDB Atlas: %w", err)
}
rb := s.mb.NewResourceBuilder()
rb.SetMongodbAtlasOrgName(orgName)
rb.SetMongodbAtlasProjectName(project.Name)
rb.SetMongodbAtlasProjectID(project.ID)
rb.SetMongodbAtlasHostName(process.Hostname)
rb.SetMongodbAtlasUserAlias(process.UserAlias)
rb.SetMongodbAtlasClusterName(clusterName)
rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port))
rb.SetMongodbAtlasProcessTypeName(process.TypeName)
rb.SetMongodbAtlasProcessID(process.ID)
rb.SetMongodbAtlasDiskPartition(disk.PartitionName)
rb.SetMongodbAtlasRegionName(providerValues.RegionName)
rb.SetMongodbAtlasProviderName(providerValues.ProviderName)
s.mb.EmitForResource(metadata.WithResource(rb.Emit()))
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver"
// This file implements the factory for the SAPM receiver.
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver/internal/metadata"
)
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:7276"
)
// NewFactory creates a factory for SAPM receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithTraces(createTracesReceiver, metadata.TracesStability))
}
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
}
}
// extractPortFromEndpoint extracts the port number from a string in
// "address:port" format. It returns an error if the port number cannot be
// extracted.
// TODO make this a utility function
func extractPortFromEndpoint(endpoint string) (int, error) {
_, portStr, err := net.SplitHostPort(endpoint)
if err != nil {
return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
}
port, err := strconv.ParseInt(portStr, 10, 0)
if err != nil {
return 0, fmt.Errorf("endpoint port is not a number: %w", err)
}
if port < 1 || port > 65535 {
return 0, errors.New("port number must be between 1 and 65535")
}
return int(port), nil
}
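// For example:
//
//	extractPortFromEndpoint("localhost:7276") // 7276, nil
//	extractPortFromEndpoint("localhost")      // 0, error: not "address:port"
//	extractPortFromEndpoint("localhost:0")    // 0, error: out of range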
// validate verifies that the configured endpoint contains a valid port
func (rCfg *Config) validate() error {
_, err := extractPortFromEndpoint(rCfg.Endpoint)
return err
}
// createTracesReceiver creates a traces receiver based on the provided config.
func createTracesReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
// assert config is SAPM config
rCfg := cfg.(*Config)
err := rCfg.validate()
if err != nil {
return nil, err
}
// Create the receiver.
return newReceiver(params, rCfg, nextConsumer)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver"
import (
"bytes"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"sync"
"github.com/gorilla/mux"
splunksapm "github.com/signalfx/sapm-proto/gen"
"github.com/signalfx/sapm-proto/sapmprotocol"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger"
)
var gzipWriterPool = &sync.Pool{
New: func() any {
return gzip.NewWriter(io.Discard)
},
}
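// Typical pool lifecycle, as used in HTTPHandlerFunc below: take a writer,
// reset it onto the destination buffer, write and close, then return it.
//
//	w := gzipWriterPool.Get().(*gzip.Writer)
//	defer gzipWriterPool.Put(w)
//	w.Reset(&buf)
//	_, _ = w.Write(payload)
//	_ = w.Close() // flushes the gzip footer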
// sapmReceiver receives spans in the Splunk SAPM format over HTTP
type sapmReceiver struct {
settings component.TelemetrySettings
config *Config
server *http.Server
shutdownWG sync.WaitGroup
nextConsumer consumer.Traces
// defaultResponse is a placeholder. For now this receiver returns an empty sapm response.
// This defaultResponse is an optimization so we don't have to proto.Marshal the response
// for every request. At some point this may be removed when there is actual content to return.
defaultResponse []byte
obsrecv *receiverhelper.ObsReport
}
// handleRequest parses an http request containing sapm and passes the trace data to the next consumer
func (sr *sapmReceiver) handleRequest(req *http.Request) error {
sapm, err := sapmprotocol.ParseTraceV2Request(req)
// errors processing the request should return http.StatusBadRequest
if err != nil {
return err
}
ctx := sr.obsrecv.StartTracesOp(req.Context())
td, err := jaeger.ProtoToTraces(sapm.Batches)
if err != nil {
return err
}
// pass the trace data to the next consumer
err = sr.nextConsumer.ConsumeTraces(ctx, td)
if err != nil {
err = fmt.Errorf("error passing trace data to next consumer: %w", err)
}
sr.obsrecv.EndTracesOp(ctx, "protobuf", td.SpanCount(), err)
return err
}
// HTTPHandlerFunc returns an http.HandlerFunc that handles SAPM requests
func (sr *sapmReceiver) HTTPHandlerFunc(rw http.ResponseWriter, req *http.Request) {
// handle the request payload
err := sr.handleRequest(req)
if err != nil {
errorutil.HTTPError(rw, err)
return
}
// respBytes are bytes to write to the http.Response
// build the response message
// NOTE currently the response is an empty struct. As an optimization this receiver will pass a
// byte array that was generated in the receiver's constructor. If this receiver needs to return
// more than an empty struct, then the sapm.PostSpansResponse{} struct will need to be marshaled
// and on error a http.StatusInternalServerError should be written to the http.ResponseWriter and
// this function should immediately return.
respBytes := sr.defaultResponse
rw.Header().Set(sapmprotocol.ContentTypeHeaderName, sapmprotocol.ContentTypeHeaderValue)
// write the response if client does not accept gzip encoding
if req.Header.Get(sapmprotocol.AcceptEncodingHeaderName) != sapmprotocol.GZipEncodingHeaderValue {
// write the response bytes
_, err = rw.Write(respBytes)
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
}
return
}
// gzip the response
// get the gzip writer
writer := gzipWriterPool.Get().(*gzip.Writer)
defer gzipWriterPool.Put(writer)
var gzipBuffer bytes.Buffer
// reset the writer with the gzip buffer
writer.Reset(&gzipBuffer)
// gzip the responseBytes
_, err = writer.Write(respBytes)
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
return
}
// close the gzip writer and write gzip footer
err = writer.Close()
if err != nil {
rw.WriteHeader(http.StatusInternalServerError)
return
}
// write the successfully gzipped payload
rw.Header().Set(sapmprotocol.ContentEncodingHeaderName, sapmprotocol.GZipEncodingHeaderValue)
_, err = rw.Write(gzipBuffer.Bytes())
if err != nil {
rw.WriteHeader(http.StatusBadRequest)
}
}
// Start starts the sapmReceiver's server.
func (sr *sapmReceiver) Start(ctx context.Context, host component.Host) error {
// server.Handler is nil on the initial call; if it is already set, Start has already run and this is a noop.
if sr.server != nil && sr.server.Handler != nil {
return nil
}
// set up the listener
ln, err := sr.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", sr.config.Endpoint, err)
}
// use gorilla mux to create a router/handler
nr := mux.NewRouter()
nr.HandleFunc(sapmprotocol.TraceEndpointV2, sr.HTTPHandlerFunc)
// create a server with the handler
sr.server, err = sr.config.ServerConfig.ToServer(ctx, host, sr.settings, nr)
if err != nil {
return err
}
sr.shutdownWG.Add(1)
// run the server on a routine
go func() {
defer sr.shutdownWG.Done()
if errHTTP := sr.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown stops the sapmReceiver's server.
func (sr *sapmReceiver) Shutdown(context.Context) error {
if sr.server == nil {
return nil
}
err := sr.server.Close()
sr.shutdownWG.Wait()
return err
}
// This validates at compile time that sapmReceiver implements the receiver.Traces interface.
var _ receiver.Traces = (*sapmReceiver)(nil)
// newReceiver creates a sapmReceiver that receives SAPM over http
func newReceiver(
params receiver.Settings,
config *Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
// build the response message
defaultResponse := &splunksapm.PostSpansResponse{}
defaultResponseBytes, err := defaultResponse.Marshal()
if err != nil {
return nil, fmt.Errorf("failed to marshal default response body for %v receiver: %w", params.ID, err)
}
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: params.ID,
Transport: transport,
ReceiverCreateSettings: params,
})
if err != nil {
return nil, err
}
return &sapmReceiver{
settings: params.TelemetrySettings,
config: config,
nextConsumer: nextConsumer,
defaultResponse: defaultResponseBytes,
obsrecv: obsrecv,
}, nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"errors"
"go.opentelemetry.io/collector/config/confighttp"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
var errEmptyEndpoint = errors.New("empty endpoint")
// Config defines configuration for the SignalFx receiver.
type Config struct {
confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
// Deprecated: `access_token_passthrough` is deprecated.
// Please enable include_metadata in the receiver and add the following config to the batch processor:
// batch:
// metadata_keys: [X-Sf-Token]
splunk.AccessTokenPassthroughConfig `mapstructure:",squash"`
}
// Validate verifies that the endpoint is set and contains a valid port
func (rCfg *Config) Validate() error {
if rCfg.ServerConfig.Endpoint == "" {
return errEmptyEndpoint
}
_, err := extractPortFromEndpoint(rCfg.ServerConfig.Endpoint)
if err != nil {
return err
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"sync"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata"
)
// This file implements the factory for the SignalFx receiver.
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:9943"
)
// NewFactory creates a factory for SignalFx receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
}
}
// extractPortFromEndpoint extracts the port number from a string in
// "address:port" format. It returns an error if the port number cannot be
// extracted.
func extractPortFromEndpoint(endpoint string) (int, error) {
_, portStr, err := net.SplitHostPort(endpoint)
if err != nil {
return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
}
port, err := strconv.ParseInt(portStr, 10, 0)
if err != nil {
return 0, fmt.Errorf("endpoint port is not a number: %w", err)
}
if port < 1 || port > 65535 {
return 0, errors.New("port number must be between 1 and 65535")
}
return int(port), nil
}
// createMetricsReceiver creates a metrics receiver based on provided config.
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
rCfg := cfg.(*Config)
if rCfg.AccessTokenPassthrough {
params.Logger.Warn(
"access_token_passthrough is deprecated. " +
"Please enable include_metadata in the receiver and add " +
"`metadata_keys: [X-Sf-Token]` to the batch processor",
)
}
receiverLock.Lock()
defer receiverLock.Unlock()
r := receivers[rCfg]
if r == nil {
var err error
r, err = newReceiver(params, *rCfg)
if err != nil {
return nil, err
}
receivers[rCfg] = r
}
r.RegisterMetricsConsumer(consumer)
return r, nil
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
rCfg := cfg.(*Config)
if rCfg.AccessTokenPassthrough {
params.Logger.Warn(
"access_token_passthrough is deprecated. " +
"Please enable include_metadata in the receiver and add " +
"`metadata_keys: [X-Sf-Token]` to the batch processor",
)
}
receiverLock.Lock()
defer receiverLock.Unlock()
r := receivers[rCfg]
if r == nil {
var err error
r, err = newReceiver(params, *rCfg)
if err != nil {
return nil, err
}
receivers[rCfg] = r
}
r.RegisterLogsConsumer(consumer)
return r, nil
}
var (
receiverLock sync.Mutex
receivers = map[*Config]*sfxReceiver{}
)
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
"unsafe"
"github.com/gorilla/mux"
sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata"
)
const (
defaultServerTimeout = 20 * time.Second
responseOK = "OK"
responseInvalidMethod = "Only \"POST\" method is supported"
responseEventsInvalidContentType = "\"Content-Type\" must be \"application/x-protobuf\""
responseInvalidContentType = "\"Content-Type\" must be either \"application/x-protobuf\" or \"application/x-protobuf;format=otlp\""
responseInvalidEncoding = "\"Content-Encoding\" must be \"gzip\" or empty"
responseErrGzipReader = "Error on gzip body"
responseErrReadBody = "Failed to read message body"
responseErrUnmarshalBody = "Failed to unmarshal message body"
responseErrNextConsumer = "Internal Server Error"
responseErrLogsNotConfigured = "Log pipeline has not been configured to handle events"
responseErrMetricsNotConfigured = "Metric pipeline has not been configured to handle datapoints"
// Centralizing some HTTP and related string constants.
protobufContentType = "application/x-protobuf"
otlpProtobufContentType = "application/x-protobuf;format=otlp"
gzipEncoding = "gzip"
httpContentTypeHeader = "Content-Type"
httpContentEncodingHeader = "Content-Encoding"
)
var (
okRespBody = initJSONResponse(responseOK)
invalidMethodRespBody = initJSONResponse(responseInvalidMethod)
invalidContentRespBody = initJSONResponse(responseInvalidContentType)
invalidEventsContentRespBody = initJSONResponse(responseEventsInvalidContentType)
invalidEncodingRespBody = initJSONResponse(responseInvalidEncoding)
errGzipReaderRespBody = initJSONResponse(responseErrGzipReader)
errReadBodyRespBody = initJSONResponse(responseErrReadBody)
errUnmarshalBodyRespBody = initJSONResponse(responseErrUnmarshalBody)
errNextConsumerRespBody = initJSONResponse(responseErrNextConsumer)
errLogsNotConfigured = initJSONResponse(responseErrLogsNotConfigured)
errMetricsNotConfigured = initJSONResponse(responseErrMetricsNotConfigured)
translator = &signalfx.ToTranslator{}
)
// sfxReceiver implements receiver.Metrics for the SignalFx metric protocol.
type sfxReceiver struct {
settings receiver.Settings
config *Config
metricsConsumer consumer.Metrics
logsConsumer consumer.Logs
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
}
var _ receiver.Metrics = (*sfxReceiver)(nil)
// newReceiver creates the SignalFx receiver with the given configuration.
func newReceiver(
settings receiver.Settings,
config Config,
) (*sfxReceiver, error) {
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: settings.ID,
Transport: transport,
ReceiverCreateSettings: settings,
})
if err != nil {
return nil, err
}
r := &sfxReceiver{
settings: settings,
config: &config,
obsrecv: obsrecv,
}
return r, nil
}
func (r *sfxReceiver) RegisterMetricsConsumer(mc consumer.Metrics) {
r.metricsConsumer = mc
}
func (r *sfxReceiver) RegisterLogsConsumer(lc consumer.Logs) {
r.logsConsumer = lc
}
// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *sfxReceiver) Start(ctx context.Context, host component.Host) error {
if r.server != nil {
return nil
}
// set up the listener
ln, err := r.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
}
mx := mux.NewRouter()
mx.HandleFunc("/v2/datapoint", r.handleDatapointReq)
mx.HandleFunc("/v2/event", r.handleEventReq)
r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
if err != nil {
return err
}
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
r.server.ReadHeaderTimeout = defaultServerTimeout
r.server.WriteTimeout = defaultServerTimeout
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown tells the receiver that it should stop reception, giving it a
// chance to perform any necessary clean-up.
func (r *sfxReceiver) Shutdown(context.Context) error {
if r.server == nil {
return nil
}
err := r.server.Close()
r.shutdownWG.Wait()
return err
}
func (r *sfxReceiver) readBody(ctx context.Context, resp http.ResponseWriter, req *http.Request) ([]byte, bool) {
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, nil)
return nil, false
}
bodyReader := req.Body
if encoding == gzipEncoding {
var err error
bodyReader, err = gzip.NewReader(bodyReader)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errGzipReaderRespBody, err)
return nil, false
}
}
body, err := io.ReadAll(bodyReader)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errReadBodyRespBody, err)
return nil, false
}
return body, true
}
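// A hedged sketch of building a request body that readBody accepts with gzip
// encoding (hypothetical payload; e.g. in a test client):
//
//	var buf bytes.Buffer
//	zw := gzip.NewWriter(&buf)
//	_, _ = zw.Write(payload)
//	_ = zw.Close()
//	req, _ := http.NewRequest(http.MethodPost, "/v2/datapoint", &buf)
//	req.Header.Set(httpContentEncodingHeader, gzipEncoding)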
func (r *sfxReceiver) writeResponse(ctx context.Context, resp http.ResponseWriter, err error) {
if err != nil {
r.failRequest(ctx, resp, errorutil.GetHTTPStatusCodeFromError(err), errNextConsumerRespBody, err)
return
}
resp.WriteHeader(http.StatusOK)
_, err = resp.Write(okRespBody)
if err != nil {
r.failRequest(ctx, resp, http.StatusInternalServerError, errNextConsumerRespBody, err)
}
}
func (r *sfxReceiver) handleDatapointReq(resp http.ResponseWriter, req *http.Request) {
ctx := r.obsrecv.StartMetricsOp(req.Context())
if r.metricsConsumer == nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errMetricsNotConfigured, nil)
return
}
if req.Method != http.MethodPost {
r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil)
return
}
otlpFormat := false
switch req.Header.Get(httpContentTypeHeader) {
case protobufContentType:
case otlpProtobufContentType:
otlpFormat = true
default:
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidContentRespBody, nil)
return
}
body, ok := r.readBody(ctx, resp, req)
if !ok {
return
}
r.settings.Logger.Debug("Handling metrics data")
var md pmetric.Metrics
if otlpFormat {
r.settings.Logger.Debug("Received request is in OTLP format")
otlpreq := pmetricotlp.NewExportRequest()
if err := otlpreq.UnmarshalProto(body); err != nil {
r.settings.Logger.Debug("OTLP data unmarshalling failed", zap.Error(err))
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
md = otlpreq.Metrics()
} else {
msg := &sfxpb.DataPointUploadMessage{}
err := msg.Unmarshal(body)
if err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
md, err = translator.ToMetrics(msg.Datapoints)
if err != nil {
r.settings.Logger.Debug("SignalFx conversion error", zap.Error(err))
}
}
dataPointCount := md.DataPointCount()
if dataPointCount == 0 {
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil)
_, _ = resp.Write(okRespBody)
return
}
r.addAccessTokenLabel(md, req)
err := r.metricsConsumer.ConsumeMetrics(ctx, md)
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), dataPointCount, err)
r.writeResponse(ctx, resp, err)
}
func (r *sfxReceiver) handleEventReq(resp http.ResponseWriter, req *http.Request) {
ctx := r.obsrecv.StartMetricsOp(req.Context())
if r.logsConsumer == nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errLogsNotConfigured, nil)
return
}
if req.Method != http.MethodPost {
r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil)
return
}
if req.Header.Get(httpContentTypeHeader) != protobufContentType {
r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEventsContentRespBody, nil)
return
}
body, ok := r.readBody(ctx, resp, req)
if !ok {
return
}
msg := &sfxpb.EventUploadMessage{}
if err := msg.Unmarshal(body); err != nil {
r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
if len(msg.Events) == 0 {
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil)
_, _ = resp.Write(okRespBody)
return
}
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
sl := rl.ScopeLogs().AppendEmpty()
signalFxV2EventsToLogRecords(msg.Events, sl.LogRecords())
if r.config.AccessTokenPassthrough {
if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" {
rl.Resource().Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken)
}
}
err := r.logsConsumer.ConsumeLogs(ctx, ld)
r.obsrecv.EndMetricsOp(
ctx,
metadata.Type.String(),
len(msg.Events),
err)
r.writeResponse(ctx, resp, err)
}
func (r *sfxReceiver) failRequest(
ctx context.Context,
resp http.ResponseWriter,
httpStatusCode int,
jsonResponse []byte,
err error,
) {
resp.WriteHeader(httpStatusCode)
if len(jsonResponse) > 0 {
// The response needs to be written as a JSON string.
_, writeErr := resp.Write(jsonResponse)
if writeErr != nil {
r.settings.Logger.Warn(
"Error writing HTTP response message",
zap.Error(writeErr),
zap.String("receiver", r.settings.ID.String()))
}
}
// Use the same pattern as strings.Builder String().
msg := *(*string)(unsafe.Pointer(&jsonResponse))
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, err)
r.settings.Logger.Debug(
"SignalFx receiver request failed",
zap.Int("http_status_code", httpStatusCode),
zap.String("msg", msg),
zap.Error(err), // It handles nil error
)
}
func (r *sfxReceiver) addAccessTokenLabel(md pmetric.Metrics, req *http.Request) {
if r.config.AccessTokenPassthrough {
if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" {
for i := 0; i < md.ResourceMetrics().Len(); i++ {
rm := md.ResourceMetrics().At(i)
res := rm.Resource()
res.Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken)
}
}
}
}
func initJSONResponse(s string) []byte {
respBody, err := json.Marshal(s)
if err != nil {
// This is to be used in initialization so panic here is fine.
panic(err)
}
return respBody
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"
import (
sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
// signalFxV2EventsToLogRecords converts SignalFx event proto data points into
// the given plog.LogRecordSlice.
func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs plog.LogRecordSlice) {
lrs.EnsureCapacity(len(events))
for _, event := range events {
lr := lrs.AppendEmpty()
attrs := lr.Attributes()
attrs.EnsureCapacity(2 + len(event.Dimensions) + len(event.Properties))
for _, dim := range event.Dimensions {
attrs.PutStr(dim.Key, dim.Value)
}
// The EventType field is stored as an attribute.
eventType := event.EventType
if eventType == "" {
eventType = "unknown"
}
attrs.PutStr(splunk.SFxEventType, eventType)
// SignalFx timestamps are in millis so convert to nanos by multiplying
// by 1 million.
lr.SetTimestamp(pcommon.Timestamp(event.Timestamp * 1e6))
if event.Category != nil {
attrs.PutInt(splunk.SFxEventCategoryKey, int64(*event.Category))
} else {
// This gives us an unambiguous way of determining that a log record
// represents a SignalFx event, even if category is missing from the
// event.
attrs.PutEmpty(splunk.SFxEventCategoryKey)
}
if len(event.Properties) > 0 {
propMap := attrs.PutEmptyMap(splunk.SFxEventPropertiesKey)
propMap.EnsureCapacity(len(event.Properties))
for _, prop := range event.Properties {
// No way to tell what value type is without testing each
// individually.
switch {
case prop.Value.StrValue != nil:
propMap.PutStr(prop.Key, prop.Value.GetStrValue())
case prop.Value.IntValue != nil:
propMap.PutInt(prop.Key, prop.Value.GetIntValue())
case prop.Value.DoubleValue != nil:
propMap.PutDouble(prop.Key, prop.Value.GetDoubleValue())
case prop.Value.BoolValue != nil:
propMap.PutBool(prop.Key, prop.Value.GetBoolValue())
default:
// If there is no property value, just insert a null to
// record that the key was present.
propMap.PutEmpty(prop.Key)
}
}
}
}
}
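// A minimal usage sketch, mirroring how handleEventReq feeds this function:
//
//	ld := plog.NewLogs()
//	sl := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
//	signalFxV2EventsToLogRecords(msg.Events, sl.LogRecords())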
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
conventions "go.opentelemetry.io/collector/semconv/v1.27.0"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata"
)
// This file implements the factory for the Splunk HEC receiver.
const (
// Default endpoint to bind to.
defaultEndpoint = "localhost:8088"
)
// NewFactory creates a factory for Splunk HEC receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}
// createDefaultConfig creates the default configuration for the Splunk HEC receiver.
func createDefaultConfig() component.Config {
return &Config{
ServerConfig: confighttp.ServerConfig{
Endpoint: defaultEndpoint,
},
AccessTokenPassthroughConfig: splunk.AccessTokenPassthroughConfig{},
HecToOtelAttrs: splunk.HecToOtelAttrs{
Source: splunk.DefaultSourceLabel,
SourceType: splunk.DefaultSourceTypeLabel,
Index: splunk.DefaultIndexLabel,
Host: conventions.AttributeHostName,
},
RawPath: splunk.DefaultRawPath,
HealthPath: splunk.DefaultHealthPath,
Ack: Ack{
Extension: nil,
Path: splunk.DefaultAckPath,
},
Splitting: SplittingStrategyLine,
}
}
// createMetricsReceiver creates a metrics receiver based on provided config.
func createMetricsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
var err error
var recv receiver.Metrics
rCfg := cfg.(*Config)
r := receivers.GetOrAdd(cfg, func() component.Component {
recv, err = newReceiver(params, *rCfg)
return recv
})
if err != nil {
return nil, err
}
r.Unwrap().(*splunkReceiver).metricsConsumer = consumer
return r, nil
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
var err error
var recv receiver.Logs
rCfg := cfg.(*Config)
r := receivers.GetOrAdd(cfg, func() component.Component {
recv, err = newReceiver(params, *rCfg)
return recv
})
if err != nil {
return nil, err
}
r.Unwrap().(*splunkReceiver).logsConsumer = consumer
return r, nil
}
var receivers = sharedcomponent.NewSharedComponents()
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"compress/gzip"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
jsoniter "github.com/json-iterator/go"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata"
)
const (
defaultServerTimeout = 20 * time.Second
ackResponse = `{"acks": %s}`
responseOK = `{"text": "Success", "code": 0}`
responseOKWithAckID = `{"text": "Success", "code": 0, "ackId": %d}`
responseHecHealthy = `{"text": "HEC is healthy", "code": 17}`
responseInvalidMethodPostOnly = `"Only \"POST\" method is supported"`
responseInvalidEncoding = `"\"Content-Encoding\" must be \"gzip\" or empty"`
responseInvalidDataFormat = `{"text":"Invalid data format","code":6}`
responseErrEventRequired = `{"text":"Event field is required","code":12}`
responseErrEventBlank = `{"text":"Event field cannot be blank","code":13}`
responseErrGzipReader = `"Error on gzip body"`
responseErrUnmarshalBody = `"Failed to unmarshal message body"`
responseErrInternalServerError = `"Internal Server Error"`
responseErrUnsupportedMetricEvent = `"Unsupported metric event"`
responseErrUnsupportedLogEvent = `"Unsupported log event"`
responseErrHandlingIndexedFields = `{"text":"Error in handling indexed fields","code":15,"invalid-event-number":%d}`
responseErrDataChannelMissing = `{"text": "Data channel is missing","code":10}`
responseErrInvalidDataChannel = `{"text": "Invalid data channel", "code": 11}`
responseNoData = `{"text":"No data","code":5}`
// Centralizing some HTTP and related string constants.
gzipEncoding = "gzip"
httpContentEncodingHeader = "Content-Encoding"
httpContentTypeHeader = "Content-Type"
httpJSONTypeHeader = "application/json"
)
var (
errEmptyEndpoint = errors.New("empty endpoint")
errInvalidMethod = errors.New("invalid http method")
errInvalidEncoding = errors.New("invalid encoding")
errExtensionMissing = errors.New("ack extension not found")
okRespBody = []byte(responseOK)
eventRequiredRespBody = []byte(responseErrEventRequired)
eventBlankRespBody = []byte(responseErrEventBlank)
requiredDataChannelHeader = []byte(responseErrDataChannelMissing)
invalidEncodingRespBody = []byte(responseInvalidEncoding)
invalidFormatRespBody = []byte(responseInvalidDataFormat)
invalidMethodRespBodyPostOnly = []byte(responseInvalidMethodPostOnly)
errGzipReaderRespBody = []byte(responseErrGzipReader)
errUnmarshalBodyRespBody = []byte(responseErrUnmarshalBody)
errInternalServerError = []byte(responseErrInternalServerError)
errUnsupportedMetricEvent = []byte(responseErrUnsupportedMetricEvent)
errUnsupportedLogEvent = []byte(responseErrUnsupportedLogEvent)
noDataRespBody = []byte(responseNoData)
)
// splunkReceiver implements the receiver.Metrics for Splunk HEC metric protocol.
type splunkReceiver struct {
settings receiver.Settings
config *Config
logsConsumer consumer.Logs
metricsConsumer consumer.Metrics
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
gzipReaderPool *sync.Pool
ackExt ackextension.AckExtension
}
var (
_ receiver.Metrics = (*splunkReceiver)(nil)
_ receiver.Logs = (*splunkReceiver)(nil)
)
// newReceiver creates the Splunk HEC receiver with the given configuration.
func newReceiver(settings receiver.Settings, config Config) (*splunkReceiver, error) {
if config.Endpoint == "" {
return nil, errEmptyEndpoint
}
transport := "http"
if config.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: settings.ID,
Transport: transport,
ReceiverCreateSettings: settings,
})
if err != nil {
return nil, err
}
r := &splunkReceiver{
settings: settings,
config: &config,
server: &http.Server{
Addr: config.Endpoint,
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
ReadHeaderTimeout: defaultServerTimeout,
WriteTimeout: defaultServerTimeout,
},
obsrecv: obsrecv,
gzipReaderPool: &sync.Pool{New: func() any { return new(gzip.Reader) }},
}
return r, nil
}
// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *splunkReceiver) Start(ctx context.Context, host component.Host) error {
// server.Handler will be nil on initial call, otherwise noop.
if r.server != nil && r.server.Handler != nil {
return nil
}
mx := mux.NewRouter()
// set up the ack API handler if the ack extension is present
if r.config.Ack.Extension != nil {
if ext, found := host.GetExtensions()[*r.config.Ack.Extension]; found {
r.ackExt = ext.(ackextension.AckExtension)
mx.NewRoute().Path(r.config.Ack.Path).HandlerFunc(r.handleAck)
} else {
return fmt.Errorf("specified ack extension with id %q could not be found", *r.config.Ack.Extension)
}
}
mx.NewRoute().Path(r.config.HealthPath).HandlerFunc(r.handleHealthReq)
mx.NewRoute().Path(r.config.HealthPath + "/1.0").HandlerFunc(r.handleHealthReq).Methods(http.MethodGet)
if r.logsConsumer != nil {
mx.NewRoute().Path(r.config.RawPath).HandlerFunc(r.handleRawReq)
}
mx.NewRoute().HandlerFunc(r.handleReq)
// set up the listener
ln, err := r.config.ServerConfig.ToListener(ctx)
if err != nil {
return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
}
r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
if err != nil {
return err
}
// TODO: Evaluate what properties should be configurable, for now
// set some hard-coded values.
r.server.ReadHeaderTimeout = defaultServerTimeout
r.server.WriteTimeout = defaultServerTimeout
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown tells the receiver that should stop reception,
// giving it a chance to perform any necessary clean-up.
func (r *splunkReceiver) Shutdown(context.Context) error {
err := r.server.Close()
r.shutdownWG.Wait()
return err
}
func (r *splunkReceiver) processSuccessResponseWithAck(resp http.ResponseWriter, channelID string) error {
if r.ackExt == nil {
panic("writing response with ack when ack extension is not configured")
}
ackID := r.ackExt.ProcessEvent(channelID)
r.ackExt.Ack(channelID, ackID)
return r.processSuccessResponse(resp, []byte(fmt.Sprintf(responseOKWithAckID, ackID)))
}
func (r *splunkReceiver) processSuccessResponse(resp http.ResponseWriter, bodyContent []byte) error {
resp.Header().Set(httpContentTypeHeader, httpJSONTypeHeader)
resp.WriteHeader(http.StatusOK)
_, err := resp.Write(bodyContent)
return err
}
func (r *splunkReceiver) handleAck(resp http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
// This case should be unreachable, since the handler is only registered when ackExt exists; the check is kept as a safeguard.
if r.ackExt == nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, errExtensionMissing)
return
}
var channelID string
var extracted bool
if channelID, extracted = r.extractChannel(req); extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
} else {
r.failRequest(resp, http.StatusBadRequest, requiredDataChannelHeader, nil)
return
}
dec := json.NewDecoder(req.Body)
var ackRequest splunk.AckRequest
err := dec.Decode(&ackRequest)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
if len(ackRequest.Acks) == 0 {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, errors.New("request body must include at least one ackID to be queried"))
return
}
queriedAcks := r.ackExt.QueryAcks(channelID, ackRequest.Acks)
ackString, _ := json.Marshal(queriedAcks)
if err := r.processSuccessResponse(resp, []byte(fmt.Sprintf(ackResponse, ackString))); err != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err)
}
}
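// Illustrative sketch of the ack exchange handled above (the shapes follow the
// Splunk HEC ack API; the IDs below are examples, not fixtures from this
// repository): the client POSTs the ack IDs it wants to query and receives a
// map from each ID to its delivery status.
//
// request body:  {"acks": [1, 3, 4]}
// response body: {"acks": {"1": true, "3": false, "4": true}}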
func (r *splunkReceiver) handleRawReq(resp http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx = r.obsrecv.StartLogsOp(ctx)
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding)
return
}
var channelID string
var extracted bool
if channelID, extracted = r.extractChannel(req); extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
}
if req.ContentLength == 0 {
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil)
r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil)
return
}
bodyReader := req.Body
if encoding == gzipEncoding {
reader := r.gzipReaderPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err)
_, _ = io.ReadAll(req.Body)
_ = req.Body.Close()
return
}
bodyReader = reader
defer r.gzipReaderPool.Put(reader)
}
resourceCustomizer := r.createResourceCustomizer(req)
query := req.URL.Query()
var timestamp pcommon.Timestamp
if query.Has(queryTime) {
t, err := strconv.ParseInt(query.Get(queryTime), 10, 64)
if t < 0 {
err = errors.New("time cannot be less than 0")
}
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
timestamp = pcommon.NewTimestampFromTime(time.Unix(t, 0))
}
ld, slLen, err := splunkHecRawToLogData(bodyReader, query, resourceCustomizer, r.config, timestamp)
if err != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err)
return
}
consumerErr := r.logsConsumer.ConsumeLogs(ctx, ld)
_ = bodyReader.Close()
if consumerErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, consumerErr)
} else {
var ackErr error
if len(channelID) > 0 && r.ackExt != nil {
ackErr = r.processSuccessResponseWithAck(resp, channelID)
} else {
ackErr = r.processSuccessResponse(resp, okRespBody)
}
if ackErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
} else {
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), slLen, nil)
}
}
}
func (r *splunkReceiver) extractChannel(req *http.Request) (string, bool) {
// check header
for k, v := range req.Header {
if strings.EqualFold(k, splunk.HTTPSplunkChannelHeader) {
return strings.ToUpper(v[0]), true
}
}
// check query param
for k, v := range req.URL.Query() {
if strings.EqualFold(k, "channel") {
return strings.ToUpper(v[0]), true
}
}
return "", false
}
func (r *splunkReceiver) validateChannelHeader(channelID string) error {
if len(channelID) == 0 {
return errors.New(responseErrDataChannelMissing)
}
// channel id must be a valid uuid
// https://docs.splunk.com/Documentation/Splunk/9.2.1/Data/AboutHECIDXAck#:~:text=close%20the%20file.-,About%20channels%20and%20sending%20data,-Sending%20events%20to
_, err := uuid.Parse(channelID)
if err != nil {
return errors.New(responseErrInvalidDataChannel)
}
return nil
}
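// exampleChannelHandling is an illustrative sketch, not part of the receiver;
// it assumes net/http/httptest is imported. A channel ID may arrive via the
// dedicated header or the "channel" query parameter, is upper-cased on
// extraction, and must be a well-formed UUID to pass validation.
func exampleChannelHandling(r *splunkReceiver) {
req := httptest.NewRequest(http.MethodPost,
"/services/collector/event?channel=fbd3036f-0f1c-4e98-b71c-d4cd61f1ed8b", nil)
if channelID, ok := r.extractChannel(req); ok {
// channelID == "FBD3036F-0F1C-4E98-B71C-D4CD61F1ED8B"
fmt.Println(r.validateChannelHeader(channelID)) // <nil>: a valid UUID
}
}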
func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request) {
ctx := req.Context()
if req.Method != http.MethodPost {
r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
return
}
encoding := req.Header.Get(httpContentEncodingHeader)
if encoding != "" && encoding != gzipEncoding {
r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding)
return
}
channelID, extracted := r.extractChannel(req)
if extracted {
if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
return
}
}
bodyReader := req.Body
if encoding == gzipEncoding {
reader := r.gzipReaderPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err)
return
}
bodyReader = reader
defer r.gzipReaderPool.Put(reader)
}
if req.ContentLength == 0 {
r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil)
return
}
dec := jsoniter.NewDecoder(bodyReader)
var events []*splunk.Event
var metricEvents []*splunk.Event
for dec.More() {
var msg splunk.Event
err := dec.Decode(&msg)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
return
}
if msg.Event == nil {
r.failRequest(resp, http.StatusBadRequest, eventRequiredRespBody, nil)
return
}
if msg.Event == "" {
r.failRequest(resp, http.StatusBadRequest, eventBlankRespBody, nil)
return
}
for _, v := range msg.Fields {
if !isFlatJSONField(v) {
r.failRequest(resp, http.StatusBadRequest, []byte(fmt.Sprintf(responseErrHandlingIndexedFields, len(events)+len(metricEvents))), nil)
return
}
}
if msg.IsMetric() {
if r.metricsConsumer == nil {
r.failRequest(resp, http.StatusBadRequest, errUnsupportedMetricEvent, err)
return
}
metricEvents = append(metricEvents, &msg)
} else {
if r.logsConsumer == nil {
r.failRequest(resp, http.StatusBadRequest, errUnsupportedLogEvent, err)
return
}
events = append(events, &msg)
}
}
resourceCustomizer := r.createResourceCustomizer(req)
if r.logsConsumer != nil && len(events) > 0 {
ld, err := splunkHecToLogData(r.settings.Logger, events, resourceCustomizer, r.config)
if err != nil {
r.failRequest(resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
return
}
ctx = r.obsrecv.StartLogsOp(ctx)
decodeErr := r.logsConsumer.ConsumeLogs(ctx, ld)
r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), len(events), nil)
if decodeErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
return
}
}
if r.metricsConsumer != nil && len(metricEvents) > 0 {
md, _ := splunkHecToMetricsData(r.settings.Logger, metricEvents, resourceCustomizer, r.config)
ctx = r.obsrecv.StartMetricsOp(ctx)
decodeErr := r.metricsConsumer.ConsumeMetrics(ctx, md)
r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), len(metricEvents), nil)
if decodeErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
return
}
}
var ackErr error
if len(channelID) > 0 && r.ackExt != nil {
ackErr = r.processSuccessResponseWithAck(resp, channelID)
} else {
ackErr = r.processSuccessResponse(resp, okRespBody)
}
if ackErr != nil {
r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
}
}
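// Illustrative payload shapes that handleReq distinguishes (examples, not
// fixtures from this repository). A plain "event" value produces a log record;
// an event equal to "metric" with "metric_name:*" entries in fields is routed
// to the metrics consumer, per the Splunk HEC metric convention.
const (
exampleLogEventBody = `{"time": 1718000000, "host": "web-01", "event": "user login ok"}`
exampleMetricEventBody = `{"time": 1718000000, "host": "web-01", "event": "metric", "fields": {"metric_name:cpu.idle": 97.2, "region": "us-west"}}`
)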
func (r *splunkReceiver) createResourceCustomizer(req *http.Request) func(resource pcommon.Resource) {
if r.config.AccessTokenPassthrough {
accessToken := req.Header.Get("Authorization")
if strings.HasPrefix(accessToken, splunk.HECTokenHeader+" ") {
accessTokenValue := accessToken[len(splunk.HECTokenHeader)+1:]
return func(resource pcommon.Resource) {
resource.Attributes().PutStr(splunk.HecTokenLabel, accessTokenValue)
}
}
}
return nil
}
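// Sketch of the passthrough above, assuming splunk.HECTokenHeader is the
// "Splunk" authorization scheme commonly used by HEC clients: with
// access_token_passthrough enabled, a request header of the form
//
// Authorization: Splunk 11111111-2222-3333-4444-555555555555
//
// yields a customizer that stamps the token onto each resource under the
// splunk.HecTokenLabel attribute; otherwise createResourceCustomizer returns
// nil and no attribute is added.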
func (r *splunkReceiver) failRequest(
resp http.ResponseWriter,
httpStatusCode int,
jsonResponse []byte,
err error,
) {
if len(jsonResponse) > 0 {
// The response needs to be written as a JSON string. The Content-Type header
// must be set before WriteHeader, otherwise it is silently dropped.
resp.Header().Set(httpContentTypeHeader, httpJSONTypeHeader)
}
resp.WriteHeader(httpStatusCode)
if len(jsonResponse) > 0 {
_, writeErr := resp.Write(jsonResponse)
if writeErr != nil {
r.settings.Logger.Warn("Error writing HTTP response message", zap.Error(writeErr))
}
}
if r.settings.Logger.Core().Enabled(zap.DebugLevel) {
msg := string(jsonResponse)
r.settings.Logger.Debug(
"Splunk HEC receiver request failed",
zap.Int("http_status_code", httpStatusCode),
zap.String("msg", msg),
zap.Error(err), // It handles nil error
)
}
}
func (r *splunkReceiver) handleHealthReq(writer http.ResponseWriter, _ *http.Request) {
writer.Header().Add("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
_, _ = writer.Write([]byte(responseHecHealthy))
}
func isFlatJSONField(field any) bool {
switch value := field.(type) {
case map[string]any:
return false
case []any:
for _, v := range value {
switch v.(type) {
case map[string]any, []any:
return false
}
}
}
return true
}
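// exampleFlatFields is an illustrative sketch of the flatness rule above:
// scalars and arrays of scalars are flat; maps and nested arrays are not.
func exampleFlatFields() {
fmt.Println(isFlatJSONField("a"))                      // true: scalar
fmt.Println(isFlatJSONField([]any{"a", int64(1)}))     // true: flat array
fmt.Println(isFlatJSONField(map[string]any{"k": "v"})) // false: map
fmt.Println(isFlatJSONField([]any{[]any{"nested"}}))   // false: nested array
}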
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"bufio"
"errors"
"io"
"net/url"
"sort"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
const (
// splunk metadata
index = "index"
source = "source"
sourcetype = "sourcetype"
host = "host"
queryTime = "time"
)
var errCannotConvertValue = errors.New("cannot convert field value to attribute")
// splunkHecToLogData transforms Splunk HEC events into logs
func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (plog.Logs, error) {
ld := plog.NewLogs()
scopeLogsMap := make(map[[4]string]plog.ScopeLogs)
for _, event := range events {
key := [4]string{event.Host, event.Source, event.SourceType, event.Index}
var sl plog.ScopeLogs
var found bool
if sl, found = scopeLogsMap[key]; !found {
rl := ld.ResourceLogs().AppendEmpty()
sl = rl.ScopeLogs().AppendEmpty()
scopeLogsMap[key] = sl
appendSplunkMetadata(rl, config.HecToOtelAttrs, event.Host, event.Source, event.SourceType, event.Index)
if resourceCustomizer != nil {
resourceCustomizer(rl.Resource())
}
}
// The SourceType field is the most logical "name" of the event.
logRecord := sl.LogRecords().AppendEmpty()
if err := convertToValue(logger, event.Event, logRecord.Body()); err != nil {
return ld, err
}
// Splunk timestamps are in seconds so convert to nanos by multiplying
// by 1 billion.
logRecord.SetTimestamp(pcommon.Timestamp(event.Time * 1e9))
// Set event fields first, so the specialized attributes overwrite them if needed.
keys := make([]string, 0, len(event.Fields))
for k := range event.Fields {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
val := event.Fields[key]
err := convertToValue(logger, val, logRecord.Attributes().PutEmpty(key))
if err != nil {
return ld, err
}
}
}
return ld, nil
}
// splunkHecRawToLogData transforms a raw Splunk HEC request body into logs
func splunkHecRawToLogData(bodyReader io.Reader, query url.Values, resourceCustomizer func(pcommon.Resource), config *Config, timestamp pcommon.Timestamp) (plog.Logs, int, error) {
ld := plog.NewLogs()
rl := ld.ResourceLogs().AppendEmpty()
appendSplunkMetadata(rl, config.HecToOtelAttrs, query.Get(host), query.Get(source), query.Get(sourcetype), query.Get(index))
if resourceCustomizer != nil {
resourceCustomizer(rl.Resource())
}
sl := rl.ScopeLogs().AppendEmpty()
if config.Splitting == SplittingStrategyNone {
b, err := io.ReadAll(bodyReader)
if err != nil {
return ld, 0, err
}
logRecord := sl.LogRecords().AppendEmpty()
logRecord.Body().SetStr(string(b))
logRecord.SetTimestamp(timestamp)
} else {
sc := bufio.NewScanner(bodyReader)
for sc.Scan() {
logRecord := sl.LogRecords().AppendEmpty()
logLine := sc.Text()
logRecord.Body().SetStr(logLine)
logRecord.SetTimestamp(timestamp)
}
}
return ld, sl.LogRecords().Len(), nil
}
func appendSplunkMetadata(rl plog.ResourceLogs, attrs splunk.HecToOtelAttrs, host, source, sourceType, index string) {
if host != "" {
rl.Resource().Attributes().PutStr(attrs.Host, host)
}
if source != "" {
rl.Resource().Attributes().PutStr(attrs.Source, source)
}
if sourceType != "" {
rl.Resource().Attributes().PutStr(attrs.SourceType, sourceType)
}
if index != "" {
rl.Resource().Attributes().PutStr(attrs.Index, index)
}
}
func convertToValue(logger *zap.Logger, src any, dest pcommon.Value) error {
switch value := src.(type) {
case nil:
case string:
dest.SetStr(value)
case int64:
dest.SetInt(value)
case float64:
dest.SetDouble(value)
case bool:
dest.SetBool(value)
case map[string]any:
return convertToAttributeMap(logger, value, dest)
case []any:
return convertToSliceVal(logger, value, dest)
default:
logger.Debug("Unsupported value conversion", zap.Any("value", src))
return errCannotConvertValue
}
return nil
}
func convertToSliceVal(logger *zap.Logger, value []any, dest pcommon.Value) error {
arr := dest.SetEmptySlice()
for _, elt := range value {
err := convertToValue(logger, elt, arr.AppendEmpty())
if err != nil {
return err
}
}
return nil
}
func convertToAttributeMap(logger *zap.Logger, value map[string]any, dest pcommon.Value) error {
attrMap := dest.SetEmptyMap()
keys := make([]string, 0, len(value))
for k := range value {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := value[k]
if err := convertToValue(logger, v, attrMap.PutEmpty(k)); err != nil {
return err
}
}
return nil
}
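// exampleConvertToValue is an illustrative sketch, not used by the translator:
// convertToValue maps JSON-decoded Go values onto a pcommon.Value, recursing
// through maps and slices; map keys are sorted first so attribute ordering is
// deterministic.
func exampleConvertToValue(logger *zap.Logger) {
v := pcommon.NewValueEmpty()
_ = convertToValue(logger, map[string]any{"b": int64(2), "a": "x"}, v)
// v is now a map value equivalent to {"a": "x", "b": 2}
}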
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"
import (
"fmt"
"strconv"
"strings"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
// splunkHecToMetricsData converts Splunk HEC metric points to
// pmetric.Metrics, returning the converted data and the number of
// dropped time series.
func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (pmetric.Metrics, int) {
numDroppedTimeSeries := 0
md := pmetric.NewMetrics()
scopeMetricsMap := make(map[[4]string]pmetric.ScopeMetrics)
for _, event := range events {
values := event.GetMetricValues()
labels := buildAttributes(event.Fields)
metrics := pmetric.NewMetricSlice()
for metricName, metricValue := range values {
pointTimestamp := convertTimestamp(event.Time)
metric := pmetric.NewMetric()
metric.SetName(metricName)
switch v := metricValue.(type) {
case int64:
addIntGauge(metrics, metricName, v, pointTimestamp, labels)
case *int64:
addIntGauge(metrics, metricName, *v, pointTimestamp, labels)
case float64:
addDoubleGauge(metrics, metricName, v, pointTimestamp, labels)
case *float64:
addDoubleGauge(metrics, metricName, *v, pointTimestamp, labels)
case string:
convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, v, labels)
case *string:
convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, *v, labels)
default:
// drop this point as we do not know how to extract a value from it
numDroppedTimeSeries++
logger.Debug("Cannot convert metric, unknown input type",
zap.String("metric", metricName))
}
}
if metrics.Len() == 0 {
continue
}
key := [4]string{event.Host, event.Source, event.SourceType, event.Index}
var sm pmetric.ScopeMetrics
var found bool
if sm, found = scopeMetricsMap[key]; !found {
resourceMetrics := md.ResourceMetrics().AppendEmpty()
sm = resourceMetrics.ScopeMetrics().AppendEmpty()
scopeMetricsMap[key] = sm
attrs := resourceMetrics.Resource().Attributes()
if event.Host != "" {
attrs.PutStr(config.HecToOtelAttrs.Host, event.Host)
}
if event.Source != "" {
attrs.PutStr(config.HecToOtelAttrs.Source, event.Source)
}
if event.SourceType != "" {
attrs.PutStr(config.HecToOtelAttrs.SourceType, event.SourceType)
}
if event.Index != "" {
attrs.PutStr(config.HecToOtelAttrs.Index, event.Index)
}
if resourceCustomizer != nil {
resourceCustomizer(resourceMetrics.Resource())
}
}
metrics.MoveAndAppendTo(sm.Metrics())
}
return md, numDroppedTimeSeries
}
func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pmetric.MetricSlice, metricName string, pointTimestamp pcommon.Timestamp, s string, attributes pcommon.Map) {
// best effort: try to parse the string value as a number
dbl, err := strconv.ParseFloat(s, 64)
if err != nil {
*numDroppedTimeSeries++
logger.Debug("Cannot convert metric value from string to number",
zap.String("metric", metricName))
} else {
addDoubleGauge(metrics, metricName, dbl, pointTimestamp, attributes)
}
}
func addIntGauge(metrics pmetric.MetricSlice, metricName string, value int64, ts pcommon.Timestamp, attributes pcommon.Map) {
metric := metrics.AppendEmpty()
metric.SetName(metricName)
intPt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
intPt.SetTimestamp(ts)
intPt.SetIntValue(value)
attributes.CopyTo(intPt.Attributes())
}
func addDoubleGauge(metrics pmetric.MetricSlice, metricName string, value float64, ts pcommon.Timestamp, attributes pcommon.Map) {
metric := metrics.AppendEmpty()
metric.SetName(metricName)
doublePt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
doublePt.SetTimestamp(ts)
doublePt.SetDoubleValue(value)
attributes.CopyTo(doublePt.Attributes())
}
func convertTimestamp(sec float64) pcommon.Timestamp {
return pcommon.Timestamp(sec * 1e9)
}
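// Worked example: HEC times are fractional seconds since the Unix epoch, while
// pcommon.Timestamp is integer nanoseconds, hence the 1e9 scaling:
// convertTimestamp(1718000000.5) == pcommon.Timestamp(1718000000500000000).
// Note the float64 multiply: nanosecond precision can be lost for large epochs.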
// Extract dimensions from the Splunk event fields to populate metric data point attributes.
func buildAttributes(dimensions map[string]any) pcommon.Map {
attributes := pcommon.NewMap()
attributes.EnsureCapacity(len(dimensions))
for key, val := range dimensions {
if strings.HasPrefix(key, "metric_name") || key == "_value" {
continue
}
if key == "" || val == nil {
// TODO: Log or metric for this odd ball?
continue
}
attributes.PutStr(key, fmt.Sprintf("%v", val))
}
return attributes
}
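// exampleBuildAttributes is an illustrative sketch of the filtering above:
// "metric_name:*" keys and the reserved "_value" key are dropped, and the
// remaining dimensions are stringified.
func exampleBuildAttributes() {
attrs := buildAttributes(map[string]any{
"metric_name:cpu.idle": 97.2,      // dropped: metric value descriptor
"_value":               97.2,      // dropped: reserved value field
"region":               "us-west", // kept as-is
"shards":               int64(4),  // kept, stringified to "4"
})
fmt.Println(attrs.Len()) // 2
}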
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"errors"
"time"
"go.opentelemetry.io/collector/config/confighttp"
"go.uber.org/multierr"
)
var (
errMissingEndpointFromConfig = errors.New("missing receiver server endpoint from config")
errReadTimeoutExceedsMaxValue = errors.New("the duration specified for read_timeout exceeds the maximum allowed value of 10s")
errWriteTimeoutExceedsMaxValue = errors.New("the duration specified for write_timeout exceeds the maximum allowed value of 10s")
errRequiredHeader = errors.New("both key and value are required to assign a required_header")
)
// Config defines configuration for the Generic Webhook receiver.
type Config struct {
confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct
ReadTimeout string `mapstructure:"read_timeout"` // wait time for reading request headers, as a Go duration string. Default is 500ms.
WriteTimeout string `mapstructure:"write_timeout"` // wait time for writing the request response, as a Go duration string. Default is 500ms.
Path string `mapstructure:"path"` // path for data collection. Default is /events
HealthPath string `mapstructure:"health_path"` // path for the health check API. Default is /health_check
RequiredHeader RequiredHeader `mapstructure:"required_header"` // optional setting requiring a header key/value pair on all requests
}
type RequiredHeader struct {
Key string `mapstructure:"key"`
Value string `mapstructure:"value"`
}
func (cfg *Config) Validate() error {
var errs error
maxReadWriteTimeout, _ := time.ParseDuration("10s")
if cfg.ServerConfig.Endpoint == "" {
errs = multierr.Append(errs, errMissingEndpointFromConfig)
}
// If a user defines a custom read/write timeout there is a maximum value
// of 10s imposed here.
if cfg.ReadTimeout != "" {
readTimeout, err := time.ParseDuration(cfg.ReadTimeout)
if err != nil {
errs = multierr.Append(errs, err)
}
if readTimeout > maxReadWriteTimeout {
errs = multierr.Append(errs, errReadTimeoutExceedsMaxValue)
}
}
if cfg.WriteTimeout != "" {
writeTimeout, err := time.ParseDuration(cfg.WriteTimeout)
if err != nil {
errs = multierr.Append(errs, err)
}
if writeTimeout > maxReadWriteTimeout {
errs = multierr.Append(errs, errWriteTimeoutExceedsMaxValue)
}
}
if (cfg.RequiredHeader.Key != "" && cfg.RequiredHeader.Value == "") || (cfg.RequiredHeader.Value != "" && cfg.RequiredHeader.Key == "") {
errs = multierr.Append(errs, errRequiredHeader)
}
return errs
}
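// exampleWebhookConfig is an illustrative sketch of a minimal valid
// configuration for this receiver: the endpoint is required, and the timeouts
// parse as Go durations capped at 10s by Validate.
func exampleWebhookConfig() error {
cfg := &Config{
ServerConfig: confighttp.ServerConfig{Endpoint: "localhost:8080"},
ReadTimeout:  "500ms",
WriteTimeout: "500ms",
Path:         "/events",
HealthPath:   "/health_check",
}
return cfg.Validate() // nil: endpoint set, both timeouts within the 10s cap
}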
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
var scopeLogName = "otlp/" + metadata.Type.String()
const (
// A default endpoint is deliberately not set: the user is required to
// declare a valid endpoint explicitly.
// Default endpoints to bind to.
// defaultEndpoint = "localhost:8080"
defaultReadTimeout = "500ms"
defaultWriteTimeout = "500ms"
defaultPath = "/events"
defaultHealthPath = "/health_check"
)
// NewFactory creates a factory for Generic Webhook Receiver.
func NewFactory() receiver.Factory {
return receiver.NewFactory(
metadata.Type,
createDefaultConfig,
receiver.WithLogs(createLogsReceiver, metadata.LogsStability),
)
}
// Default configuration for the generic webhook receiver
func createDefaultConfig() component.Config {
return &Config{
Path: defaultPath,
HealthPath: defaultHealthPath,
ReadTimeout: defaultReadTimeout,
WriteTimeout: defaultWriteTimeout,
}
}
// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
_ context.Context,
params receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
conf := cfg.(*Config)
return newLogsReceiver(params, *conf, consumer)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"bufio"
"compress/gzip"
"context"
"errors"
"io"
"net/http"
"sync"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/julienschmidt/httprouter"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/receiverhelper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
var (
errNilLogsConsumer = errors.New("missing a logs consumer")
errMissingEndpoint = errors.New("missing a receiver endpoint")
errInvalidRequestMethod = errors.New("invalid method. Valid method is POST")
errInvalidEncodingType = errors.New("invalid encoding type")
errEmptyResponseBody = errors.New("request body content length is zero")
errMissingRequiredHeader = errors.New("request was missing required header or incorrect header value")
)
const healthyResponse = `{"text": "Webhookevent receiver is healthy"}`
type eventReceiver struct {
settings receiver.Settings
cfg *Config
logConsumer consumer.Logs
server *http.Server
shutdownWG sync.WaitGroup
obsrecv *receiverhelper.ObsReport
gzipPool *sync.Pool
}
func newLogsReceiver(params receiver.Settings, cfg Config, consumer consumer.Logs) (receiver.Logs, error) {
if consumer == nil {
return nil, errNilLogsConsumer
}
if cfg.Endpoint == "" {
return nil, errMissingEndpoint
}
transport := "http"
if cfg.TLSSetting != nil {
transport = "https"
}
obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: params.ID,
Transport: transport,
ReceiverCreateSettings: params,
})
if err != nil {
return nil, err
}
// create eventReceiver instance
er := &eventReceiver{
settings: params,
cfg: &cfg,
logConsumer: consumer,
obsrecv: obsrecv,
gzipPool: &sync.Pool{New: func() any { return new(gzip.Reader) }},
}
return er, nil
}
// Start manages receiver startup tasks. Part of the receiver.Logs interface.
func (er *eventReceiver) Start(ctx context.Context, host component.Host) error {
// noop if the server handler is already set; on the first Start call these values are nil.
if er.server != nil && er.server.Handler != nil {
return nil
}
// create listener from config
ln, err := er.cfg.ServerConfig.ToListener(ctx)
if err != nil {
return err
}
// set up router.
router := httprouter.New()
router.POST(er.cfg.Path, er.handleReq)
router.GET(er.cfg.HealthPath, er.handleHealthCheck)
// webhook server standup and configuration
er.server, err = er.cfg.ServerConfig.ToServer(ctx, host, er.settings.TelemetrySettings, router)
if err != nil {
return err
}
readTimeout, err := time.ParseDuration(er.cfg.ReadTimeout)
if err != nil {
return err
}
writeTimeout, err := time.ParseDuration(er.cfg.WriteTimeout)
if err != nil {
return err
}
// set timeouts
er.server.ReadHeaderTimeout = readTimeout
er.server.WriteTimeout = writeTimeout
// shutdown
er.shutdownWG.Add(1)
go func() {
defer er.shutdownWG.Done()
if errHTTP := er.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Shutdown manages receiver shutdown tasks. Part of the receiver.Logs interface.
func (er *eventReceiver) Shutdown(_ context.Context) error {
// server must exist to be closed.
if er.server == nil {
return nil
}
err := er.server.Close()
er.shutdownWG.Wait()
return err
}
// handleReq handles an incoming request from the webhook. On success it returns a 200 response code to the webhook
func (er *eventReceiver) handleReq(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
ctx := r.Context()
ctx = er.obsrecv.StartLogsOp(ctx)
if r.Method != http.MethodPost {
er.failBadReq(ctx, w, http.StatusBadRequest, errInvalidRequestMethod)
return
}
if er.cfg.RequiredHeader.Key != "" {
requiredHeaderValue := r.Header.Get(er.cfg.RequiredHeader.Key)
if requiredHeaderValue != er.cfg.RequiredHeader.Value {
er.failBadReq(ctx, w, http.StatusUnauthorized, errMissingRequiredHeader)
return
}
}
encoding := r.Header.Get("Content-Encoding")
// only gzip (and its x-gzip alias) is supported when the encoding header is set.
if encoding != "" && encoding != "gzip" && encoding != "x-gzip" {
er.failBadReq(ctx, w, http.StatusUnsupportedMediaType, errInvalidEncodingType)
return
}
if r.ContentLength == 0 {
er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil)
er.failBadReq(ctx, w, http.StatusBadRequest, errEmptyResponseBody)
return
}
bodyReader := r.Body
// gzip encoded case
if encoding == "gzip" || encoding == "x-gzip" {
reader := er.gzipPool.Get().(*gzip.Reader)
err := reader.Reset(bodyReader)
if err != nil {
er.failBadReq(ctx, w, http.StatusBadRequest, err)
_, _ = io.ReadAll(r.Body)
_ = r.Body.Close()
return
}
bodyReader = reader
defer er.gzipPool.Put(reader)
}
// send body into a scanner and then convert the request body into a log
sc := bufio.NewScanner(bodyReader)
ld, numLogs := reqToLog(sc, r.URL.Query(), er.cfg, er.settings)
consumerErr := er.logConsumer.ConsumeLogs(ctx, ld)
_ = bodyReader.Close()
if consumerErr != nil {
er.failBadReq(ctx, w, http.StatusInternalServerError, consumerErr)
} else {
w.WriteHeader(http.StatusOK)
}
er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), numLogs, consumerErr)
}
// Simple healthcheck endpoint.
func (er *eventReceiver) handleHealthCheck(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte(healthyResponse))
}
// failBadReq writes the response for a failed/bad request: a small JSON body
// built from the error raised by the handler plus the appropriate HTTP status
// code. Many webhooks log these responses or notify their users when a non-2xx
// code is detected.
func (er *eventReceiver) failBadReq(_ context.Context,
w http.ResponseWriter,
httpStatusCode int,
err error,
) {
jsonResp, marshalErr := jsoniter.Marshal(err.Error())
if marshalErr != nil {
er.settings.Logger.Warn("failed to marshal error to json", zap.Error(marshalErr))
}
// write response to webhook; the Content-Type header must be set before
// WriteHeader, otherwise it is silently dropped.
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(httpStatusCode)
if len(jsonResp) > 0 {
if _, writeErr := w.Write(jsonResp); writeErr != nil {
er.settings.Logger.Warn("failed to write json response", zap.Error(writeErr))
}
}
// log bad webhook request if debug is enabled
if er.settings.Logger.Core().Enabled(zap.DebugLevel) {
msg := string(jsonResp)
er.settings.Logger.Debug(msg, zap.Int("http_status_code", httpStatusCode), zap.Error(err))
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
import (
"bufio"
"net/url"
"time"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/receiver"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)
func reqToLog(sc *bufio.Scanner,
query url.Values,
_ *Config,
settings receiver.Settings,
) (plog.Logs, int) {
// we deliberately don't split the data passed into the scanner (i.e. we scan
// the whole body as one token), so only one log record per request is produced.
// NOTE: the log body will retain newline characters, which could have
// formatting consequences downstream.
split := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if !atEOF {
return 0, nil, nil
}
return 0, data, bufio.ErrFinalToken
}
sc.Split(split)
log := plog.NewLogs()
resourceLog := log.ResourceLogs().AppendEmpty()
appendMetadata(resourceLog, query)
scopeLog := resourceLog.ScopeLogs().AppendEmpty()
scopeLog.Scope().SetName(scopeLogName)
scopeLog.Scope().SetVersion(settings.BuildInfo.Version)
scopeLog.Scope().Attributes().PutStr("source", settings.ID.String())
scopeLog.Scope().Attributes().PutStr("receiver", metadata.Type.String())
for sc.Scan() {
logRecord := scopeLog.LogRecords().AppendEmpty()
logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
line := sc.Text()
logRecord.Body().SetStr(line)
}
return log, scopeLog.LogRecords().Len()
}
// appendMetadata appends non-empty query parameters as resource attributes
func appendMetadata(resourceLog plog.ResourceLogs, query url.Values) {
for k := range query {
if query.Get(k) != "" {
resourceLog.Resource().Attributes().PutStr(k, query.Get(k))
}
}
}
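// exampleWholeBodyScan is an illustrative sketch of the split function used in
// reqToLog (it assumes fmt and strings are imported): the scanner consumes the
// entire body as a single final token, so one request yields one log record,
// newlines included.
func exampleWholeBodyScan() {
sc := bufio.NewScanner(strings.NewReader("line one\nline two\n"))
sc.Split(func(data []byte, atEOF bool) (int, []byte, error) {
if !atEOF {
return 0, nil, nil // request more data until EOF
}
return 0, data, bufio.ErrFinalToken // emit everything as one token
})
for sc.Scan() {
fmt.Printf("%q\n", sc.Text()) // "line one\nline two\n": a single token
}
}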
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
// Called by Protobuf JSON deserialization.
func unmarshalJSON(dst []byte, iter *json.Iterator) {
src := iter.ReadStringAsSlice()
if len(src) == 0 {
return
}
if len(dst) != hex.DecodedLen(len(src)) {
iter.ReportError("ID.UnmarshalJSONIter", "length mismatch")
return
}
_, err := hex.Decode(dst, src)
if err != nil {
iter.ReportError("ID.UnmarshalJSONIter", err.Error())
return
}
}
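// Worked example: an 8-byte span ID arrives as 16 hex characters, so
// hex.DecodedLen(16) == 8 matches len(dst) and the length check passes;
// hex.Decode then fills dst in place, e.g. "0102030405060708" decodes to
// [8]byte{1, 2, 3, 4, 5, 6, 7, 8}. A 16-byte trace ID works the same way
// with 32 hex characters.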
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *AnyValue) GetValue() any {
if m != nil {
return m.Value
}
return nil
}
type AnyValue_StringValue struct {
StringValue string
}
func (m *AnyValue) GetStringValue() string {
if v, ok := m.GetValue().(*AnyValue_StringValue); ok {
return v.StringValue
}
return ""
}
type AnyValue_BoolValue struct {
BoolValue bool
}
func (m *AnyValue) GetBoolValue() bool {
if v, ok := m.GetValue().(*AnyValue_BoolValue); ok {
return v.BoolValue
}
return false
}
type AnyValue_IntValue struct {
IntValue int64
}
func (m *AnyValue) GetIntValue() int64 {
if v, ok := m.GetValue().(*AnyValue_IntValue); ok {
return v.IntValue
}
return int64(0)
}
type AnyValue_DoubleValue struct {
DoubleValue float64
}
func (m *AnyValue) GetDoubleValue() float64 {
if v, ok := m.GetValue().(*AnyValue_DoubleValue); ok {
return v.DoubleValue
}
return float64(0)
}
type AnyValue_ArrayValue struct {
ArrayValue *ArrayValue
}
func (m *AnyValue) GetArrayValue() *ArrayValue {
if v, ok := m.GetValue().(*AnyValue_ArrayValue); ok {
return v.ArrayValue
}
return nil
}
type AnyValue_KvlistValue struct {
KvlistValue *KeyValueList
}
func (m *AnyValue) GetKvlistValue() *KeyValueList {
if v, ok := m.GetValue().(*AnyValue_KvlistValue); ok {
return v.KvlistValue
}
return nil
}
type AnyValue_BytesValue struct {
BytesValue []byte
}
func (m *AnyValue) GetBytesValue() []byte {
if v, ok := m.GetValue().(*AnyValue_BytesValue); ok {
return v.BytesValue
}
return nil
}
type AnyValue struct {
Value any
}
var (
protoPoolAnyValue = sync.Pool{
New: func() any {
return &AnyValue{}
},
}
ProtoPoolAnyValue_StringValue = sync.Pool{
New: func() any {
return &AnyValue_StringValue{}
},
}
ProtoPoolAnyValue_BoolValue = sync.Pool{
New: func() any {
return &AnyValue_BoolValue{}
},
}
ProtoPoolAnyValue_IntValue = sync.Pool{
New: func() any {
return &AnyValue_IntValue{}
},
}
ProtoPoolAnyValue_DoubleValue = sync.Pool{
New: func() any {
return &AnyValue_DoubleValue{}
},
}
ProtoPoolAnyValue_ArrayValue = sync.Pool{
New: func() any {
return &AnyValue_ArrayValue{}
},
}
ProtoPoolAnyValue_KvlistValue = sync.Pool{
New: func() any {
return &AnyValue_KvlistValue{}
},
}
ProtoPoolAnyValue_BytesValue = sync.Pool{
New: func() any {
return &AnyValue_BytesValue{}
},
}
)
func NewAnyValue() *AnyValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue{}
}
return protoPoolAnyValue.Get().(*AnyValue)
}
func DeleteAnyValue(orig *AnyValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
switch ov := orig.Value.(type) {
case *AnyValue_StringValue:
if UseProtoPooling.IsEnabled() {
ov.StringValue = ""
ProtoPoolAnyValue_StringValue.Put(ov)
}
case *AnyValue_BoolValue:
if UseProtoPooling.IsEnabled() {
ov.BoolValue = false
ProtoPoolAnyValue_BoolValue.Put(ov)
}
case *AnyValue_IntValue:
if UseProtoPooling.IsEnabled() {
ov.IntValue = int64(0)
ProtoPoolAnyValue_IntValue.Put(ov)
}
case *AnyValue_DoubleValue:
if UseProtoPooling.IsEnabled() {
ov.DoubleValue = float64(0)
ProtoPoolAnyValue_DoubleValue.Put(ov)
}
case *AnyValue_ArrayValue:
DeleteArrayValue(ov.ArrayValue, true)
ov.ArrayValue = nil
ProtoPoolAnyValue_ArrayValue.Put(ov)
case *AnyValue_KvlistValue:
DeleteKeyValueList(ov.KvlistValue, true)
ov.KvlistValue = nil
ProtoPoolAnyValue_KvlistValue.Put(ov)
case *AnyValue_BytesValue:
if UseProtoPooling.IsEnabled() {
ov.BytesValue = nil
ProtoPoolAnyValue_BytesValue.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolAnyValue.Put(orig)
}
}
func CopyAnyValue(dest, src *AnyValue) *AnyValue {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewAnyValue()
}
switch t := src.Value.(type) {
case *AnyValue_StringValue:
var ov *AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
ov.StringValue = t.StringValue
dest.Value = ov
case *AnyValue_BoolValue:
var ov *AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
}
ov.BoolValue = t.BoolValue
dest.Value = ov
case *AnyValue_IntValue:
var ov *AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
ov.IntValue = t.IntValue
dest.Value = ov
case *AnyValue_DoubleValue:
var ov *AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
}
ov.DoubleValue = t.DoubleValue
dest.Value = ov
case *AnyValue_ArrayValue:
var ov *AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
CopyArrayValue(ov.ArrayValue, t.ArrayValue)
dest.Value = ov
case *AnyValue_KvlistValue:
var ov *AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
CopyKeyValueList(ov.KvlistValue, t.KvlistValue)
dest.Value = ov
case *AnyValue_BytesValue:
var ov *AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
}
ov.BytesValue = t.BytesValue
dest.Value = ov
default:
dest.Value = nil
}
return dest
}
func CopyAnyValueSlice(dest, src []AnyValue) []AnyValue {
var newDest []AnyValue
if cap(dest) < len(src) {
newDest = make([]AnyValue, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteAnyValue(&dest[i], false)
}
}
for i := range src {
CopyAnyValue(&newDest[i], &src[i])
}
return newDest
}
func CopyAnyValuePtrSlice(dest, src []*AnyValue) []*AnyValue {
var newDest []*AnyValue
if cap(dest) < len(src) {
newDest = make([]*AnyValue, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewAnyValue()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteAnyValue(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewAnyValue()
}
}
for i := range src {
CopyAnyValue(newDest[i], src[i])
}
return newDest
}
func (orig *AnyValue) Reset() {
*orig = AnyValue{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *AnyValue) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
switch orig := orig.Value.(type) {
case *AnyValue_StringValue:
dest.WriteObjectField("stringValue")
dest.WriteString(orig.StringValue)
case *AnyValue_BoolValue:
dest.WriteObjectField("boolValue")
dest.WriteBool(orig.BoolValue)
case *AnyValue_IntValue:
dest.WriteObjectField("intValue")
dest.WriteInt64(orig.IntValue)
case *AnyValue_DoubleValue:
dest.WriteObjectField("doubleValue")
dest.WriteFloat64(orig.DoubleValue)
case *AnyValue_ArrayValue:
if orig.ArrayValue != nil {
dest.WriteObjectField("arrayValue")
orig.ArrayValue.MarshalJSON(dest)
}
case *AnyValue_KvlistValue:
if orig.KvlistValue != nil {
dest.WriteObjectField("kvlistValue")
orig.KvlistValue.MarshalJSON(dest)
}
case *AnyValue_BytesValue:
dest.WriteObjectField("bytesValue")
dest.WriteBytes(orig.BytesValue)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *AnyValue) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "stringValue", "string_value":
{
var ov *AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
ov.StringValue = iter.ReadString()
orig.Value = ov
}
case "boolValue", "bool_value":
{
var ov *AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
}
ov.BoolValue = iter.ReadBool()
orig.Value = ov
}
case "intValue", "int_value":
{
var ov *AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
ov.IntValue = iter.ReadInt64()
orig.Value = ov
}
case "doubleValue", "double_value":
{
var ov *AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
}
ov.DoubleValue = iter.ReadFloat64()
orig.Value = ov
}
case "arrayValue", "array_value":
{
var ov *AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
ov.ArrayValue.UnmarshalJSON(iter)
orig.Value = ov
}
case "kvlistValue", "kvlist_value":
{
var ov *AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
ov.KvlistValue.UnmarshalJSON(iter)
orig.Value = ov
}
case "bytesValue", "bytes_value":
{
var ov *AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
}
ov.BytesValue = iter.ReadBytes()
orig.Value = ov
}
default:
iter.Skip()
}
}
}
func (orig *AnyValue) SizeProto() int {
var n int
var l int
_ = l
switch orig := orig.Value.(type) {
case nil:
_ = orig
break
case *AnyValue_StringValue:
l = len(orig.StringValue)
n += 1 + proto.Sov(uint64(l)) + l
case *AnyValue_BoolValue:
n += 2
case *AnyValue_IntValue:
n += 1 + proto.Sov(uint64(orig.IntValue))
case *AnyValue_DoubleValue:
n += 9
case *AnyValue_ArrayValue:
if orig.ArrayValue != nil {
l = orig.ArrayValue.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *AnyValue_KvlistValue:
if orig.KvlistValue != nil {
l = orig.KvlistValue.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *AnyValue_BytesValue:
l = len(orig.BytesValue)
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *AnyValue) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
switch orig := orig.Value.(type) {
case *AnyValue_StringValue:
l = len(orig.StringValue)
pos -= l
copy(buf[pos:], orig.StringValue)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
case *AnyValue_BoolValue:
pos--
if orig.BoolValue {
buf[pos] = 1
} else {
buf[pos] = 0
}
pos--
buf[pos] = 0x10
case *AnyValue_IntValue:
pos = proto.EncodeVarint(buf, pos, uint64(orig.IntValue))
pos--
buf[pos] = 0x18
case *AnyValue_DoubleValue:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.DoubleValue))
pos--
buf[pos] = 0x21
case *AnyValue_ArrayValue:
if orig.ArrayValue != nil {
l = orig.ArrayValue.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
case *AnyValue_KvlistValue:
if orig.KvlistValue != nil {
l = orig.KvlistValue.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
case *AnyValue_BytesValue:
l = len(orig.BytesValue)
pos -= l
copy(buf[pos:], orig.BytesValue)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
return len(buf) - pos
}
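// exampleMarshalAnyValue is an illustrative sketch, not generated code: the
// marshaler above writes backwards from the end of the buffer, so callers size
// the buffer with SizeProto first and keep the tail that was actually written.
func exampleMarshalAnyValue(v *AnyValue) []byte {
buf := make([]byte, v.SizeProto())
n := v.MarshalProto(buf) // number of bytes written at the end of buf
return buf[len(buf)-n:]  // the encoded message
}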
func (orig *AnyValue) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If in a group parsing, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *AnyValue_StringValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_StringValue{}
} else {
ov = ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
ov.StringValue = string(buf[startPos:pos])
orig.Value = ov
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
var ov *AnyValue_BoolValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BoolValue{}
} else {
ov = ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
}
ov.BoolValue = num != 0
orig.Value = ov
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
var ov *AnyValue_IntValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_IntValue{}
} else {
ov = ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
ov.IntValue = int64(num)
orig.Value = ov
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *AnyValue_DoubleValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_DoubleValue{}
} else {
ov = ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
}
ov.DoubleValue = math.Float64frombits(num)
orig.Value = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *AnyValue_ArrayValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_ArrayValue{}
} else {
ov = ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
}
ov.ArrayValue = NewArrayValue()
err = ov.ArrayValue.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Value = ov
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *AnyValue_KvlistValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_KvlistValue{}
} else {
ov = ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
ov.KvlistValue = NewKeyValueList()
err = ov.KvlistValue.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Value = ov
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *AnyValue_BytesValue
if !UseProtoPooling.IsEnabled() {
ov = &AnyValue_BytesValue{}
} else {
ov = ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
}
if length != 0 {
ov.BytesValue = make([]byte, length)
copy(ov.BytesValue, buf[startPos:pos])
}
orig.Value = ov
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestAnyValue() *AnyValue {
orig := NewAnyValue()
orig.Value = &AnyValue_StringValue{StringValue: "test_stringvalue"}
return orig
}
func GenTestAnyValuePtrSlice() []*AnyValue {
orig := make([]*AnyValue, 5)
orig[0] = NewAnyValue()
orig[1] = GenTestAnyValue()
orig[2] = NewAnyValue()
orig[3] = GenTestAnyValue()
orig[4] = NewAnyValue()
return orig
}
func GenTestAnyValueSlice() []AnyValue {
orig := make([]AnyValue, 5)
orig[1] = *GenTestAnyValue()
orig[3] = *GenTestAnyValue()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message since oneof in AnyValue does not allow repeated fields.
type ArrayValue struct {
Values []AnyValue
}
var (
protoPoolArrayValue = sync.Pool{
New: func() any {
return &ArrayValue{}
},
}
)
func NewArrayValue() *ArrayValue {
if !UseProtoPooling.IsEnabled() {
return &ArrayValue{}
}
return protoPoolArrayValue.Get().(*ArrayValue)
}
func DeleteArrayValue(orig *ArrayValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Values {
DeleteAnyValue(&orig.Values[i], false)
}
orig.Reset()
if nullable {
protoPoolArrayValue.Put(orig)
}
}
func CopyArrayValue(dest, src *ArrayValue) *ArrayValue {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewArrayValue()
}
dest.Values = CopyAnyValueSlice(dest.Values, src.Values)
return dest
}
func CopyArrayValueSlice(dest, src []ArrayValue) []ArrayValue {
var newDest []ArrayValue
if cap(dest) < len(src) {
newDest = make([]ArrayValue, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteArrayValue(&dest[i], false)
}
}
for i := range src {
CopyArrayValue(&newDest[i], &src[i])
}
return newDest
}
func CopyArrayValuePtrSlice(dest, src []*ArrayValue) []*ArrayValue {
var newDest []*ArrayValue
if cap(dest) < len(src) {
newDest = make([]*ArrayValue, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewArrayValue()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteArrayValue(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewArrayValue()
}
}
for i := range src {
CopyArrayValue(newDest[i], src[i])
}
return newDest
}
func (orig *ArrayValue) Reset() {
*orig = ArrayValue{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ArrayValue) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
orig.Values[0].MarshalJSON(dest)
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
orig.Values[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ArrayValue) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, AnyValue{})
orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ArrayValue) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Values {
l = orig.Values[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ArrayValue) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Values) - 1; i >= 0; i-- {
l = orig.Values[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *ArrayValue) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If in a group parsing, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Values = append(orig.Values, AnyValue{})
err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestArrayValue() *ArrayValue {
orig := NewArrayValue()
orig.Values = []AnyValue{{}, *GenTestAnyValue()}
return orig
}
func GenTestArrayValuePtrSlice() []*ArrayValue {
orig := make([]*ArrayValue, 5)
orig[0] = NewArrayValue()
orig[1] = GenTestArrayValue()
orig[2] = NewArrayValue()
orig[3] = GenTestArrayValue()
orig[4] = NewArrayValue()
return orig
}
func GenTestArrayValueSlice() []ArrayValue {
orig := make([]ArrayValue, 5)
orig[1] = *GenTestArrayValue()
orig[3] = *GenTestArrayValue()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type EntityRef struct {
SchemaUrl string
Type string
IdKeys []string
DescriptionKeys []string
}
var (
protoPoolEntityRef = sync.Pool{
New: func() any {
return &EntityRef{}
},
}
)
func NewEntityRef() *EntityRef {
if !UseProtoPooling.IsEnabled() {
return &EntityRef{}
}
return protoPoolEntityRef.Get().(*EntityRef)
}
func DeleteEntityRef(orig *EntityRef, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolEntityRef.Put(orig)
}
}
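// New*/Delete* form the pooling lifecycle gated by the UseProtoPooling feature
// flag: NewEntityRef draws from the sync.Pool only when the flag is enabled,
// and DeleteEntityRef resets the object and returns it to the pool only when
// the caller owns the allocation (nullable == true). A hypothetical round trip:
//
//	er := NewEntityRef()      // pooled allocation when the flag is on
//	er.Type = "service"       // illustrative value
//	DeleteEntityRef(er, true) // Reset, then Put back into the pool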
func CopyEntityRef(dest, src *EntityRef) *EntityRef {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewEntityRef()
}
dest.SchemaUrl = src.SchemaUrl
dest.Type = src.Type
dest.IdKeys = append(dest.IdKeys[:0], src.IdKeys...)
dest.DescriptionKeys = append(dest.DescriptionKeys[:0], src.DescriptionKeys...)
return dest
}
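// The string slices are copied with the append(dest[:0], src...) idiom, which
// truncates dest in place and reuses its backing array when capacity allows.
// Spelled out (equivalent, illustrative form):
//
//	dest.IdKeys = dest.IdKeys[:0]                    // keep storage, drop length
//	dest.IdKeys = append(dest.IdKeys, src.IdKeys...) // copy elements over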
func CopyEntityRefSlice(dest, src []EntityRef) []EntityRef {
var newDest []EntityRef
if cap(dest) < len(src) {
newDest = make([]EntityRef, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteEntityRef(&dest[i], false)
}
}
for i := range src {
CopyEntityRef(&newDest[i], &src[i])
}
return newDest
}
func CopyEntityRefPtrSlice(dest, src []*EntityRef) []*EntityRef {
var newDest []*EntityRef
if cap(dest) < len(src) {
newDest = make([]*EntityRef, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewEntityRef()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteEntityRef(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewEntityRef()
}
}
for i := range src {
CopyEntityRef(newDest[i], src[i])
}
return newDest
}
func (orig *EntityRef) Reset() {
*orig = EntityRef{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *EntityRef) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
if orig.Type != "" {
dest.WriteObjectField("type")
dest.WriteString(orig.Type)
}
if len(orig.IdKeys) > 0 {
dest.WriteObjectField("idKeys")
dest.WriteArrayStart()
dest.WriteString(orig.IdKeys[0])
for i := 1; i < len(orig.IdKeys); i++ {
dest.WriteMore()
dest.WriteString(orig.IdKeys[i])
}
dest.WriteArrayEnd()
}
if len(orig.DescriptionKeys) > 0 {
dest.WriteObjectField("descriptionKeys")
dest.WriteArrayStart()
dest.WriteString(orig.DescriptionKeys[0])
for i := 1; i < len(orig.DescriptionKeys); i++ {
dest.WriteMore()
dest.WriteString(orig.DescriptionKeys[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *EntityRef) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
case "type":
orig.Type = iter.ReadString()
case "idKeys", "id_keys":
for iter.ReadArray() {
orig.IdKeys = append(orig.IdKeys, iter.ReadString())
}
case "descriptionKeys", "description_keys":
for iter.ReadArray() {
orig.DescriptionKeys = append(orig.DescriptionKeys, iter.ReadString())
}
default:
iter.Skip()
}
}
}
func (orig *EntityRef) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Type)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.IdKeys {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.DescriptionKeys {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *EntityRef) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Type)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Type)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.IdKeys) - 1; i >= 0; i-- {
l = len(orig.IdKeys[i])
pos -= l
copy(buf[pos:], orig.IdKeys[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.DescriptionKeys) - 1; i >= 0; i-- {
l = len(orig.DescriptionKeys[i])
pos -= l
copy(buf[pos:], orig.DescriptionKeys[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
return len(buf) - pos
}
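// The literal tag bytes written above follow the protobuf field-key encoding
// (field_number << 3) | wire_type; every EntityRef field is length-delimited
// (wire type 2):
//
//	0x0a = (1 << 3) | 2 // schema_url
//	0x12 = (2 << 3) | 2 // type
//	0x1a = (3 << 3) | 2 // id_keys
//	0x22 = (4 << 3) | 2 // description_keys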
func (orig *EntityRef) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Type = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IdKeys", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.IdKeys = append(orig.IdKeys, string(buf[startPos:pos]))
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DescriptionKeys", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DescriptionKeys = append(orig.DescriptionKeys, string(buf[startPos:pos]))
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestEntityRef() *EntityRef {
orig := NewEntityRef()
orig.SchemaUrl = "test_schemaurl"
orig.Type = "test_type"
orig.IdKeys = []string{"", "test_idkeys"}
orig.DescriptionKeys = []string{"", "test_descriptionkeys"}
return orig
}
func GenTestEntityRefPtrSlice() []*EntityRef {
orig := make([]*EntityRef, 5)
orig[0] = NewEntityRef()
orig[1] = GenTestEntityRef()
orig[2] = NewEntityRef()
orig[3] = GenTestEntityRef()
orig[4] = NewEntityRef()
return orig
}
func GenTestEntityRefSlice() []EntityRef {
orig := make([]EntityRef, 5)
orig[1] = *GenTestEntityRef()
orig[3] = *GenTestEntityRef()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *Exemplar) GetValue() any {
if m != nil {
return m.Value
}
return nil
}
type Exemplar_AsDouble struct {
AsDouble float64
}
func (m *Exemplar) GetAsDouble() float64 {
if v, ok := m.GetValue().(*Exemplar_AsDouble); ok {
return v.AsDouble
}
return float64(0)
}
type Exemplar_AsInt struct {
AsInt int64
}
func (m *Exemplar) GetAsInt() int64 {
if v, ok := m.GetValue().(*Exemplar_AsInt); ok {
return v.AsInt
}
return int64(0)
}
// Exemplar is a sample input measurement, recorded as either a double or an int.
//
// Exemplars also hold information about the environment when the measurement was recorded,
// for example the span and trace ID of the active span when the exemplar was recorded.
type Exemplar struct {
FilteredAttributes []KeyValue
TimeUnixNano uint64
Value any
TraceId TraceID
SpanId SpanID
}
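// The proto `value` oneof is modeled as the untyped Value field holding either
// *Exemplar_AsDouble or *Exemplar_AsInt; the typed getters above return the
// zero value when the other variant (or nil) is stored. Hypothetical usage:
//
//	ex := NewExemplar()
//	ex.Value = &Exemplar_AsInt{AsInt: 42}
//	_ = ex.GetAsInt()    // 42
//	_ = ex.GetAsDouble() // 0: wrong variant, falls back to the zero value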
var (
protoPoolExemplar = sync.Pool{
New: func() any {
return &Exemplar{}
},
}
ProtoPoolExemplar_AsDouble = sync.Pool{
New: func() any {
return &Exemplar_AsDouble{}
},
}
ProtoPoolExemplar_AsInt = sync.Pool{
New: func() any {
return &Exemplar_AsInt{}
},
}
)
func NewExemplar() *Exemplar {
if !UseProtoPooling.IsEnabled() {
return &Exemplar{}
}
return protoPoolExemplar.Get().(*Exemplar)
}
func DeleteExemplar(orig *Exemplar, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.FilteredAttributes {
DeleteKeyValue(&orig.FilteredAttributes[i], false)
}
switch ov := orig.Value.(type) {
case *Exemplar_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolExemplar_AsDouble.Put(ov)
}
case *Exemplar_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolExemplar_AsInt.Put(ov)
}
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
protoPoolExemplar.Put(orig)
}
}
func CopyExemplar(dest, src *Exemplar) *Exemplar {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExemplar()
}
dest.FilteredAttributes = CopyKeyValueSlice(dest.FilteredAttributes, src.FilteredAttributes)
dest.TimeUnixNano = src.TimeUnixNano
switch t := src.Value.(type) {
case *Exemplar_AsDouble:
var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
case *Exemplar_AsInt:
var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
default:
dest.Value = nil
}
CopyTraceID(&dest.TraceId, &src.TraceId)
CopySpanID(&dest.SpanId, &src.SpanId)
return dest
}
func CopyExemplarSlice(dest, src []Exemplar) []Exemplar {
var newDest []Exemplar
if cap(dest) < len(src) {
newDest = make([]Exemplar, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExemplar(&dest[i], false)
}
}
for i := range src {
CopyExemplar(&newDest[i], &src[i])
}
return newDest
}
func CopyExemplarPtrSlice(dest, src []*Exemplar) []*Exemplar {
var newDest []*Exemplar
if cap(dest) < len(src) {
newDest = make([]*Exemplar, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExemplar()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExemplar(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExemplar()
}
}
for i := range src {
CopyExemplar(newDest[i], src[i])
}
return newDest
}
func (orig *Exemplar) Reset() {
*orig = Exemplar{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Exemplar) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.FilteredAttributes) > 0 {
dest.WriteObjectField("filteredAttributes")
dest.WriteArrayStart()
orig.FilteredAttributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.FilteredAttributes); i++ {
dest.WriteMore()
orig.FilteredAttributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
case *Exemplar_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
case *Exemplar_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
orig.TraceId.MarshalJSON(dest)
}
if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
orig.SpanId.MarshalJSON(dest)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Exemplar) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "filteredAttributes", "filtered_attributes":
for iter.ReadArray() {
orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalJSON(iter)
}
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "asDouble", "as_double":
{
var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
}
case "asInt", "as_int":
{
var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
}
case "traceId", "trace_id":
orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
orig.SpanId.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
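// The JSON decoder accepts both the canonical protobuf-JSON camelCase key and
// the original snake_case proto field name for each property; for example,
// "timeUnixNano" and "time_unix_nano" both populate TimeUnixNano.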
func (orig *Exemplar) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.FilteredAttributes {
l = orig.FilteredAttributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.TimeUnixNano != 0 {
n += 9
}
switch orig := orig.Value.(type) {
case nil:
_ = orig
case *Exemplar_AsDouble:
n += 9
case *Exemplar_AsInt:
n += 9
}
l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *Exemplar) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.FilteredAttributes) - 1; i >= 0; i-- {
l = orig.FilteredAttributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x11
}
switch orig := orig.Value.(type) {
case *Exemplar_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x19
case *Exemplar_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
buf[pos] = 0x31
}
l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
return len(buf) - pos
}
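// TimeUnixNano and the oneof variants are fixed64 fields (wire type I64): each
// costs exactly one tag byte plus eight little-endian payload bytes, which is
// why SizeProto charges a constant 9 per present field. Doubles are bit-cast
// via math.Float64bits before the write; for example (illustrative):
//
//	binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(3.1415926))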
func (orig *Exemplar) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.FilteredAttributes = append(orig.FilteredAttributes, KeyValue{})
err = orig.FilteredAttributes[len(orig.FilteredAttributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *Exemplar_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsDouble{}
} else {
ov = ProtoPoolExemplar_AsDouble.Get().(*Exemplar_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
case 6:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *Exemplar_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &Exemplar_AsInt{}
} else {
ov = ProtoPoolExemplar_AsInt.Get().(*Exemplar_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExemplar() *Exemplar {
orig := NewExemplar()
orig.FilteredAttributes = []KeyValue{{}, *GenTestKeyValue()}
orig.TimeUnixNano = uint64(13)
orig.Value = &Exemplar_AsDouble{AsDouble: float64(3.1415926)}
orig.TraceId = *GenTestTraceID()
orig.SpanId = *GenTestSpanID()
return orig
}
func GenTestExemplarPtrSlice() []*Exemplar {
orig := make([]*Exemplar, 5)
orig[0] = NewExemplar()
orig[1] = GenTestExemplar()
orig[2] = NewExemplar()
orig[3] = GenTestExemplar()
orig[4] = NewExemplar()
return orig
}
func GenTestExemplarSlice() []Exemplar {
orig := make([]Exemplar, 5)
orig[1] = *GenTestExemplar()
orig[3] = *GenTestExemplar()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// all reported double measurements over a time interval into an ExponentialHistogram.
type ExponentialHistogram struct {
DataPoints []*ExponentialHistogramDataPoint
AggregationTemporality AggregationTemporality
}
var (
protoPoolExponentialHistogram = sync.Pool{
New: func() any {
return &ExponentialHistogram{}
},
}
)
func NewExponentialHistogram() *ExponentialHistogram {
if !UseProtoPooling.IsEnabled() {
return &ExponentialHistogram{}
}
return protoPoolExponentialHistogram.Get().(*ExponentialHistogram)
}
func DeleteExponentialHistogram(orig *ExponentialHistogram, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolExponentialHistogram.Put(orig)
}
}
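// The nullable argument mirrors ownership: pointer-slice elements such as
// DataPoints are released with nullable=true so each returns to its pool,
// while value-slice elements elsewhere (e.g. Exemplar.FilteredAttributes) are
// reset with nullable=false because their storage belongs to the slice:
//
//	DeleteExponentialHistogramDataPoint(orig.DataPoints[i], true) // pooled pointer
//	DeleteKeyValue(&orig.FilteredAttributes[i], false)            // embedded value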
func CopyExponentialHistogram(dest, src *ExponentialHistogram) *ExponentialHistogram {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExponentialHistogram()
}
dest.DataPoints = CopyExponentialHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
return dest
}
func CopyExponentialHistogramSlice(dest, src []ExponentialHistogram) []ExponentialHistogram {
var newDest []ExponentialHistogram
if cap(dest) < len(src) {
newDest = make([]ExponentialHistogram, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogram(&dest[i], false)
}
}
for i := range src {
CopyExponentialHistogram(&newDest[i], &src[i])
}
return newDest
}
func CopyExponentialHistogramPtrSlice(dest, src []*ExponentialHistogram) []*ExponentialHistogram {
var newDest []*ExponentialHistogram
if cap(dest) < len(src) {
newDest = make([]*ExponentialHistogram, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogram()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogram(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogram()
}
}
for i := range src {
CopyExponentialHistogram(newDest[i], src[i])
}
return newDest
}
func (orig *ExponentialHistogram) Reset() {
*orig = ExponentialHistogram{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExponentialHistogram) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExponentialHistogram) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
default:
iter.Skip()
}
}
}
func (orig *ExponentialHistogram) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
return n
}
func (orig *ExponentialHistogram) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
return len(buf) - pos
}
func (orig *ExponentialHistogram) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewExponentialHistogramDataPoint())
err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExponentialHistogram() *ExponentialHistogram {
orig := NewExponentialHistogram()
orig.DataPoints = []*ExponentialHistogramDataPoint{{}, GenTestExponentialHistogramDataPoint()}
orig.AggregationTemporality = AggregationTemporality(13)
return orig
}
func GenTestExponentialHistogramPtrSlice() []*ExponentialHistogram {
orig := make([]*ExponentialHistogram, 5)
orig[0] = NewExponentialHistogram()
orig[1] = GenTestExponentialHistogram()
orig[2] = NewExponentialHistogram()
orig[3] = GenTestExponentialHistogram()
orig[4] = NewExponentialHistogram()
return orig
}
func GenTestExponentialHistogramSlice() []ExponentialHistogram {
orig := make([]ExponentialHistogram, 5)
orig[1] = *GenTestExponentialHistogram()
orig[3] = *GenTestExponentialHistogram()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *ExponentialHistogramDataPoint) GetSum_() any {
if m != nil {
return m.Sum_
}
return nil
}
type ExponentialHistogramDataPoint_Sum struct {
Sum float64
}
func (m *ExponentialHistogramDataPoint) GetSum() float64 {
if v, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok {
return v.Sum
}
return float64(0)
}
func (m *ExponentialHistogramDataPoint) GetMin_() any {
if m != nil {
return m.Min_
}
return nil
}
type ExponentialHistogramDataPoint_Min struct {
Min float64
}
func (m *ExponentialHistogramDataPoint) GetMin() float64 {
if v, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok {
return v.Min
}
return float64(0)
}
func (m *ExponentialHistogramDataPoint) GetMax_() any {
if m != nil {
return m.Max_
}
return nil
}
type ExponentialHistogramDataPoint_Max struct {
Max float64
}
func (m *ExponentialHistogramDataPoint) GetMax() float64 {
if v, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok {
return v.Max
}
return float64(0)
}
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
// contains summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
type ExponentialHistogramDataPoint struct {
Attributes []KeyValue
StartTimeUnixNano uint64
TimeUnixNano uint64
Count uint64
Sum_ any
Scale int32
ZeroCount uint64
Positive ExponentialHistogramDataPointBuckets
Negative ExponentialHistogramDataPointBuckets
Flags uint32
Exemplars []Exemplar
Min_ any
Max_ any
ZeroThreshold float64
}
var (
protoPoolExponentialHistogramDataPoint = sync.Pool{
New: func() any {
return &ExponentialHistogramDataPoint{}
},
}
ProtoPoolExponentialHistogramDataPoint_Sum = sync.Pool{
New: func() any {
return &ExponentialHistogramDataPoint_Sum{}
},
}
ProtoPoolExponentialHistogramDataPoint_Min = sync.Pool{
New: func() any {
return &ExponentialHistogramDataPoint_Min{}
},
}
ProtoPoolExponentialHistogramDataPoint_Max = sync.Pool{
New: func() any {
return &ExponentialHistogramDataPoint_Max{}
},
}
)
func NewExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
return &ExponentialHistogramDataPoint{}
}
return protoPoolExponentialHistogramDataPoint.Get().(*ExponentialHistogramDataPoint)
}
func DeleteExponentialHistogramDataPoint(orig *ExponentialHistogramDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
case *ExponentialHistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolExponentialHistogramDataPoint_Sum.Put(ov)
}
}
DeleteExponentialHistogramDataPointBuckets(&orig.Positive, false)
DeleteExponentialHistogramDataPointBuckets(&orig.Negative, false)
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
case *ExponentialHistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolExponentialHistogramDataPoint_Min.Put(ov)
}
}
switch ov := orig.Max_.(type) {
case *ExponentialHistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolExponentialHistogramDataPoint_Max.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolExponentialHistogramDataPoint.Put(orig)
}
}
func CopyExponentialHistogramDataPoint(dest, src *ExponentialHistogramDataPoint) *ExponentialHistogramDataPoint {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExponentialHistogramDataPoint()
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
switch t := src.Sum_.(type) {
case *ExponentialHistogramDataPoint_Sum:
var ov *ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Sum{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
ov.Sum = t.Sum
dest.Sum_ = ov
default:
dest.Sum_ = nil
}
dest.Scale = src.Scale
dest.ZeroCount = src.ZeroCount
CopyExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive)
CopyExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative)
dest.Flags = src.Flags
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
switch t := src.Min_.(type) {
case *ExponentialHistogramDataPoint_Min:
var ov *ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Min{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
ov.Min = t.Min
dest.Min_ = ov
default:
dest.Min_ = nil
}
switch t := src.Max_.(type) {
case *ExponentialHistogramDataPoint_Max:
var ov *ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Max{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
ov.Max = t.Max
dest.Max_ = ov
default:
dest.Max_ = nil
}
dest.ZeroThreshold = src.ZeroThreshold
return dest
}
func CopyExponentialHistogramDataPointSlice(dest, src []ExponentialHistogramDataPoint) []ExponentialHistogramDataPoint {
var newDest []ExponentialHistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]ExponentialHistogramDataPoint, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogramDataPoint(&dest[i], false)
}
}
for i := range src {
CopyExponentialHistogramDataPoint(&newDest[i], &src[i])
}
return newDest
}
func CopyExponentialHistogramDataPointPtrSlice(dest, src []*ExponentialHistogramDataPoint) []*ExponentialHistogramDataPoint {
var newDest []*ExponentialHistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]*ExponentialHistogramDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogramDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPoint()
}
}
for i := range src {
CopyExponentialHistogramDataPoint(newDest[i], src[i])
}
return newDest
}
func (orig *ExponentialHistogramDataPoint) Reset() {
*orig = ExponentialHistogramDataPoint{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExponentialHistogramDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if orig.Scale != int32(0) {
dest.WriteObjectField("scale")
dest.WriteInt32(orig.Scale)
}
if orig.ZeroCount != uint64(0) {
dest.WriteObjectField("zeroCount")
dest.WriteUint64(orig.ZeroCount)
}
dest.WriteObjectField("positive")
orig.Positive.MarshalJSON(dest)
dest.WriteObjectField("negative")
orig.Negative.MarshalJSON(dest)
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
if orig.ZeroThreshold != float64(0) {
dest.WriteObjectField("zeroThreshold")
dest.WriteFloat64(orig.ZeroThreshold)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExponentialHistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
{
var ov *ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Sum{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
}
case "scale":
orig.Scale = iter.ReadInt32()
case "zeroCount", "zero_count":
orig.ZeroCount = iter.ReadUint64()
case "positive":
orig.Positive.UnmarshalJSON(iter)
case "negative":
orig.Negative.UnmarshalJSON(iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, Exemplar{})
orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "min":
{
var ov *ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Min{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
}
case "max":
{
var ov *ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Max{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
}
case "zeroThreshold", "zero_threshold":
orig.ZeroThreshold = iter.ReadFloat64()
default:
iter.Skip()
}
}
}
func (orig *ExponentialHistogramDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
if orig.Scale != 0 {
n += 1 + proto.Soz(uint64(orig.Scale))
}
if orig.ZeroCount != 0 {
n += 9
}
l = orig.Positive.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.Negative.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
for i := range orig.Exemplars {
l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
_ = orig
n += 9
}
if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
_ = orig
n += 9
}
if orig.ZeroThreshold != 0 {
n += 9
}
return n
}
func (orig *ExponentialHistogramDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig, ok := orig.Sum_.(*ExponentialHistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
if orig.Scale != 0 {
pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Scale)<<1)^uint32(orig.Scale>>31)))
pos--
buf[pos] = 0x30
}
if orig.ZeroCount != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ZeroCount))
pos--
buf[pos] = 0x39
}
l = orig.Positive.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
l = orig.Negative.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x50
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
if orig, ok := orig.Min_.(*ExponentialHistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x61
}
if orig, ok := orig.Max_.(*ExponentialHistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
buf[pos] = 0x69
}
if orig.ZeroThreshold != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ZeroThreshold))
pos--
buf[pos] = 0x71
}
return len(buf) - pos
}
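// Scale is a sint32 field: before the varint write it is zigzag-encoded as
// (n << 1) ^ (n >> 31), mapping signed values of small magnitude to small
// unsigned varints. Sample mappings, by definition of the transform:
//
//	 0 -> 0
//	-1 -> 1
//	 1 -> 2
//	-2 -> 3
//	 2 -> 4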
func (orig *ExponentialHistogramDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *ExponentialHistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Sum{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Sum.Get().(*ExponentialHistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Scale = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
case 7:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ZeroCount = uint64(num)
case 8:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Positive.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Negative.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, Exemplar{})
err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *ExponentialHistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Min{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Min.Get().(*ExponentialHistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
case 13:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *ExponentialHistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &ExponentialHistogramDataPoint_Max{}
} else {
ov = ProtoPoolExponentialHistogramDataPoint_Max.Get().(*ExponentialHistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
case 14:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ZeroThreshold = math.Float64frombits(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
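// The Scale decode in case 6 inverts the zigzag transform, (u >> 1) ^ -(u & 1),
// using an arithmetic right shift to materialize the all-ones mask. An
// equivalent, more readable form (illustrative sketch):
//
//	u := uint32(num)
//	orig.Scale = int32((u >> 1) ^ -(u & 1))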
func GenTestExponentialHistogramDataPoint() *ExponentialHistogramDataPoint {
orig := NewExponentialHistogramDataPoint()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.StartTimeUnixNano = uint64(13)
orig.TimeUnixNano = uint64(13)
orig.Count = uint64(13)
orig.Sum_ = &ExponentialHistogramDataPoint_Sum{Sum: float64(3.1415926)}
orig.Scale = int32(13)
orig.ZeroCount = uint64(13)
orig.Positive = *GenTestExponentialHistogramDataPointBuckets()
orig.Negative = *GenTestExponentialHistogramDataPointBuckets()
orig.Flags = uint32(13)
orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
orig.Min_ = &ExponentialHistogramDataPoint_Min{Min: float64(3.1415926)}
orig.Max_ = &ExponentialHistogramDataPoint_Max{Max: float64(3.1415926)}
orig.ZeroThreshold = float64(3.1415926)
return orig
}
func GenTestExponentialHistogramDataPointPtrSlice() []*ExponentialHistogramDataPoint {
orig := make([]*ExponentialHistogramDataPoint, 5)
orig[0] = NewExponentialHistogramDataPoint()
orig[1] = GenTestExponentialHistogramDataPoint()
orig[2] = NewExponentialHistogramDataPoint()
orig[3] = GenTestExponentialHistogramDataPoint()
orig[4] = NewExponentialHistogramDataPoint()
return orig
}
func GenTestExponentialHistogramDataPointSlice() []ExponentialHistogramDataPoint {
orig := make([]ExponentialHistogramDataPoint, 5)
orig[1] = *GenTestExponentialHistogramDataPoint()
orig[3] = *GenTestExponentialHistogramDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExponentialHistogramDataPointBuckets is a set of bucket counts encoded as a contiguous array of counts.
type ExponentialHistogramDataPointBuckets struct {
Offset int32
BucketCounts []uint64
}
var (
protoPoolExponentialHistogramDataPointBuckets = sync.Pool{
New: func() any {
return &ExponentialHistogramDataPointBuckets{}
},
}
)
func NewExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
if !UseProtoPooling.IsEnabled() {
return &ExponentialHistogramDataPointBuckets{}
}
return protoPoolExponentialHistogramDataPointBuckets.Get().(*ExponentialHistogramDataPointBuckets)
}
func DeleteExponentialHistogramDataPointBuckets(orig *ExponentialHistogramDataPointBuckets, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExponentialHistogramDataPointBuckets.Put(orig)
}
}
func CopyExponentialHistogramDataPointBuckets(dest, src *ExponentialHistogramDataPointBuckets) *ExponentialHistogramDataPointBuckets {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExponentialHistogramDataPointBuckets()
}
dest.Offset = src.Offset
dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
return dest
}
func CopyExponentialHistogramDataPointBucketsSlice(dest, src []ExponentialHistogramDataPointBuckets) []ExponentialHistogramDataPointBuckets {
var newDest []ExponentialHistogramDataPointBuckets
if cap(dest) < len(src) {
newDest = make([]ExponentialHistogramDataPointBuckets, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogramDataPointBuckets(&dest[i], false)
}
}
for i := range src {
CopyExponentialHistogramDataPointBuckets(&newDest[i], &src[i])
}
return newDest
}
func CopyExponentialHistogramDataPointBucketsPtrSlice(dest, src []*ExponentialHistogramDataPointBuckets) []*ExponentialHistogramDataPointBuckets {
var newDest []*ExponentialHistogramDataPointBuckets
if cap(dest) < len(src) {
newDest = make([]*ExponentialHistogramDataPointBuckets, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPointBuckets()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExponentialHistogramDataPointBuckets(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExponentialHistogramDataPointBuckets()
}
}
for i := range src {
CopyExponentialHistogramDataPointBuckets(newDest[i], src[i])
}
return newDest
}
func (orig *ExponentialHistogramDataPointBuckets) Reset() {
*orig = ExponentialHistogramDataPointBuckets{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExponentialHistogramDataPointBuckets) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Offset != int32(0) {
dest.WriteObjectField("offset")
dest.WriteInt32(orig.Offset)
}
if len(orig.BucketCounts) > 0 {
dest.WriteObjectField("bucketCounts")
dest.WriteArrayStart()
dest.WriteUint64(orig.BucketCounts[0])
for i := 1; i < len(orig.BucketCounts); i++ {
dest.WriteMore()
dest.WriteUint64(orig.BucketCounts[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExponentialHistogramDataPointBuckets) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "offset":
orig.Offset = iter.ReadInt32()
case "bucketCounts", "bucket_counts":
for iter.ReadArray() {
orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
}
default:
iter.Skip()
}
}
}
func (orig *ExponentialHistogramDataPointBuckets) SizeProto() int {
var n int
var l int
_ = l
if orig.Offset != 0 {
n += 1 + proto.Soz(uint64(orig.Offset))
}
if len(orig.BucketCounts) > 0 {
l = 0
for _, e := range orig.BucketCounts {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExponentialHistogramDataPointBuckets) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.Offset != 0 {
pos = proto.EncodeVarint(buf, pos, uint64((uint32(orig.Offset)<<1)^uint32(orig.Offset>>31)))
pos--
buf[pos] = 0x8
}
l = len(orig.BucketCounts)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.BucketCounts[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
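// BucketCounts is written as a packed repeated field: the varints are emitted
// back to front, then a single length prefix and tag (0x12 = field 2, wire
// type LEN) are prepended. For example, BucketCounts = []uint64{0, 13}
// serializes (illustratively) as:
//
//	0x12 0x02 0x00 0x0d
//	tag  len  0    13
//
// The decoder below also tolerates the unpacked form, one varint per tag, as
// the protobuf spec requires.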
func (orig *ExponentialHistogramDataPointBuckets) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag and wire type.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Offset = int32(uint32(num>>1) ^ uint32(int32((num&1)<<31)>>31))
case 2:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExponentialHistogramDataPointBuckets() *ExponentialHistogramDataPointBuckets {
orig := NewExponentialHistogramDataPointBuckets()
orig.Offset = int32(13)
orig.BucketCounts = []uint64{uint64(0), uint64(13)}
return orig
}
func GenTestExponentialHistogramDataPointBucketsPtrSlice() []*ExponentialHistogramDataPointBuckets {
orig := make([]*ExponentialHistogramDataPointBuckets, 5)
orig[0] = NewExponentialHistogramDataPointBuckets()
orig[1] = GenTestExponentialHistogramDataPointBuckets()
orig[2] = NewExponentialHistogramDataPointBuckets()
orig[3] = GenTestExponentialHistogramDataPointBuckets()
orig[4] = NewExponentialHistogramDataPointBuckets()
return orig
}
func GenTestExponentialHistogramDataPointBucketsSlice() []ExponentialHistogramDataPointBuckets {
orig := make([]ExponentialHistogramDataPointBuckets, 5)
orig[1] = *GenTestExponentialHistogramDataPointBuckets()
orig[3] = *GenTestExponentialHistogramDataPointBuckets()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportLogsPartialSuccess represents the details of a partially successful export request.
type ExportLogsPartialSuccess struct {
RejectedLogRecords int64
ErrorMessage string
}
var (
protoPoolExportLogsPartialSuccess = sync.Pool{
New: func() any {
return &ExportLogsPartialSuccess{}
},
}
)
func NewExportLogsPartialSuccess() *ExportLogsPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &ExportLogsPartialSuccess{}
}
return protoPoolExportLogsPartialSuccess.Get().(*ExportLogsPartialSuccess)
}
func DeleteExportLogsPartialSuccess(orig *ExportLogsPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportLogsPartialSuccess.Put(orig)
}
}
func CopyExportLogsPartialSuccess(dest, src *ExportLogsPartialSuccess) *ExportLogsPartialSuccess {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportLogsPartialSuccess()
}
dest.RejectedLogRecords = src.RejectedLogRecords
dest.ErrorMessage = src.ErrorMessage
return dest
}
func CopyExportLogsPartialSuccessSlice(dest, src []ExportLogsPartialSuccess) []ExportLogsPartialSuccess {
var newDest []ExportLogsPartialSuccess
if cap(dest) < len(src) {
newDest = make([]ExportLogsPartialSuccess, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsPartialSuccess(&dest[i], false)
}
}
for i := range src {
CopyExportLogsPartialSuccess(&newDest[i], &src[i])
}
return newDest
}
func CopyExportLogsPartialSuccessPtrSlice(dest, src []*ExportLogsPartialSuccess) []*ExportLogsPartialSuccess {
var newDest []*ExportLogsPartialSuccess
if cap(dest) < len(src) {
newDest = make([]*ExportLogsPartialSuccess, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsPartialSuccess()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsPartialSuccess(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsPartialSuccess()
}
}
for i := range src {
CopyExportLogsPartialSuccess(newDest[i], src[i])
}
return newDest
}
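// Minimal sketch (illustrative name only) of how the copy helpers above
// behave: a nil or under-capacity dest is reallocated, while a dest with
// sufficient capacity is reused in place, with trailing elements deleted so
// the GC can reclaim whatever they reference.
func exampleCopyPartialSuccessSlices() {
src := GenTestExportLogsPartialSuccessSlice()
var dest []ExportLogsPartialSuccess
dest = CopyExportLogsPartialSuccessSlice(dest, src)     // allocates len(src)
dest = CopyExportLogsPartialSuccessSlice(dest, src[:2]) // reuses, shrinks to 2
_ = dest
}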
func (orig *ExportLogsPartialSuccess) Reset() {
*orig = ExportLogsPartialSuccess{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportLogsPartialSuccess) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedLogRecords != int64(0) {
dest.WriteObjectField("rejectedLogRecords")
dest.WriteInt64(orig.RejectedLogRecords)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportLogsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedLogRecords", "rejected_log_records":
orig.RejectedLogRecords = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ExportLogsPartialSuccess) SizeProto() int {
var n int
var l int
_ = l
if orig.RejectedLogRecords != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedLogRecords))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportLogsPartialSuccess) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedLogRecords != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedLogRecords))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func (orig *ExportLogsPartialSuccess) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedLogRecords", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedLogRecords = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
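// Round-trip sketch (illustrative name only) for the three proto methods
// above: MarshalProto writes backwards from the end of buf and returns the
// byte count, so the encoded message occupies the tail of the buffer; sizing
// the buffer with SizeProto makes that tail the whole buffer.
func exampleProtoRoundTripPartialSuccess() error {
orig := GenTestExportLogsPartialSuccess()
buf := make([]byte, orig.SizeProto())
n := orig.MarshalProto(buf)
dec := NewExportLogsPartialSuccess()
return dec.UnmarshalProto(buf[len(buf)-n:])
}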
func GenTestExportLogsPartialSuccess() *ExportLogsPartialSuccess {
orig := NewExportLogsPartialSuccess()
orig.RejectedLogRecords = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
func GenTestExportLogsPartialSuccessPtrSlice() []*ExportLogsPartialSuccess {
orig := make([]*ExportLogsPartialSuccess, 5)
orig[0] = NewExportLogsPartialSuccess()
orig[1] = GenTestExportLogsPartialSuccess()
orig[2] = NewExportLogsPartialSuccess()
orig[3] = GenTestExportLogsPartialSuccess()
orig[4] = NewExportLogsPartialSuccess()
return orig
}
func GenTestExportLogsPartialSuccessSlice() []ExportLogsPartialSuccess {
orig := make([]ExportLogsPartialSuccess, 5)
orig[1] = *GenTestExportLogsPartialSuccess()
orig[3] = *GenTestExportLogsPartialSuccess()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportLogsServiceRequest is the top-level message that carries logs through the logs pipeline; the pdata Logs type is built on it.
// Use NewExportLogsServiceRequest to create a new instance; a zero-initialized instance is not valid for use.
type ExportLogsServiceRequest struct {
ResourceLogs []*ResourceLogs
}
var (
protoPoolExportLogsServiceRequest = sync.Pool{
New: func() any {
return &ExportLogsServiceRequest{}
},
}
)
func NewExportLogsServiceRequest() *ExportLogsServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &ExportLogsServiceRequest{}
}
return protoPoolExportLogsServiceRequest.Get().(*ExportLogsServiceRequest)
}
func DeleteExportLogsServiceRequest(orig *ExportLogsServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceLogs {
DeleteResourceLogs(orig.ResourceLogs[i], true)
}
orig.Reset()
if nullable {
protoPoolExportLogsServiceRequest.Put(orig)
}
}
func CopyExportLogsServiceRequest(dest, src *ExportLogsServiceRequest) *ExportLogsServiceRequest {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportLogsServiceRequest()
}
dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
return dest
}
func CopyExportLogsServiceRequestSlice(dest, src []ExportLogsServiceRequest) []ExportLogsServiceRequest {
var newDest []ExportLogsServiceRequest
if cap(dest) < len(src) {
newDest = make([]ExportLogsServiceRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsServiceRequest(&dest[i], false)
}
}
for i := range src {
CopyExportLogsServiceRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyExportLogsServiceRequestPtrSlice(dest, src []*ExportLogsServiceRequest) []*ExportLogsServiceRequest {
var newDest []*ExportLogsServiceRequest
if cap(dest) < len(src) {
newDest = make([]*ExportLogsServiceRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsServiceRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsServiceRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsServiceRequest()
}
}
for i := range src {
CopyExportLogsServiceRequest(newDest[i], src[i])
}
return newDest
}
func (orig *ExportLogsServiceRequest) Reset() {
*orig = ExportLogsServiceRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportLogsServiceRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceLogs) > 0 {
dest.WriteObjectField("resourceLogs")
dest.WriteArrayStart()
orig.ResourceLogs[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceLogs); i++ {
dest.WriteMore()
orig.ResourceLogs[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportLogsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceLogs", "resource_logs":
for iter.ReadArray() {
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ExportLogsServiceRequest) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceLogs {
l = orig.ResourceLogs[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportLogsServiceRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *ExportLogsServiceRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
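// Encoding sketch (illustrative name only): because MarshalProto fills the
// buffer from the end toward the front, the loop above iterates ResourceLogs
// in reverse so the entries still appear in their original order on the wire.
func exampleEncodeLogsRequest() ([]byte, error) {
req := GenTestExportLogsServiceRequest()
buf := make([]byte, req.SizeProto())
n := req.MarshalProto(buf)
wire := buf[len(buf)-n:]
dec := NewExportLogsServiceRequest()
if err := dec.UnmarshalProto(wire); err != nil {
return nil, err
}
return wire, nil
}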
func GenTestExportLogsServiceRequest() *ExportLogsServiceRequest {
orig := NewExportLogsServiceRequest()
orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
return orig
}
func GenTestExportLogsServiceRequestPtrSlice() []*ExportLogsServiceRequest {
orig := make([]*ExportLogsServiceRequest, 5)
orig[0] = NewExportLogsServiceRequest()
orig[1] = GenTestExportLogsServiceRequest()
orig[2] = NewExportLogsServiceRequest()
orig[3] = GenTestExportLogsServiceRequest()
orig[4] = NewExportLogsServiceRequest()
return orig
}
func GenTestExportLogsServiceRequestSlice() []ExportLogsServiceRequest {
orig := make([]ExportLogsServiceRequest, 5)
orig[1] = *GenTestExportLogsServiceRequest()
orig[3] = *GenTestExportLogsServiceRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportLogsServiceResponse represents the logs export response for a gRPC/HTTP client or server.
type ExportLogsServiceResponse struct {
PartialSuccess ExportLogsPartialSuccess
}
var (
protoPoolExportLogsServiceResponse = sync.Pool{
New: func() any {
return &ExportLogsServiceResponse{}
},
}
)
func NewExportLogsServiceResponse() *ExportLogsServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &ExportLogsServiceResponse{}
}
return protoPoolExportLogsServiceResponse.Get().(*ExportLogsServiceResponse)
}
func DeleteExportLogsServiceResponse(orig *ExportLogsServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportLogsPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportLogsServiceResponse.Put(orig)
}
}
func CopyExportLogsServiceResponse(dest, src *ExportLogsServiceResponse) *ExportLogsServiceResponse {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportLogsServiceResponse()
}
CopyExportLogsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
return dest
}
func CopyExportLogsServiceResponseSlice(dest, src []ExportLogsServiceResponse) []ExportLogsServiceResponse {
var newDest []ExportLogsServiceResponse
if cap(dest) < len(src) {
newDest = make([]ExportLogsServiceResponse, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsServiceResponse(&dest[i], false)
}
}
for i := range src {
CopyExportLogsServiceResponse(&newDest[i], &src[i])
}
return newDest
}
func CopyExportLogsServiceResponsePtrSlice(dest, src []*ExportLogsServiceResponse) []*ExportLogsServiceResponse {
var newDest []*ExportLogsServiceResponse
if cap(dest) < len(src) {
newDest = make([]*ExportLogsServiceResponse, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsServiceResponse()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportLogsServiceResponse(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportLogsServiceResponse()
}
}
for i := range src {
CopyExportLogsServiceResponse(newDest[i], src[i])
}
return newDest
}
func (orig *ExportLogsServiceResponse) Reset() {
*orig = ExportLogsServiceResponse{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportLogsServiceResponse) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
orig.PartialSuccess.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportLogsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
orig.PartialSuccess.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ExportLogsServiceResponse) SizeProto() int {
var n int
var l int
_ = l
l = orig.PartialSuccess.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *ExportLogsServiceResponse) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.PartialSuccess.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func (orig *ExportLogsServiceResponse) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
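// JSON round-trip sketch, assuming the internal json package exposes
// jsoniter-style Borrow/Return helpers (BorrowStream, BorrowIterator and a
// Buffer method on Stream); those names are assumptions here, not a
// documented API, and the function name is hypothetical.
func exampleJSONRoundTripLogsResponse() *ExportLogsServiceResponse {
resp := GenTestExportLogsServiceResponse()
stream := json.BorrowStream(nil) // assumed helper; buffers in memory
defer json.ReturnStream(stream)
resp.MarshalJSON(stream)
iter := json.BorrowIterator(stream.Buffer()) // assumed helper
defer json.ReturnIterator(iter)
dec := NewExportLogsServiceResponse()
dec.UnmarshalJSON(iter)
return dec
}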
func GenTestExportLogsServiceResponse() *ExportLogsServiceResponse {
orig := NewExportLogsServiceResponse()
orig.PartialSuccess = *GenTestExportLogsPartialSuccess()
return orig
}
func GenTestExportLogsServiceResponsePtrSlice() []*ExportLogsServiceResponse {
orig := make([]*ExportLogsServiceResponse, 5)
orig[0] = NewExportLogsServiceResponse()
orig[1] = GenTestExportLogsServiceResponse()
orig[2] = NewExportLogsServiceResponse()
orig[3] = GenTestExportLogsServiceResponse()
orig[4] = NewExportLogsServiceResponse()
return orig
}
func GenTestExportLogsServiceResponseSlice() []ExportLogsServiceResponse {
orig := make([]ExportLogsServiceResponse, 5)
orig[1] = *GenTestExportLogsServiceResponse()
orig[3] = *GenTestExportLogsServiceResponse()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportMetricsPartialSuccess represents the details of a partially successful metrics export request.
type ExportMetricsPartialSuccess struct {
RejectedDataPoints int64
ErrorMessage string
}
var (
protoPoolExportMetricsPartialSuccess = sync.Pool{
New: func() any {
return &ExportMetricsPartialSuccess{}
},
}
)
func NewExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &ExportMetricsPartialSuccess{}
}
return protoPoolExportMetricsPartialSuccess.Get().(*ExportMetricsPartialSuccess)
}
func DeleteExportMetricsPartialSuccess(orig *ExportMetricsPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportMetricsPartialSuccess.Put(orig)
}
}
func CopyExportMetricsPartialSuccess(dest, src *ExportMetricsPartialSuccess) *ExportMetricsPartialSuccess {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportMetricsPartialSuccess()
}
dest.RejectedDataPoints = src.RejectedDataPoints
dest.ErrorMessage = src.ErrorMessage
return dest
}
func CopyExportMetricsPartialSuccessSlice(dest, src []ExportMetricsPartialSuccess) []ExportMetricsPartialSuccess {
var newDest []ExportMetricsPartialSuccess
if cap(dest) < len(src) {
newDest = make([]ExportMetricsPartialSuccess, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsPartialSuccess(&dest[i], false)
}
}
for i := range src {
CopyExportMetricsPartialSuccess(&newDest[i], &src[i])
}
return newDest
}
func CopyExportMetricsPartialSuccessPtrSlice(dest, src []*ExportMetricsPartialSuccess) []*ExportMetricsPartialSuccess {
var newDest []*ExportMetricsPartialSuccess
if cap(dest) < len(src) {
newDest = make([]*ExportMetricsPartialSuccess, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsPartialSuccess()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsPartialSuccess(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsPartialSuccess()
}
}
for i := range src {
CopyExportMetricsPartialSuccess(newDest[i], src[i])
}
return newDest
}
func (orig *ExportMetricsPartialSuccess) Reset() {
*orig = ExportMetricsPartialSuccess{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportMetricsPartialSuccess) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedDataPoints != int64(0) {
dest.WriteObjectField("rejectedDataPoints")
dest.WriteInt64(orig.RejectedDataPoints)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportMetricsPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedDataPoints", "rejected_data_points":
orig.RejectedDataPoints = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
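// Note: each case above accepts both the lowerCamelCase name (e.g.
// "rejectedDataPoints") and the original snake_case proto field name
// ("rejected_data_points"), as the proto3 JSON mapping requires parsers
// to accept both spellings.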
func (orig *ExportMetricsPartialSuccess) SizeProto() int {
var n int
var l int
_ = l
if orig.RejectedDataPoints != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedDataPoints))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportMetricsPartialSuccess) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedDataPoints != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedDataPoints))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func (orig *ExportMetricsPartialSuccess) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedDataPoints", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedDataPoints = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportMetricsPartialSuccess() *ExportMetricsPartialSuccess {
orig := NewExportMetricsPartialSuccess()
orig.RejectedDataPoints = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
func GenTestExportMetricsPartialSuccessPtrSlice() []*ExportMetricsPartialSuccess {
orig := make([]*ExportMetricsPartialSuccess, 5)
orig[0] = NewExportMetricsPartialSuccess()
orig[1] = GenTestExportMetricsPartialSuccess()
orig[2] = NewExportMetricsPartialSuccess()
orig[3] = GenTestExportMetricsPartialSuccess()
orig[4] = NewExportMetricsPartialSuccess()
return orig
}
func GenTestExportMetricsPartialSuccessSlice() []ExportMetricsPartialSuccess {
orig := make([]ExportMetricsPartialSuccess, 5)
orig[1] = *GenTestExportMetricsPartialSuccess()
orig[3] = *GenTestExportMetricsPartialSuccess()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportMetricsServiceRequest is the top-level message that carries metrics through the metrics pipeline; the pdata Metrics type is built on it.
// Use NewExportMetricsServiceRequest to create a new instance; a zero-initialized instance is not valid for use.
type ExportMetricsServiceRequest struct {
ResourceMetrics []*ResourceMetrics
}
var (
protoPoolExportMetricsServiceRequest = sync.Pool{
New: func() any {
return &ExportMetricsServiceRequest{}
},
}
)
func NewExportMetricsServiceRequest() *ExportMetricsServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &ExportMetricsServiceRequest{}
}
return protoPoolExportMetricsServiceRequest.Get().(*ExportMetricsServiceRequest)
}
func DeleteExportMetricsServiceRequest(orig *ExportMetricsServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceMetrics {
DeleteResourceMetrics(orig.ResourceMetrics[i], true)
}
orig.Reset()
if nullable {
protoPoolExportMetricsServiceRequest.Put(orig)
}
}
func CopyExportMetricsServiceRequest(dest, src *ExportMetricsServiceRequest) *ExportMetricsServiceRequest {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportMetricsServiceRequest()
}
dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
return dest
}
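// Deep-copy sketch (illustrative name only): passing a nil dest allocates a
// new request, and the copy owns its own ResourceMetrics, so later mutation
// of the source does not leak into the copy.
func exampleDeepCopyMetricsRequest() *ExportMetricsServiceRequest {
src := GenTestExportMetricsServiceRequest()
dst := CopyExportMetricsServiceRequest(nil, src)
src.ResourceMetrics = nil // dst still holds its own deep copy
return dst
}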
func CopyExportMetricsServiceRequestSlice(dest, src []ExportMetricsServiceRequest) []ExportMetricsServiceRequest {
var newDest []ExportMetricsServiceRequest
if cap(dest) < len(src) {
newDest = make([]ExportMetricsServiceRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsServiceRequest(&dest[i], false)
}
}
for i := range src {
CopyExportMetricsServiceRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyExportMetricsServiceRequestPtrSlice(dest, src []*ExportMetricsServiceRequest) []*ExportMetricsServiceRequest {
var newDest []*ExportMetricsServiceRequest
if cap(dest) < len(src) {
newDest = make([]*ExportMetricsServiceRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsServiceRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsServiceRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsServiceRequest()
}
}
for i := range src {
CopyExportMetricsServiceRequest(newDest[i], src[i])
}
return newDest
}
func (orig *ExportMetricsServiceRequest) Reset() {
*orig = ExportMetricsServiceRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportMetricsServiceRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceMetrics) > 0 {
dest.WriteObjectField("resourceMetrics")
dest.WriteArrayStart()
orig.ResourceMetrics[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceMetrics); i++ {
dest.WriteMore()
orig.ResourceMetrics[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportMetricsServiceRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceMetrics", "resource_metrics":
for iter.ReadArray() {
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ExportMetricsServiceRequest) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceMetrics {
l = orig.ResourceMetrics[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportMetricsServiceRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *ExportMetricsServiceRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If in a group parsing, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportMetricsServiceRequest() *ExportMetricsServiceRequest {
orig := NewExportMetricsServiceRequest()
orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
return orig
}
func GenTestExportMetricsServiceRequestPtrSlice() []*ExportMetricsServiceRequest {
orig := make([]*ExportMetricsServiceRequest, 5)
orig[0] = NewExportMetricsServiceRequest()
orig[1] = GenTestExportMetricsServiceRequest()
orig[2] = NewExportMetricsServiceRequest()
orig[3] = GenTestExportMetricsServiceRequest()
orig[4] = NewExportMetricsServiceRequest()
return orig
}
func GenTestExportMetricsServiceRequestSlice() []ExportMetricsServiceRequest {
orig := make([]ExportMetricsServiceRequest, 5)
orig[1] = *GenTestExportMetricsServiceRequest()
orig[3] = *GenTestExportMetricsServiceRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportMetricsServiceResponse represents the metrics export response for a gRPC/HTTP client or server.
type ExportMetricsServiceResponse struct {
PartialSuccess ExportMetricsPartialSuccess
}
var (
protoPoolExportMetricsServiceResponse = sync.Pool{
New: func() any {
return &ExportMetricsServiceResponse{}
},
}
)
func NewExportMetricsServiceResponse() *ExportMetricsServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &ExportMetricsServiceResponse{}
}
return protoPoolExportMetricsServiceResponse.Get().(*ExportMetricsServiceResponse)
}
func DeleteExportMetricsServiceResponse(orig *ExportMetricsServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportMetricsPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportMetricsServiceResponse.Put(orig)
}
}
func CopyExportMetricsServiceResponse(dest, src *ExportMetricsServiceResponse) *ExportMetricsServiceResponse {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportMetricsServiceResponse()
}
CopyExportMetricsPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
return dest
}
func CopyExportMetricsServiceResponseSlice(dest, src []ExportMetricsServiceResponse) []ExportMetricsServiceResponse {
var newDest []ExportMetricsServiceResponse
if cap(dest) < len(src) {
newDest = make([]ExportMetricsServiceResponse, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsServiceResponse(&dest[i], false)
}
}
for i := range src {
CopyExportMetricsServiceResponse(&newDest[i], &src[i])
}
return newDest
}
func CopyExportMetricsServiceResponsePtrSlice(dest, src []*ExportMetricsServiceResponse) []*ExportMetricsServiceResponse {
var newDest []*ExportMetricsServiceResponse
if cap(dest) < len(src) {
newDest = make([]*ExportMetricsServiceResponse, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsServiceResponse()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportMetricsServiceResponse(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportMetricsServiceResponse()
}
}
for i := range src {
CopyExportMetricsServiceResponse(newDest[i], src[i])
}
return newDest
}
func (orig *ExportMetricsServiceResponse) Reset() {
*orig = ExportMetricsServiceResponse{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportMetricsServiceResponse) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
orig.PartialSuccess.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportMetricsServiceResponse) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
orig.PartialSuccess.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ExportMetricsServiceResponse) SizeProto() int {
var n int
var l int
_ = l
l = orig.PartialSuccess.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
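// Note: unlike scalar fields elsewhere in this package, which are skipped
// when zero, PartialSuccess is a message-typed field and is always counted
// here (and always emitted by MarshalProto below): an empty nested message
// still costs its one-byte tag plus a zero length.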
func (orig *ExportMetricsServiceResponse) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.PartialSuccess.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func (orig *ExportMetricsServiceResponse) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportMetricsServiceResponse() *ExportMetricsServiceResponse {
orig := NewExportMetricsServiceResponse()
orig.PartialSuccess = *GenTestExportMetricsPartialSuccess()
return orig
}
func GenTestExportMetricsServiceResponsePtrSlice() []*ExportMetricsServiceResponse {
orig := make([]*ExportMetricsServiceResponse, 5)
orig[0] = NewExportMetricsServiceResponse()
orig[1] = GenTestExportMetricsServiceResponse()
orig[2] = NewExportMetricsServiceResponse()
orig[3] = GenTestExportMetricsServiceResponse()
orig[4] = NewExportMetricsServiceResponse()
return orig
}
func GenTestExportMetricsServiceResponseSlice() []ExportMetricsServiceResponse {
orig := make([]ExportMetricsServiceResponse, 5)
orig[1] = *GenTestExportMetricsServiceResponse()
orig[3] = *GenTestExportMetricsServiceResponse()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportProfilesPartialSuccess represents the details of a partially successful profiles export request.
type ExportProfilesPartialSuccess struct {
RejectedProfiles int64
ErrorMessage string
}
var (
protoPoolExportProfilesPartialSuccess = sync.Pool{
New: func() any {
return &ExportProfilesPartialSuccess{}
},
}
)
func NewExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &ExportProfilesPartialSuccess{}
}
return protoPoolExportProfilesPartialSuccess.Get().(*ExportProfilesPartialSuccess)
}
func DeleteExportProfilesPartialSuccess(orig *ExportProfilesPartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportProfilesPartialSuccess.Put(orig)
}
}
func CopyExportProfilesPartialSuccess(dest, src *ExportProfilesPartialSuccess) *ExportProfilesPartialSuccess {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportProfilesPartialSuccess()
}
dest.RejectedProfiles = src.RejectedProfiles
dest.ErrorMessage = src.ErrorMessage
return dest
}
func CopyExportProfilesPartialSuccessSlice(dest, src []ExportProfilesPartialSuccess) []ExportProfilesPartialSuccess {
var newDest []ExportProfilesPartialSuccess
if cap(dest) < len(src) {
newDest = make([]ExportProfilesPartialSuccess, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesPartialSuccess(&dest[i], false)
}
}
for i := range src {
CopyExportProfilesPartialSuccess(&newDest[i], &src[i])
}
return newDest
}
func CopyExportProfilesPartialSuccessPtrSlice(dest, src []*ExportProfilesPartialSuccess) []*ExportProfilesPartialSuccess {
var newDest []*ExportProfilesPartialSuccess
if cap(dest) < len(src) {
newDest = make([]*ExportProfilesPartialSuccess, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesPartialSuccess()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesPartialSuccess(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesPartialSuccess()
}
}
for i := range src {
CopyExportProfilesPartialSuccess(newDest[i], src[i])
}
return newDest
}
func (orig *ExportProfilesPartialSuccess) Reset() {
*orig = ExportProfilesPartialSuccess{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportProfilesPartialSuccess) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedProfiles != int64(0) {
dest.WriteObjectField("rejectedProfiles")
dest.WriteInt64(orig.RejectedProfiles)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportProfilesPartialSuccess) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedProfiles", "rejected_profiles":
orig.RejectedProfiles = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ExportProfilesPartialSuccess) SizeProto() int {
var n int
var l int
_ = l
if orig.RejectedProfiles != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedProfiles))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportProfilesPartialSuccess) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedProfiles != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedProfiles))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func (orig *ExportProfilesPartialSuccess) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedProfiles", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedProfiles = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportProfilesPartialSuccess() *ExportProfilesPartialSuccess {
orig := NewExportProfilesPartialSuccess()
orig.RejectedProfiles = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
func GenTestExportProfilesPartialSuccessPtrSlice() []*ExportProfilesPartialSuccess {
orig := make([]*ExportProfilesPartialSuccess, 5)
orig[0] = NewExportProfilesPartialSuccess()
orig[1] = GenTestExportProfilesPartialSuccess()
orig[2] = NewExportProfilesPartialSuccess()
orig[3] = GenTestExportProfilesPartialSuccess()
orig[4] = NewExportProfilesPartialSuccess()
return orig
}
func GenTestExportProfilesPartialSuccessSlice() []ExportProfilesPartialSuccess {
orig := make([]ExportProfilesPartialSuccess, 5)
orig[1] = *GenTestExportProfilesPartialSuccess()
orig[3] = *GenTestExportProfilesPartialSuccess()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportProfilesServiceRequest is the top-level message that carries profiles through the profiles pipeline; the pdata Profiles type is built on it.
// Use NewExportProfilesServiceRequest to create a new instance; a zero-initialized instance is not valid for use.
type ExportProfilesServiceRequest struct {
ResourceProfiles []*ResourceProfiles
Dictionary ProfilesDictionary
}
var (
protoPoolExportProfilesServiceRequest = sync.Pool{
New: func() any {
return &ExportProfilesServiceRequest{}
},
}
)
func NewExportProfilesServiceRequest() *ExportProfilesServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &ExportProfilesServiceRequest{}
}
return protoPoolExportProfilesServiceRequest.Get().(*ExportProfilesServiceRequest)
}
func DeleteExportProfilesServiceRequest(orig *ExportProfilesServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceProfiles {
DeleteResourceProfiles(orig.ResourceProfiles[i], true)
}
DeleteProfilesDictionary(&orig.Dictionary, false)
orig.Reset()
if nullable {
protoPoolExportProfilesServiceRequest.Put(orig)
}
}
func CopyExportProfilesServiceRequest(dest, src *ExportProfilesServiceRequest) *ExportProfilesServiceRequest {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportProfilesServiceRequest()
}
dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
return dest
}
func CopyExportProfilesServiceRequestSlice(dest, src []ExportProfilesServiceRequest) []ExportProfilesServiceRequest {
var newDest []ExportProfilesServiceRequest
if cap(dest) < len(src) {
newDest = make([]ExportProfilesServiceRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesServiceRequest(&dest[i], false)
}
}
for i := range src {
CopyExportProfilesServiceRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyExportProfilesServiceRequestPtrSlice(dest, src []*ExportProfilesServiceRequest) []*ExportProfilesServiceRequest {
var newDest []*ExportProfilesServiceRequest
if cap(dest) < len(src) {
newDest = make([]*ExportProfilesServiceRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesServiceRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesServiceRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesServiceRequest()
}
}
for i := range src {
CopyExportProfilesServiceRequest(newDest[i], src[i])
}
return newDest
}
func (orig *ExportProfilesServiceRequest) Reset() {
*orig = ExportProfilesServiceRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportProfilesServiceRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceProfiles) > 0 {
dest.WriteObjectField("resourceProfiles")
dest.WriteArrayStart()
orig.ResourceProfiles[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceProfiles); i++ {
dest.WriteMore()
orig.ResourceProfiles[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectField("dictionary")
orig.Dictionary.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportProfilesServiceRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceProfiles", "resource_profiles":
for iter.ReadArray() {
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
}
case "dictionary":
orig.Dictionary.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ExportProfilesServiceRequest) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceProfiles {
l = orig.ResourceProfiles[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.Dictionary.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *ExportProfilesServiceRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = orig.Dictionary.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
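// Note: because the buffer is filled back to front, the Dictionary (field 2)
// ends up first on the wire, ahead of the ResourceProfiles entries (field 1).
// That is valid protobuf; field order within a message is not significant.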
func (orig *ExportProfilesServiceRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportProfilesServiceRequest() *ExportProfilesServiceRequest {
orig := NewExportProfilesServiceRequest()
orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
orig.Dictionary = *GenTestProfilesDictionary()
return orig
}
func GenTestExportProfilesServiceRequestPtrSlice() []*ExportProfilesServiceRequest {
orig := make([]*ExportProfilesServiceRequest, 5)
orig[0] = NewExportProfilesServiceRequest()
orig[1] = GenTestExportProfilesServiceRequest()
orig[2] = NewExportProfilesServiceRequest()
orig[3] = GenTestExportProfilesServiceRequest()
orig[4] = NewExportProfilesServiceRequest()
return orig
}
func GenTestExportProfilesServiceRequestSlice() []ExportProfilesServiceRequest {
orig := make([]ExportProfilesServiceRequest, 5)
orig[1] = *GenTestExportProfilesServiceRequest()
orig[3] = *GenTestExportProfilesServiceRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportProfilesServiceResponse represents the profiles export response for a gRPC/HTTP client or server.
type ExportProfilesServiceResponse struct {
PartialSuccess ExportProfilesPartialSuccess
}
var (
protoPoolExportProfilesServiceResponse = sync.Pool{
New: func() any {
return &ExportProfilesServiceResponse{}
},
}
)
func NewExportProfilesServiceResponse() *ExportProfilesServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &ExportProfilesServiceResponse{}
}
return protoPoolExportProfilesServiceResponse.Get().(*ExportProfilesServiceResponse)
}
func DeleteExportProfilesServiceResponse(orig *ExportProfilesServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportProfilesPartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportProfilesServiceResponse.Put(orig)
}
}
func CopyExportProfilesServiceResponse(dest, src *ExportProfilesServiceResponse) *ExportProfilesServiceResponse {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportProfilesServiceResponse()
}
CopyExportProfilesPartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
return dest
}
func CopyExportProfilesServiceResponseSlice(dest, src []ExportProfilesServiceResponse) []ExportProfilesServiceResponse {
var newDest []ExportProfilesServiceResponse
if cap(dest) < len(src) {
newDest = make([]ExportProfilesServiceResponse, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesServiceResponse(&dest[i], false)
}
}
for i := range src {
CopyExportProfilesServiceResponse(&newDest[i], &src[i])
}
return newDest
}
func CopyExportProfilesServiceResponsePtrSlice(dest, src []*ExportProfilesServiceResponse) []*ExportProfilesServiceResponse {
var newDest []*ExportProfilesServiceResponse
if cap(dest) < len(src) {
newDest = make([]*ExportProfilesServiceResponse, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesServiceResponse()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportProfilesServiceResponse(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportProfilesServiceResponse()
}
}
for i := range src {
CopyExportProfilesServiceResponse(newDest[i], src[i])
}
return newDest
}
func (orig *ExportProfilesServiceResponse) Reset() {
*orig = ExportProfilesServiceResponse{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportProfilesServiceResponse) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
orig.PartialSuccess.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ExportProfilesServiceResponse) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
orig.PartialSuccess.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ExportProfilesServiceResponse) SizeProto() int {
var n int
var l int
_ = l
l = orig.PartialSuccess.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *ExportProfilesServiceResponse) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.PartialSuccess.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func (orig *ExportProfilesServiceResponse) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Consume the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportProfilesServiceResponse() *ExportProfilesServiceResponse {
orig := NewExportProfilesServiceResponse()
orig.PartialSuccess = *GenTestExportProfilesPartialSuccess()
return orig
}
func GenTestExportProfilesServiceResponsePtrSlice() []*ExportProfilesServiceResponse {
orig := make([]*ExportProfilesServiceResponse, 5)
orig[0] = NewExportProfilesServiceResponse()
orig[1] = GenTestExportProfilesServiceResponse()
orig[2] = NewExportProfilesServiceResponse()
orig[3] = GenTestExportProfilesServiceResponse()
orig[4] = NewExportProfilesServiceResponse()
return orig
}
func GenTestExportProfilesServiceResponseSlice() []ExportProfilesServiceResponse {
orig := make([]ExportProfilesServiceResponse, 5)
orig[1] = *GenTestExportProfilesServiceResponse()
orig[3] = *GenTestExportProfilesServiceResponse()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportTracePartialSuccess represents the details of a partially successful trace export request.
type ExportTracePartialSuccess struct {
RejectedSpans int64
ErrorMessage string
}
var (
protoPoolExportTracePartialSuccess = sync.Pool{
New: func() any {
return &ExportTracePartialSuccess{}
},
}
)
func NewExportTracePartialSuccess() *ExportTracePartialSuccess {
if !UseProtoPooling.IsEnabled() {
return &ExportTracePartialSuccess{}
}
return protoPoolExportTracePartialSuccess.Get().(*ExportTracePartialSuccess)
}
func DeleteExportTracePartialSuccess(orig *ExportTracePartialSuccess, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolExportTracePartialSuccess.Put(orig)
}
}
func CopyExportTracePartialSuccess(dest, src *ExportTracePartialSuccess) *ExportTracePartialSuccess {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportTracePartialSuccess()
}
dest.RejectedSpans = src.RejectedSpans
dest.ErrorMessage = src.ErrorMessage
return dest
}
func CopyExportTracePartialSuccessSlice(dest, src []ExportTracePartialSuccess) []ExportTracePartialSuccess {
var newDest []ExportTracePartialSuccess
if cap(dest) < len(src) {
newDest = make([]ExportTracePartialSuccess, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTracePartialSuccess(&dest[i], false)
}
}
for i := range src {
CopyExportTracePartialSuccess(&newDest[i], &src[i])
}
return newDest
}
func CopyExportTracePartialSuccessPtrSlice(dest, src []*ExportTracePartialSuccess) []*ExportTracePartialSuccess {
var newDest []*ExportTracePartialSuccess
if cap(dest) < len(src) {
newDest = make([]*ExportTracePartialSuccess, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTracePartialSuccess()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTracePartialSuccess(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTracePartialSuccess()
}
}
for i := range src {
CopyExportTracePartialSuccess(newDest[i], src[i])
}
return newDest
}
func (orig *ExportTracePartialSuccess) Reset() {
*orig = ExportTracePartialSuccess{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportTracePartialSuccess) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RejectedSpans != int64(0) {
dest.WriteObjectField("rejectedSpans")
dest.WriteInt64(orig.RejectedSpans)
}
if orig.ErrorMessage != "" {
dest.WriteObjectField("errorMessage")
dest.WriteString(orig.ErrorMessage)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExportTracePartialSuccess) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "rejectedSpans", "rejected_spans":
orig.RejectedSpans = iter.ReadInt64()
case "errorMessage", "error_message":
orig.ErrorMessage = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ExportTracePartialSuccess) SizeProto() int {
var n int
var l int
_ = l
if orig.RejectedSpans != 0 {
n += 1 + proto.Sov(uint64(orig.RejectedSpans))
}
l = len(orig.ErrorMessage)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportTracePartialSuccess) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RejectedSpans != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.RejectedSpans))
pos--
buf[pos] = 0x8
}
l = len(orig.ErrorMessage)
if l > 0 {
pos -= l
copy(buf[pos:], orig.ErrorMessage)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func (orig *ExportTracePartialSuccess) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field RejectedSpans", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.RejectedSpans = int64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorMessage", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ErrorMessage = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportTracePartialSuccess() *ExportTracePartialSuccess {
orig := NewExportTracePartialSuccess()
orig.RejectedSpans = int64(13)
orig.ErrorMessage = "test_errormessage"
return orig
}
func GenTestExportTracePartialSuccessPtrSlice() []*ExportTracePartialSuccess {
orig := make([]*ExportTracePartialSuccess, 5)
orig[0] = NewExportTracePartialSuccess()
orig[1] = GenTestExportTracePartialSuccess()
orig[2] = NewExportTracePartialSuccess()
orig[3] = GenTestExportTracePartialSuccess()
orig[4] = NewExportTracePartialSuccess()
return orig
}
func GenTestExportTracePartialSuccessSlice() []ExportTracePartialSuccess {
orig := make([]ExportTracePartialSuccess, 5)
orig[1] = *GenTestExportTracePartialSuccess()
orig[3] = *GenTestExportTracePartialSuccess()
return orig
}
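// Illustrative sketch (editorial, not generated): a minimal proto round-trip
// through the helpers above. MarshalProto fills buf backwards from the end and
// returns the number of bytes written, so the encoded message occupies the tail
// of the buffer. The function name below is ours, not part of the generated API.
func exampleRoundTripExportTracePartialSuccess() error {
	src := GenTestExportTracePartialSuccess()
	buf := make([]byte, src.SizeProto())
	n := src.MarshalProto(buf)
	dst := NewExportTracePartialSuccess()
	if err := dst.UnmarshalProto(buf[len(buf)-n:]); err != nil {
		return err
	}
	if dst.RejectedSpans != src.RejectedSpans || dst.ErrorMessage != src.ErrorMessage {
		return fmt.Errorf("round-trip mismatch: %+v != %+v", dst, src)
	}
	return nil
}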
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportTraceServiceRequest is the top-level message that carries ResourceSpans through the traces export pipeline.
// Use NewExportTraceServiceRequest to create an instance; it honors proto pooling when enabled.
type ExportTraceServiceRequest struct {
ResourceSpans []*ResourceSpans
}
var (
protoPoolExportTraceServiceRequest = sync.Pool{
New: func() any {
return &ExportTraceServiceRequest{}
},
}
)
func NewExportTraceServiceRequest() *ExportTraceServiceRequest {
if !UseProtoPooling.IsEnabled() {
return &ExportTraceServiceRequest{}
}
return protoPoolExportTraceServiceRequest.Get().(*ExportTraceServiceRequest)
}
func DeleteExportTraceServiceRequest(orig *ExportTraceServiceRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceSpans {
DeleteResourceSpans(orig.ResourceSpans[i], true)
}
orig.Reset()
if nullable {
protoPoolExportTraceServiceRequest.Put(orig)
}
}
func CopyExportTraceServiceRequest(dest, src *ExportTraceServiceRequest) *ExportTraceServiceRequest {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportTraceServiceRequest()
}
dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
return dest
}
func CopyExportTraceServiceRequestSlice(dest, src []ExportTraceServiceRequest) []ExportTraceServiceRequest {
var newDest []ExportTraceServiceRequest
if cap(dest) < len(src) {
newDest = make([]ExportTraceServiceRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTraceServiceRequest(&dest[i], false)
}
}
for i := range src {
CopyExportTraceServiceRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyExportTraceServiceRequestPtrSlice(dest, src []*ExportTraceServiceRequest) []*ExportTraceServiceRequest {
var newDest []*ExportTraceServiceRequest
if cap(dest) < len(src) {
newDest = make([]*ExportTraceServiceRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTraceServiceRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTraceServiceRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTraceServiceRequest()
}
}
for i := range src {
CopyExportTraceServiceRequest(newDest[i], src[i])
}
return newDest
}
func (orig *ExportTraceServiceRequest) Reset() {
*orig = ExportTraceServiceRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportTraceServiceRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceSpans) > 0 {
dest.WriteObjectField("resourceSpans")
dest.WriteArrayStart()
orig.ResourceSpans[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceSpans); i++ {
dest.WriteMore()
orig.ResourceSpans[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExportTraceServiceRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceSpans", "resource_spans":
for iter.ReadArray() {
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ExportTraceServiceRequest) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceSpans {
l = orig.ResourceSpans[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ExportTraceServiceRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *ExportTraceServiceRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportTraceServiceRequest() *ExportTraceServiceRequest {
orig := NewExportTraceServiceRequest()
orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
return orig
}
func GenTestExportTraceServiceRequestPtrSlice() []*ExportTraceServiceRequest {
orig := make([]*ExportTraceServiceRequest, 5)
orig[0] = NewExportTraceServiceRequest()
orig[1] = GenTestExportTraceServiceRequest()
orig[2] = NewExportTraceServiceRequest()
orig[3] = GenTestExportTraceServiceRequest()
orig[4] = NewExportTraceServiceRequest()
return orig
}
func GenTestExportTraceServiceRequestSlice() []ExportTraceServiceRequest {
orig := make([]ExportTraceServiceRequest, 5)
orig[1] = *GenTestExportTraceServiceRequest()
orig[3] = *GenTestExportTraceServiceRequest()
return orig
}
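// Illustrative sketch (editorial, not generated): CopyExportTraceServiceRequestPtrSlice
// reuses dest's backing array and element pointers whenever cap(dest) >= len(src),
// so copying into an already-sized slice allocates nothing new. The function name
// below is ours.
func exampleCopyRequestsWithReuse() []*ExportTraceServiceRequest {
	src := GenTestExportTraceServiceRequestPtrSlice()
	dest := CopyExportTraceServiceRequestPtrSlice(nil, src) // first copy allocates
	dest = CopyExportTraceServiceRequestPtrSlice(dest, src) // second copy reuses the pointers
	return dest
}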
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ExportTraceServiceResponse represents the trace export response for the gRPC/HTTP client/server.
type ExportTraceServiceResponse struct {
PartialSuccess ExportTracePartialSuccess
}
var (
protoPoolExportTraceServiceResponse = sync.Pool{
New: func() any {
return &ExportTraceServiceResponse{}
},
}
)
func NewExportTraceServiceResponse() *ExportTraceServiceResponse {
if !UseProtoPooling.IsEnabled() {
return &ExportTraceServiceResponse{}
}
return protoPoolExportTraceServiceResponse.Get().(*ExportTraceServiceResponse)
}
func DeleteExportTraceServiceResponse(orig *ExportTraceServiceResponse, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteExportTracePartialSuccess(&orig.PartialSuccess, false)
orig.Reset()
if nullable {
protoPoolExportTraceServiceResponse.Put(orig)
}
}
func CopyExportTraceServiceResponse(dest, src *ExportTraceServiceResponse) *ExportTraceServiceResponse {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewExportTraceServiceResponse()
}
CopyExportTracePartialSuccess(&dest.PartialSuccess, &src.PartialSuccess)
return dest
}
func CopyExportTraceServiceResponseSlice(dest, src []ExportTraceServiceResponse) []ExportTraceServiceResponse {
var newDest []ExportTraceServiceResponse
if cap(dest) < len(src) {
newDest = make([]ExportTraceServiceResponse, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTraceServiceResponse(&dest[i], false)
}
}
for i := range src {
CopyExportTraceServiceResponse(&newDest[i], &src[i])
}
return newDest
}
func CopyExportTraceServiceResponsePtrSlice(dest, src []*ExportTraceServiceResponse) []*ExportTraceServiceResponse {
var newDest []*ExportTraceServiceResponse
if cap(dest) < len(src) {
newDest = make([]*ExportTraceServiceResponse, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTraceServiceResponse()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteExportTraceServiceResponse(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewExportTraceServiceResponse()
}
}
for i := range src {
CopyExportTraceServiceResponse(newDest[i], src[i])
}
return newDest
}
func (orig *ExportTraceServiceResponse) Reset() {
*orig = ExportTraceServiceResponse{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ExportTraceServiceResponse) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("partialSuccess")
orig.PartialSuccess.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ExportTraceServiceResponse) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "partialSuccess", "partial_success":
orig.PartialSuccess.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ExportTraceServiceResponse) SizeProto() int {
var n int
var l int
_ = l
l = orig.PartialSuccess.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *ExportTraceServiceResponse) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.PartialSuccess.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
return len(buf) - pos
}
func (orig *ExportTraceServiceResponse) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PartialSuccess", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.PartialSuccess.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestExportTraceServiceResponse() *ExportTraceServiceResponse {
orig := NewExportTraceServiceResponse()
orig.PartialSuccess = *GenTestExportTracePartialSuccess()
return orig
}
func GenTestExportTraceServiceResponsePtrSlice() []*ExportTraceServiceResponse {
orig := make([]*ExportTraceServiceResponse, 5)
orig[0] = NewExportTraceServiceResponse()
orig[1] = GenTestExportTraceServiceResponse()
orig[2] = NewExportTraceServiceResponse()
orig[3] = GenTestExportTraceServiceResponse()
orig[4] = NewExportTraceServiceResponse()
return orig
}
func GenTestExportTraceServiceResponseSlice() []ExportTraceServiceResponse {
orig := make([]ExportTraceServiceResponse, 5)
orig[1] = *GenTestExportTraceServiceResponse()
orig[3] = *GenTestExportTraceServiceResponse()
return orig
}
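// Illustrative sketch (editorial, not generated): SizeProto reports the exact
// encoded length, so a buffer of that size is filled completely by MarshalProto
// and the returned count equals len(buf). The function name below is ours.
func exampleMarshalTraceResponse() []byte {
	resp := GenTestExportTraceServiceResponse()
	buf := make([]byte, resp.SizeProto())
	n := resp.MarshalProto(buf)
	return buf[len(buf)-n:] // n == len(buf) here; slicing shown for generality
}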
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
type Function struct {
NameStrindex int32
SystemNameStrindex int32
FilenameStrindex int32
StartLine int64
}
var (
protoPoolFunction = sync.Pool{
New: func() any {
return &Function{}
},
}
)
func NewFunction() *Function {
if !UseProtoPooling.IsEnabled() {
return &Function{}
}
return protoPoolFunction.Get().(*Function)
}
func DeleteFunction(orig *Function, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolFunction.Put(orig)
}
}
func CopyFunction(dest, src *Function) *Function {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewFunction()
}
dest.NameStrindex = src.NameStrindex
dest.SystemNameStrindex = src.SystemNameStrindex
dest.FilenameStrindex = src.FilenameStrindex
dest.StartLine = src.StartLine
return dest
}
func CopyFunctionSlice(dest, src []Function) []Function {
var newDest []Function
if cap(dest) < len(src) {
newDest = make([]Function, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteFunction(&dest[i], false)
}
}
for i := range src {
CopyFunction(&newDest[i], &src[i])
}
return newDest
}
func CopyFunctionPtrSlice(dest, src []*Function) []*Function {
var newDest []*Function
if cap(dest) < len(src) {
newDest = make([]*Function, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewFunction()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteFunction(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewFunction()
}
}
for i := range src {
CopyFunction(newDest[i], src[i])
}
return newDest
}
func (orig *Function) Reset() {
*orig = Function{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Function) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.NameStrindex != int32(0) {
dest.WriteObjectField("nameStrindex")
dest.WriteInt32(orig.NameStrindex)
}
if orig.SystemNameStrindex != int32(0) {
dest.WriteObjectField("systemNameStrindex")
dest.WriteInt32(orig.SystemNameStrindex)
}
if orig.FilenameStrindex != int32(0) {
dest.WriteObjectField("filenameStrindex")
dest.WriteInt32(orig.FilenameStrindex)
}
if orig.StartLine != int64(0) {
dest.WriteObjectField("startLine")
dest.WriteInt64(orig.StartLine)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Function) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "nameStrindex", "name_strindex":
orig.NameStrindex = iter.ReadInt32()
case "systemNameStrindex", "system_name_strindex":
orig.SystemNameStrindex = iter.ReadInt32()
case "filenameStrindex", "filename_strindex":
orig.FilenameStrindex = iter.ReadInt32()
case "startLine", "start_line":
orig.StartLine = iter.ReadInt64()
default:
iter.Skip()
}
}
}
func (orig *Function) SizeProto() int {
var n int
var l int
_ = l
if orig.NameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.NameStrindex))
}
if orig.SystemNameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.SystemNameStrindex))
}
if orig.FilenameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
}
if orig.StartLine != 0 {
n += 1 + proto.Sov(uint64(orig.StartLine))
}
return n
}
func (orig *Function) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.NameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.NameStrindex))
pos--
buf[pos] = 0x8
}
if orig.SystemNameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.SystemNameStrindex))
pos--
buf[pos] = 0x10
}
if orig.FilenameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
pos--
buf[pos] = 0x18
}
if orig.StartLine != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.StartLine))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
func (orig *Function) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.NameStrindex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.SystemNameStrindex = int32(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FilenameStrindex = int32(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.StartLine = int64(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestFunction() *Function {
orig := NewFunction()
orig.NameStrindex = int32(13)
orig.SystemNameStrindex = int32(13)
orig.FilenameStrindex = int32(13)
orig.StartLine = int64(13)
return orig
}
func GenTestFunctionPtrSlice() []*Function {
orig := make([]*Function, 5)
orig[0] = NewFunction()
orig[1] = GenTestFunction()
orig[2] = NewFunction()
orig[3] = GenTestFunction()
orig[4] = NewFunction()
return orig
}
func GenTestFunctionSlice() []Function {
orig := make([]Function, 5)
orig[1] = *GenTestFunction()
orig[3] = *GenTestFunction()
return orig
}
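// Illustrative sketch (editorial, not generated): CopyFunction returns nil when
// src is nil, short-circuits when src and dest are the same pointer, and
// allocates dest via NewFunction (honoring proto pooling) when dest is nil. The
// function name below is ours.
func exampleCopyFunctionNilDest() *Function {
	src := GenTestFunction()
	return CopyFunction(nil, src) // dest is obtained via NewFunction internally
}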
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
type Gauge struct {
DataPoints []*NumberDataPoint
}
var (
protoPoolGauge = sync.Pool{
New: func() any {
return &Gauge{}
},
}
)
func NewGauge() *Gauge {
if !UseProtoPooling.IsEnabled() {
return &Gauge{}
}
return protoPoolGauge.Get().(*Gauge)
}
func DeleteGauge(orig *Gauge, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteNumberDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolGauge.Put(orig)
}
}
func CopyGauge(dest, src *Gauge) *Gauge {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewGauge()
}
dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
return dest
}
func CopyGaugeSlice(dest, src []Gauge) []Gauge {
var newDest []Gauge
if cap(dest) < len(src) {
newDest = make([]Gauge, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteGauge(&dest[i], false)
}
}
for i := range src {
CopyGauge(&newDest[i], &src[i])
}
return newDest
}
func CopyGaugePtrSlice(dest, src []*Gauge) []*Gauge {
var newDest []*Gauge
if cap(dest) < len(src) {
newDest = make([]*Gauge, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewGauge()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteGauge(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewGauge()
}
}
for i := range src {
CopyGauge(newDest[i], src[i])
}
return newDest
}
func (orig *Gauge) Reset() {
*orig = Gauge{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Gauge) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Gauge) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *Gauge) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Gauge) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *Gauge) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestGauge() *Gauge {
orig := NewGauge()
orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
return orig
}
func GenTestGaugePtrSlice() []*Gauge {
orig := make([]*Gauge, 5)
orig[0] = NewGauge()
orig[1] = GenTestGauge()
orig[2] = NewGauge()
orig[3] = GenTestGauge()
orig[4] = NewGauge()
return orig
}
func GenTestGaugeSlice() []Gauge {
orig := make([]Gauge, 5)
orig[1] = *GenTestGauge()
orig[3] = *GenTestGauge()
return orig
}
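// Illustrative sketch (editorial, not generated): the Delete* helpers reset an
// object and, when pooling is enabled, first delete owned elements and then
// return the object itself to its pool if it is pointer-typed (nullable=true).
// The function name below is ours.
func exampleReleaseGauge() {
	g := GenTestGauge()
	DeleteGauge(g, true) // owned *NumberDataPoint elements are deleted first when pooling is on
}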
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Histogram represents a metric type whose data points carry a histogram aggregated from all reported measurements over a time interval.
type Histogram struct {
DataPoints []*HistogramDataPoint
AggregationTemporality AggregationTemporality
}
var (
protoPoolHistogram = sync.Pool{
New: func() any {
return &Histogram{}
},
}
)
func NewHistogram() *Histogram {
if !UseProtoPooling.IsEnabled() {
return &Histogram{}
}
return protoPoolHistogram.Get().(*Histogram)
}
func DeleteHistogram(orig *Histogram, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteHistogramDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolHistogram.Put(orig)
}
}
func CopyHistogram(dest, src *Histogram) *Histogram {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewHistogram()
}
dest.DataPoints = CopyHistogramDataPointPtrSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
return dest
}
func CopyHistogramSlice(dest, src []Histogram) []Histogram {
var newDest []Histogram
if cap(dest) < len(src) {
newDest = make([]Histogram, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteHistogram(&dest[i], false)
}
}
for i := range src {
CopyHistogram(&newDest[i], &src[i])
}
return newDest
}
func CopyHistogramPtrSlice(dest, src []*Histogram) []*Histogram {
var newDest []*Histogram
if cap(dest) < len(src) {
newDest = make([]*Histogram, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogram()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteHistogram(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogram()
}
}
for i := range src {
CopyHistogram(newDest[i], src[i])
}
return newDest
}
func (orig *Histogram) Reset() {
*orig = Histogram{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Histogram) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Histogram) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
default:
iter.Skip()
}
}
}
func (orig *Histogram) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
return n
}
func (orig *Histogram) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
return len(buf) - pos
}
func (orig *Histogram) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewHistogramDataPoint())
err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = AggregationTemporality(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestHistogram() *Histogram {
orig := NewHistogram()
orig.DataPoints = []*HistogramDataPoint{{}, GenTestHistogramDataPoint()}
orig.AggregationTemporality = AggregationTemporality(13)
return orig
}
func GenTestHistogramPtrSlice() []*Histogram {
orig := make([]*Histogram, 5)
orig[0] = NewHistogram()
orig[1] = GenTestHistogram()
orig[2] = NewHistogram()
orig[3] = GenTestHistogram()
orig[4] = NewHistogram()
return orig
}
func GenTestHistogramSlice() []Histogram {
orig := make([]Histogram, 5)
orig[1] = *GenTestHistogram()
orig[3] = *GenTestHistogram()
return orig
}
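// Editorial note (not generated): the UnmarshalJSON methods in this package
// accept both the canonical camelCase field names and their proto snake_case
// variants (e.g. "aggregationTemporality" and "aggregation_temporality"), and
// silently skip unknown fields instead of failing.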
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *HistogramDataPoint) GetSum_() any {
if m != nil {
return m.Sum_
}
return nil
}
type HistogramDataPoint_Sum struct {
Sum float64
}
func (m *HistogramDataPoint) GetSum() float64 {
if v, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok {
return v.Sum
}
return float64(0)
}
func (m *HistogramDataPoint) GetMin_() any {
if m != nil {
return m.Min_
}
return nil
}
type HistogramDataPoint_Min struct {
Min float64
}
func (m *HistogramDataPoint) GetMin() float64 {
if v, ok := m.GetMin_().(*HistogramDataPoint_Min); ok {
return v.Min
}
return float64(0)
}
func (m *HistogramDataPoint) GetMax_() any {
if m != nil {
return m.Max_
}
return nil
}
type HistogramDataPoint_Max struct {
Max float64
}
func (m *HistogramDataPoint) GetMax() float64 {
if v, ok := m.GetMax_().(*HistogramDataPoint_Max); ok {
return v.Max
}
return float64(0)
}
// HistogramDataPoint is a single data point in a timeseries that describes the time-varying distribution of recorded values as a histogram.
type HistogramDataPoint struct {
Attributes []KeyValue
StartTimeUnixNano uint64
TimeUnixNano uint64
Count uint64
Sum_ any
BucketCounts []uint64
ExplicitBounds []float64
Exemplars []Exemplar
Flags uint32
Min_ any
Max_ any
}
var (
protoPoolHistogramDataPoint = sync.Pool{
New: func() any {
return &HistogramDataPoint{}
},
}
ProtoPoolHistogramDataPoint_Sum = sync.Pool{
New: func() any {
return &HistogramDataPoint_Sum{}
},
}
ProtoPoolHistogramDataPoint_Min = sync.Pool{
New: func() any {
return &HistogramDataPoint_Min{}
},
}
ProtoPoolHistogramDataPoint_Max = sync.Pool{
New: func() any {
return &HistogramDataPoint_Max{}
},
}
)
func NewHistogramDataPoint() *HistogramDataPoint {
if !UseProtoPooling.IsEnabled() {
return &HistogramDataPoint{}
}
return protoPoolHistogramDataPoint.Get().(*HistogramDataPoint)
}
func DeleteHistogramDataPoint(orig *HistogramDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Sum_.(type) {
case *HistogramDataPoint_Sum:
if UseProtoPooling.IsEnabled() {
ov.Sum = float64(0)
ProtoPoolHistogramDataPoint_Sum.Put(ov)
}
}
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
switch ov := orig.Min_.(type) {
case *HistogramDataPoint_Min:
if UseProtoPooling.IsEnabled() {
ov.Min = float64(0)
ProtoPoolHistogramDataPoint_Min.Put(ov)
}
}
switch ov := orig.Max_.(type) {
case *HistogramDataPoint_Max:
if UseProtoPooling.IsEnabled() {
ov.Max = float64(0)
ProtoPoolHistogramDataPoint_Max.Put(ov)
}
}
orig.Reset()
if nullable {
protoPoolHistogramDataPoint.Put(orig)
}
}
func CopyHistogramDataPoint(dest, src *HistogramDataPoint) *HistogramDataPoint {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewHistogramDataPoint()
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
switch t := src.Sum_.(type) {
case *HistogramDataPoint_Sum:
var ov *HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Sum{}
} else {
ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
ov.Sum = t.Sum
dest.Sum_ = ov
default:
dest.Sum_ = nil
}
dest.BucketCounts = append(dest.BucketCounts[:0], src.BucketCounts...)
dest.ExplicitBounds = append(dest.ExplicitBounds[:0], src.ExplicitBounds...)
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
dest.Flags = src.Flags
switch t := src.Min_.(type) {
case *HistogramDataPoint_Min:
var ov *HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Min{}
} else {
ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
ov.Min = t.Min
dest.Min_ = ov
default:
dest.Min_ = nil
}
switch t := src.Max_.(type) {
case *HistogramDataPoint_Max:
var ov *HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Max{}
} else {
ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
ov.Max = t.Max
dest.Max_ = ov
default:
dest.Max_ = nil
}
return dest
}
func CopyHistogramDataPointSlice(dest, src []HistogramDataPoint) []HistogramDataPoint {
var newDest []HistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]HistogramDataPoint, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteHistogramDataPoint(&dest[i], false)
}
}
for i := range src {
CopyHistogramDataPoint(&newDest[i], &src[i])
}
return newDest
}
func CopyHistogramDataPointPtrSlice(dest, src []*HistogramDataPoint) []*HistogramDataPoint {
var newDest []*HistogramDataPoint
if cap(dest) < len(src) {
newDest = make([]*HistogramDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogramDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteHistogramDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewHistogramDataPoint()
}
}
for i := range src {
CopyHistogramDataPoint(newDest[i], src[i])
}
return newDest
}
func (orig *HistogramDataPoint) Reset() {
*orig = HistogramDataPoint{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *HistogramDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if len(orig.BucketCounts) > 0 {
dest.WriteObjectField("bucketCounts")
dest.WriteArrayStart()
dest.WriteUint64(orig.BucketCounts[0])
for i := 1; i < len(orig.BucketCounts); i++ {
dest.WriteMore()
dest.WriteUint64(orig.BucketCounts[i])
}
dest.WriteArrayEnd()
}
if len(orig.ExplicitBounds) > 0 {
dest.WriteObjectField("explicitBounds")
dest.WriteArrayStart()
dest.WriteFloat64(orig.ExplicitBounds[0])
for i := 1; i < len(orig.ExplicitBounds); i++ {
dest.WriteMore()
dest.WriteFloat64(orig.ExplicitBounds[i])
}
dest.WriteArrayEnd()
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
dest.WriteObjectField("min")
dest.WriteFloat64(orig.Min)
}
if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
dest.WriteObjectField("max")
dest.WriteFloat64(orig.Max)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *HistogramDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
{
var ov *HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Sum{}
} else {
ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
ov.Sum = iter.ReadFloat64()
orig.Sum_ = ov
}
case "bucketCounts", "bucket_counts":
for iter.ReadArray() {
orig.BucketCounts = append(orig.BucketCounts, iter.ReadUint64())
}
case "explicitBounds", "explicit_bounds":
for iter.ReadArray() {
orig.ExplicitBounds = append(orig.ExplicitBounds, iter.ReadFloat64())
}
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, Exemplar{})
orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
case "min":
{
var ov *HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Min{}
} else {
ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
ov.Min = iter.ReadFloat64()
orig.Min_ = ov
}
case "max":
{
var ov *HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Max{}
} else {
ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
ov.Max = iter.ReadFloat64()
orig.Max_ = ov
}
default:
iter.Skip()
}
}
}
func (orig *HistogramDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
_ = orig
n += 9
}
l = len(orig.BucketCounts)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.ExplicitBounds)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Exemplars {
l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
_ = orig
n += 9
}
if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
_ = orig
n += 9
}
return n
}
func (orig *HistogramDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig, ok := orig.Sum_.(*HistogramDataPoint_Sum); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
l = len(orig.BucketCounts)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.BucketCounts[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x32
}
l = len(orig.ExplicitBounds)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.ExplicitBounds[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x3a
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x42
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x50
}
if orig, ok := orig.Min_.(*HistogramDataPoint_Min); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Min))
pos--
buf[pos] = 0x59
}
if orig, ok := orig.Max_.(*HistogramDataPoint_Max); ok {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Max))
pos--
buf[pos] = 0x61
}
return len(buf) - pos
}
func (orig *HistogramDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *HistogramDataPoint_Sum
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Sum{}
} else {
ov = ProtoPoolHistogramDataPoint_Sum.Get().(*HistogramDataPoint_Sum)
}
ov.Sum = math.Float64frombits(num)
orig.Sum_ = ov
case 6:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.BucketCounts = make([]uint64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.BucketCounts[i] = uint64(num)
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field BucketCounts", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.BucketCounts = append(orig.BucketCounts, uint64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
}
case 7:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.ExplicitBounds = make([]float64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.ExplicitBounds[i] = math.Float64frombits(num)
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field ExplicitBounds", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ExplicitBounds = append(orig.ExplicitBounds, math.Float64frombits(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
}
case 8:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, Exemplar{})
err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 11:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *HistogramDataPoint_Min
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Min{}
} else {
ov = ProtoPoolHistogramDataPoint_Min.Get().(*HistogramDataPoint_Min)
}
ov.Min = math.Float64frombits(num)
orig.Min_ = ov
case 12:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *HistogramDataPoint_Max
if !UseProtoPooling.IsEnabled() {
ov = &HistogramDataPoint_Max{}
} else {
ov = ProtoPoolHistogramDataPoint_Max.Get().(*HistogramDataPoint_Max)
}
ov.Max = math.Float64frombits(num)
orig.Max_ = ov
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestHistogramDataPoint() *HistogramDataPoint {
orig := NewHistogramDataPoint()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.StartTimeUnixNano = uint64(13)
orig.TimeUnixNano = uint64(13)
orig.Count = uint64(13)
orig.Sum_ = &HistogramDataPoint_Sum{Sum: float64(3.1415926)}
orig.BucketCounts = []uint64{uint64(0), uint64(13)}
orig.ExplicitBounds = []float64{float64(0), float64(3.1415926)}
orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
orig.Flags = uint32(13)
orig.Min_ = &HistogramDataPoint_Min{Min: float64(3.1415926)}
orig.Max_ = &HistogramDataPoint_Max{Max: float64(3.1415926)}
return orig
}
func GenTestHistogramDataPointPtrSlice() []*HistogramDataPoint {
orig := make([]*HistogramDataPoint, 5)
orig[0] = NewHistogramDataPoint()
orig[1] = GenTestHistogramDataPoint()
orig[2] = NewHistogramDataPoint()
orig[3] = GenTestHistogramDataPoint()
orig[4] = NewHistogramDataPoint()
return orig
}
func GenTestHistogramDataPointSlice() []HistogramDataPoint {
orig := make([]HistogramDataPoint, 5)
orig[1] = *GenTestHistogramDataPoint()
orig[3] = *GenTestHistogramDataPoint()
return orig
}
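// Illustrative sketch (editorial, not generated): the optional sum/min/max
// fields are modeled as single-field wrapper structs stored in an `any`, so an
// unset field is distinguishable from an explicit zero. GetSum returns 0 for an
// unset field; check presence with the type assertion. The function name below
// is ours.
func exampleHistogramSumOrDefault(dp *HistogramDataPoint) (float64, bool) {
	if v, ok := dp.GetSum_().(*HistogramDataPoint_Sum); ok {
		return v.Sum, true
	}
	return 0, false
}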
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// InstrumentationScope is a message representing the instrumentation scope information.
type InstrumentationScope struct {
Name string
Version string
Attributes []KeyValue
DroppedAttributesCount uint32
}
var (
protoPoolInstrumentationScope = sync.Pool{
New: func() any {
return &InstrumentationScope{}
},
}
)
func NewInstrumentationScope() *InstrumentationScope {
if !UseProtoPooling.IsEnabled() {
return &InstrumentationScope{}
}
return protoPoolInstrumentationScope.Get().(*InstrumentationScope)
}
func DeleteInstrumentationScope(orig *InstrumentationScope, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolInstrumentationScope.Put(orig)
}
}
func CopyInstrumentationScope(dest, src *InstrumentationScope) *InstrumentationScope {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewInstrumentationScope()
}
dest.Name = src.Name
dest.Version = src.Version
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
return dest
}
func CopyInstrumentationScopeSlice(dest, src []InstrumentationScope) []InstrumentationScope {
var newDest []InstrumentationScope
if cap(dest) < len(src) {
newDest = make([]InstrumentationScope, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteInstrumentationScope(&dest[i], false)
}
}
for i := range src {
CopyInstrumentationScope(&newDest[i], &src[i])
}
return newDest
}
func CopyInstrumentationScopePtrSlice(dest, src []*InstrumentationScope) []*InstrumentationScope {
var newDest []*InstrumentationScope
if cap(dest) < len(src) {
newDest = make([]*InstrumentationScope, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewInstrumentationScope()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteInstrumentationScope(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewInstrumentationScope()
}
}
for i := range src {
CopyInstrumentationScope(newDest[i], src[i])
}
return newDest
}
func (orig *InstrumentationScope) Reset() {
*orig = InstrumentationScope{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *InstrumentationScope) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if orig.Version != "" {
dest.WriteObjectField("version")
dest.WriteString(orig.Version)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *InstrumentationScope) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "name":
orig.Name = iter.ReadString()
case "version":
orig.Version = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *InstrumentationScope) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Version)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
return n
}
func (orig *InstrumentationScope) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Version)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Version)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
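// MarshalProto writes fields in reverse order, back-to-front, so nested message
// lengths are known before their length prefixes are emitted and the encoded
// bytes end up in the tail of buf. The hard-coded bytes are protobuf tags,
// tag = fieldNumber<<3 | wireType: 0xa is field 1/LEN, 0x12 field 2/LEN,
// 0x1a field 3/LEN, and 0x20 field 4/VARINT. A sketch of the calling
// convention (the helper name is illustrative):
func marshalInstrumentationScopeSketch(orig *InstrumentationScope) []byte {
buf := make([]byte, orig.SizeProto())
n := orig.MarshalProto(buf) // fills buf from the end, returns bytes written
return buf[len(buf)-n:]
}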
func (orig *InstrumentationScope) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Version = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
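// A round-trip sketch combining SizeProto, MarshalProto and UnmarshalProto
// (the helper name is illustrative):
func roundTripInstrumentationScopeSketch(src *InstrumentationScope) (*InstrumentationScope, error) {
buf := make([]byte, src.SizeProto())
src.MarshalProto(buf)
dst := NewInstrumentationScope()
if err := dst.UnmarshalProto(buf); err != nil {
DeleteInstrumentationScope(dst, true)
return nil, err
}
return dst, nil
}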
func GenTestInstrumentationScope() *InstrumentationScope {
orig := NewInstrumentationScope()
orig.Name = "test_name"
orig.Version = "test_version"
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
return orig
}
func GenTestInstrumentationScopePtrSlice() []*InstrumentationScope {
orig := make([]*InstrumentationScope, 5)
orig[0] = NewInstrumentationScope()
orig[1] = GenTestInstrumentationScope()
orig[2] = NewInstrumentationScope()
orig[3] = GenTestInstrumentationScope()
orig[4] = NewInstrumentationScope()
return orig
}
func GenTestInstrumentationScopeSlice() []InstrumentationScope {
orig := make([]InstrumentationScope, 5)
orig[1] = *GenTestInstrumentationScope()
orig[3] = *GenTestInstrumentationScope()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type IPAddr struct {
IP []byte
Zone string
}
var (
protoPoolIPAddr = sync.Pool{
New: func() any {
return &IPAddr{}
},
}
)
func NewIPAddr() *IPAddr {
if !UseProtoPooling.IsEnabled() {
return &IPAddr{}
}
return protoPoolIPAddr.Get().(*IPAddr)
}
func DeleteIPAddr(orig *IPAddr, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolIPAddr.Put(orig)
}
}
func CopyIPAddr(dest, src *IPAddr) *IPAddr {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewIPAddr()
}
dest.IP = src.IP
dest.Zone = src.Zone
return dest
}
func CopyIPAddrSlice(dest, src []IPAddr) []IPAddr {
var newDest []IPAddr
if cap(dest) < len(src) {
newDest = make([]IPAddr, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteIPAddr(&dest[i], false)
}
}
for i := range src {
CopyIPAddr(&newDest[i], &src[i])
}
return newDest
}
func CopyIPAddrPtrSlice(dest, src []*IPAddr) []*IPAddr {
var newDest []*IPAddr
if cap(dest) < len(src) {
newDest = make([]*IPAddr, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewIPAddr()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteIPAddr(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewIPAddr()
}
}
for i := range src {
CopyIPAddr(newDest[i], src[i])
}
return newDest
}
func (orig *IPAddr) Reset() {
*orig = IPAddr{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *IPAddr) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.IP) > 0 {
dest.WriteObjectField("iP")
dest.WriteBytes(orig.IP)
}
if orig.Zone != "" {
dest.WriteObjectField("zone")
dest.WriteString(orig.Zone)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *IPAddr) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "iP":
orig.IP = iter.ReadBytes()
case "zone":
orig.Zone = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *IPAddr) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.IP)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Zone)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *IPAddr) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.IP)
if l > 0 {
pos -= l
copy(buf[pos:], orig.IP)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Zone)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Zone)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
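// UnmarshalProto decodes orig from buf. Note that the IP bytes are copied out
// of buf (make+copy below), so the decoded value does not alias the caller's
// buffer; string fields such as Zone are copied by the string conversion itself.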
func (orig *IPAddr) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
if length != 0 {
orig.IP = make([]byte, length)
copy(orig.IP, buf[startPos:pos])
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Zone = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestIPAddr() *IPAddr {
orig := NewIPAddr()
orig.IP = []byte{1, 2, 3}
orig.Zone = "test_zone"
return orig
}
func GenTestIPAddrPtrSlice() []*IPAddr {
orig := make([]*IPAddr, 5)
orig[0] = NewIPAddr()
orig[1] = GenTestIPAddr()
orig[2] = NewIPAddr()
orig[3] = GenTestIPAddr()
orig[4] = NewIPAddr()
return orig
}
func GenTestIPAddrSlice() []IPAddr {
orig := make([]IPAddr, 5)
orig[1] = *GenTestIPAddr()
orig[3] = *GenTestIPAddr()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type KeyValue struct {
Key string
Value AnyValue
}
var (
protoPoolKeyValue = sync.Pool{
New: func() any {
return &KeyValue{}
},
}
)
func NewKeyValue() *KeyValue {
if !UseProtoPooling.IsEnabled() {
return &KeyValue{}
}
return protoPoolKeyValue.Get().(*KeyValue)
}
func DeleteKeyValue(orig *KeyValue, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Value, false)
orig.Reset()
if nullable {
protoPoolKeyValue.Put(orig)
}
}
func CopyKeyValue(dest, src *KeyValue) *KeyValue {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewKeyValue()
}
dest.Key = src.Key
CopyAnyValue(&dest.Value, &src.Value)
return dest
}
func CopyKeyValueSlice(dest, src []KeyValue) []KeyValue {
var newDest []KeyValue
if cap(dest) < len(src) {
newDest = make([]KeyValue, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValue(&dest[i], false)
}
}
for i := range src {
CopyKeyValue(&newDest[i], &src[i])
}
return newDest
}
func CopyKeyValuePtrSlice(dest, src []*KeyValue) []*KeyValue {
var newDest []*KeyValue
if cap(dest) < len(src) {
newDest = make([]*KeyValue, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValue()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValue(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValue()
}
}
for i := range src {
CopyKeyValue(newDest[i], src[i])
}
return newDest
}
func (orig *KeyValue) Reset() {
*orig = KeyValue{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
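// The "value" property is written unconditionally, with no emptiness check,
// mirroring the proto encoding below, which likewise always emits field 2.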
func (orig *KeyValue) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Key != "" {
dest.WriteObjectField("key")
dest.WriteString(orig.Key)
}
dest.WriteObjectField("value")
orig.Value.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *KeyValue) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "key":
orig.Key = iter.ReadString()
case "value":
orig.Value.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *KeyValue) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.Key)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.Value.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *KeyValue) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Key)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Key)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = orig.Value.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
func (orig *KeyValue) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Key = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Value.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestKeyValue() *KeyValue {
orig := NewKeyValue()
orig.Key = "test_key"
orig.Value = *GenTestAnyValue()
return orig
}
func GenTestKeyValuePtrSlice() []*KeyValue {
orig := make([]*KeyValue, 5)
orig[0] = NewKeyValue()
orig[1] = GenTestKeyValue()
orig[2] = NewKeyValue()
orig[3] = GenTestKeyValue()
orig[4] = NewKeyValue()
return orig
}
func GenTestKeyValueSlice() []KeyValue {
orig := make([]KeyValue, 5)
orig[1] = *GenTestKeyValue()
orig[3] = *GenTestKeyValue()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// KeyValueAndUnit represents a custom 'dictionary native'
// style of encoding attributes which is more convenient
// for profiles than opentelemetry.proto.common.v1.KeyValue.
type KeyValueAndUnit struct {
KeyStrindex int32
Value AnyValue
UnitStrindex int32
}
var (
protoPoolKeyValueAndUnit = sync.Pool{
New: func() any {
return &KeyValueAndUnit{}
},
}
)
func NewKeyValueAndUnit() *KeyValueAndUnit {
if !UseProtoPooling.IsEnabled() {
return &KeyValueAndUnit{}
}
return protoPoolKeyValueAndUnit.Get().(*KeyValueAndUnit)
}
func DeleteKeyValueAndUnit(orig *KeyValueAndUnit, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Value, false)
orig.Reset()
if nullable {
protoPoolKeyValueAndUnit.Put(orig)
}
}
func CopyKeyValueAndUnit(dest, src *KeyValueAndUnit) *KeyValueAndUnit {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewKeyValueAndUnit()
}
dest.KeyStrindex = src.KeyStrindex
CopyAnyValue(&dest.Value, &src.Value)
dest.UnitStrindex = src.UnitStrindex
return dest
}
func CopyKeyValueAndUnitSlice(dest, src []KeyValueAndUnit) []KeyValueAndUnit {
var newDest []KeyValueAndUnit
if cap(dest) < len(src) {
newDest = make([]KeyValueAndUnit, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValueAndUnit(&dest[i], false)
}
}
for i := range src {
CopyKeyValueAndUnit(&newDest[i], &src[i])
}
return newDest
}
func CopyKeyValueAndUnitPtrSlice(dest, src []*KeyValueAndUnit) []*KeyValueAndUnit {
var newDest []*KeyValueAndUnit
if cap(dest) < len(src) {
newDest = make([]*KeyValueAndUnit, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueAndUnit()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValueAndUnit(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueAndUnit()
}
}
for i := range src {
CopyKeyValueAndUnit(newDest[i], src[i])
}
return newDest
}
func (orig *KeyValueAndUnit) Reset() {
*orig = KeyValueAndUnit{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *KeyValueAndUnit) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.KeyStrindex != int32(0) {
dest.WriteObjectField("keyStrindex")
dest.WriteInt32(orig.KeyStrindex)
}
dest.WriteObjectField("value")
orig.Value.MarshalJSON(dest)
if orig.UnitStrindex != int32(0) {
dest.WriteObjectField("unitStrindex")
dest.WriteInt32(orig.UnitStrindex)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *KeyValueAndUnit) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "keyStrindex", "key_strindex":
orig.KeyStrindex = iter.ReadInt32()
case "value":
orig.Value.UnmarshalJSON(iter)
case "unitStrindex", "unit_strindex":
orig.UnitStrindex = iter.ReadInt32()
default:
iter.Skip()
}
}
}
func (orig *KeyValueAndUnit) SizeProto() int {
var n int
var l int
_ = l
if orig.KeyStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.KeyStrindex))
}
l = orig.Value.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.UnitStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.UnitStrindex))
}
return n
}
func (orig *KeyValueAndUnit) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.KeyStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.KeyStrindex))
pos--
buf[pos] = 0x8
}
l = orig.Value.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
if orig.UnitStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func (orig *KeyValueAndUnit) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.KeyStrindex = int32(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Value.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.UnitStrindex = int32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestKeyValueAndUnit() *KeyValueAndUnit {
orig := NewKeyValueAndUnit()
orig.KeyStrindex = int32(13)
orig.Value = *GenTestAnyValue()
orig.UnitStrindex = int32(13)
return orig
}
func GenTestKeyValueAndUnitPtrSlice() []*KeyValueAndUnit {
orig := make([]*KeyValueAndUnit, 5)
orig[0] = NewKeyValueAndUnit()
orig[1] = GenTestKeyValueAndUnit()
orig[2] = NewKeyValueAndUnit()
orig[3] = GenTestKeyValueAndUnit()
orig[4] = NewKeyValueAndUnit()
return orig
}
func GenTestKeyValueAndUnitSlice() []KeyValueAndUnit {
orig := make([]KeyValueAndUnit, 5)
orig[1] = *GenTestKeyValueAndUnit()
orig[3] = *GenTestKeyValueAndUnit()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message since oneof in AnyValue does not allow repeated fields.
type KeyValueList struct {
Values []KeyValue
}
var (
protoPoolKeyValueList = sync.Pool{
New: func() any {
return &KeyValueList{}
},
}
)
func NewKeyValueList() *KeyValueList {
if !UseProtoPooling.IsEnabled() {
return &KeyValueList{}
}
return protoPoolKeyValueList.Get().(*KeyValueList)
}
func DeleteKeyValueList(orig *KeyValueList, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Values {
DeleteKeyValue(&orig.Values[i], false)
}
orig.Reset()
if nullable {
protoPoolKeyValueList.Put(orig)
}
}
func CopyKeyValueList(dest, src *KeyValueList) *KeyValueList {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewKeyValueList()
}
dest.Values = CopyKeyValueSlice(dest.Values, src.Values)
return dest
}
func CopyKeyValueListSlice(dest, src []KeyValueList) []KeyValueList {
var newDest []KeyValueList
if cap(dest) < len(src) {
newDest = make([]KeyValueList, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValueList(&dest[i], false)
}
}
for i := range src {
CopyKeyValueList(&newDest[i], &src[i])
}
return newDest
}
func CopyKeyValueListPtrSlice(dest, src []*KeyValueList) []*KeyValueList {
var newDest []*KeyValueList
if cap(dest) < len(src) {
newDest = make([]*KeyValueList, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueList()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteKeyValueList(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewKeyValueList()
}
}
for i := range src {
CopyKeyValueList(newDest[i], src[i])
}
return newDest
}
func (orig *KeyValueList) Reset() {
*orig = KeyValueList{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *KeyValueList) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
orig.Values[0].MarshalJSON(dest)
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
orig.Values[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *KeyValueList) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, KeyValue{})
orig.Values[len(orig.Values)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *KeyValueList) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Values {
l = orig.Values[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *KeyValueList) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Values) - 1; i >= 0; i-- {
l = orig.Values[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *KeyValueList) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Values = append(orig.Values, KeyValue{})
err = orig.Values[len(orig.Values)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestKeyValueList() *KeyValueList {
orig := NewKeyValueList()
orig.Values = []KeyValue{{}, *GenTestKeyValue()}
return orig
}
func GenTestKeyValueListPtrSlice() []*KeyValueList {
orig := make([]*KeyValueList, 5)
orig[0] = NewKeyValueList()
orig[1] = GenTestKeyValueList()
orig[2] = NewKeyValueList()
orig[3] = GenTestKeyValueList()
orig[4] = NewKeyValueList()
return orig
}
func GenTestKeyValueListSlice() []KeyValueList {
orig := make([]KeyValueList, 5)
orig[1] = *GenTestKeyValueList()
orig[3] = *GenTestKeyValueList()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Line details a specific line in source code, linked to a function.
type Line struct {
FunctionIndex int32
Line int64
Column int64
}
var (
protoPoolLine = sync.Pool{
New: func() any {
return &Line{}
},
}
)
func NewLine() *Line {
if !UseProtoPooling.IsEnabled() {
return &Line{}
}
return protoPoolLine.Get().(*Line)
}
func DeleteLine(orig *Line, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolLine.Put(orig)
}
}
func CopyLine(dest, src *Line) *Line {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLine()
}
dest.FunctionIndex = src.FunctionIndex
dest.Line = src.Line
dest.Column = src.Column
return dest
}
func CopyLineSlice(dest, src []Line) []Line {
var newDest []Line
if cap(dest) < len(src) {
newDest = make([]Line, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLine(&dest[i], false)
}
}
for i := range src {
CopyLine(&newDest[i], &src[i])
}
return newDest
}
func CopyLinePtrSlice(dest, src []*Line) []*Line {
var newDest []*Line
if cap(dest) < len(src) {
newDest = make([]*Line, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLine()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLine(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLine()
}
}
for i := range src {
CopyLine(newDest[i], src[i])
}
return newDest
}
func (orig *Line) Reset() {
*orig = Line{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Line) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.FunctionIndex != int32(0) {
dest.WriteObjectField("functionIndex")
dest.WriteInt32(orig.FunctionIndex)
}
if orig.Line != int64(0) {
dest.WriteObjectField("line")
dest.WriteInt64(orig.Line)
}
if orig.Column != int64(0) {
dest.WriteObjectField("column")
dest.WriteInt64(orig.Column)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *Line) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "functionIndex", "function_index":
orig.FunctionIndex = iter.ReadInt32()
case "line":
orig.Line = iter.ReadInt64()
case "column":
orig.Column = iter.ReadInt64()
default:
iter.Skip()
}
}
}
func (orig *Line) SizeProto() int {
var n int
var l int
_ = l
if orig.FunctionIndex != 0 {
n += 1 + proto.Sov(uint64(orig.FunctionIndex))
}
if orig.Line != 0 {
n += 1 + proto.Sov(uint64(orig.Line))
}
if orig.Column != 0 {
n += 1 + proto.Sov(uint64(orig.Column))
}
return n
}
func (orig *Line) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.FunctionIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FunctionIndex))
pos--
buf[pos] = 0x8
}
if orig.Line != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Line))
pos--
buf[pos] = 0x10
}
if orig.Column != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Column))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func (orig *Line) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FunctionIndex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Line = int64(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Column = int64(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestLine() *Line {
orig := NewLine()
orig.FunctionIndex = int32(13)
orig.Line = int64(13)
orig.Column = int64(13)
return orig
}
func GenTestLinePtrSlice() []*Line {
orig := make([]*Line, 5)
orig[0] = NewLine()
orig[1] = GenTestLine()
orig[2] = NewLine()
orig[3] = GenTestLine()
orig[4] = NewLine()
return orig
}
func GenTestLineSlice() []Line {
orig := make([]Line, 5)
orig[1] = *GenTestLine()
orig[3] = *GenTestLine()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Link represents a pointer from a profile Sample to a trace Span.
type Link struct {
TraceId TraceID
SpanId SpanID
}
var (
protoPoolLink = sync.Pool{
New: func() any {
return &Link{}
},
}
)
func NewLink() *Link {
if !UseProtoPooling.IsEnabled() {
return &Link{}
}
return protoPoolLink.Get().(*Link)
}
func DeleteLink(orig *Link, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
protoPoolLink.Put(orig)
}
}
func CopyLink(dest, src *Link) *Link {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLink()
}
CopyTraceID(&dest.TraceId, &src.TraceId)
CopySpanID(&dest.SpanId, &src.SpanId)
return dest
}
func CopyLinkSlice(dest, src []Link) []Link {
var newDest []Link
if cap(dest) < len(src) {
newDest = make([]Link, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLink(&dest[i], false)
}
}
for i := range src {
CopyLink(&newDest[i], &src[i])
}
return newDest
}
func CopyLinkPtrSlice(dest, src []*Link) []*Link {
var newDest []*Link
if cap(dest) < len(src) {
newDest = make([]*Link, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLink()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLink(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLink()
}
}
for i := range src {
CopyLink(newDest[i], src[i])
}
return newDest
}
func (orig *Link) Reset() {
*orig = Link{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Link) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
orig.TraceId.MarshalJSON(dest)
}
if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
orig.SpanId.MarshalJSON(dest)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *Link) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
orig.SpanId.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *Link) SizeProto() int {
var n int
var l int
_ = l
l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
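// MarshalProto encodes orig into the tail of buf and returns the number of
// bytes written. Unlike MarshalJSON above, it always emits TraceId and SpanId,
// with no IsEmpty check.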
func (orig *Link) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
func (orig *Link) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestLink() *Link {
orig := NewLink()
orig.TraceId = *GenTestTraceID()
orig.SpanId = *GenTestSpanID()
return orig
}
func GenTestLinkPtrSlice() []*Link {
orig := make([]*Link, 5)
orig[0] = NewLink()
orig[1] = GenTestLink()
orig[2] = NewLink()
orig[3] = GenTestLink()
orig[4] = NewLink()
return orig
}
func GenTestLinkSlice() []Link {
orig := make([]Link, 5)
orig[1] = *GenTestLink()
orig[3] = *GenTestLink()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Location describes function and line table debug information.
type Location struct {
MappingIndex int32
Address uint64
Lines []*Line
AttributeIndices []int32
}
var (
protoPoolLocation = sync.Pool{
New: func() any {
return &Location{}
},
}
)
func NewLocation() *Location {
if !UseProtoPooling.IsEnabled() {
return &Location{}
}
return protoPoolLocation.Get().(*Location)
}
func DeleteLocation(orig *Location, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Lines {
DeleteLine(orig.Lines[i], true)
}
orig.Reset()
if nullable {
protoPoolLocation.Put(orig)
}
}
func CopyLocation(dest, src *Location) *Location {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLocation()
}
dest.MappingIndex = src.MappingIndex
dest.Address = src.Address
dest.Lines = CopyLinePtrSlice(dest.Lines, src.Lines)
dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
return dest
}
func CopyLocationSlice(dest, src []Location) []Location {
var newDest []Location
if cap(dest) < len(src) {
newDest = make([]Location, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLocation(&dest[i], false)
}
}
for i := range src {
CopyLocation(&newDest[i], &src[i])
}
return newDest
}
func CopyLocationPtrSlice(dest, src []*Location) []*Location {
var newDest []*Location
if cap(dest) < len(src) {
newDest = make([]*Location, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLocation()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLocation(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLocation()
}
}
for i := range src {
CopyLocation(newDest[i], src[i])
}
return newDest
}
func (orig *Location) Reset() {
*orig = Location{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Location) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.MappingIndex != int32(0) {
dest.WriteObjectField("mappingIndex")
dest.WriteInt32(orig.MappingIndex)
}
if orig.Address != uint64(0) {
dest.WriteObjectField("address")
dest.WriteUint64(orig.Address)
}
if len(orig.Lines) > 0 {
dest.WriteObjectField("lines")
dest.WriteArrayStart()
orig.Lines[0].MarshalJSON(dest)
for i := 1; i < len(orig.Lines); i++ {
dest.WriteMore()
orig.Lines[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *Location) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "mappingIndex", "mapping_index":
orig.MappingIndex = iter.ReadInt32()
case "address":
orig.Address = iter.ReadUint64()
case "lines":
for iter.ReadArray() {
orig.Lines = append(orig.Lines, NewLine())
orig.Lines[len(orig.Lines)-1].UnmarshalJSON(iter)
}
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func (orig *Location) SizeProto() int {
var n int
var l int
_ = l
if orig.MappingIndex != 0 {
n += 1 + proto.Sov(uint64(orig.MappingIndex))
}
if orig.Address != 0 {
n += 1 + proto.Sov(uint64(orig.Address))
}
for i := range orig.Lines {
l = orig.Lines[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Location) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.MappingIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MappingIndex))
pos--
buf[pos] = 0x8
}
if orig.Address != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Address))
pos--
buf[pos] = 0x10
}
for i := len(orig.Lines) - 1; i >= 0; i-- {
l = orig.Lines[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x22
}
return len(buf) - pos
}
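// AttributeIndices (field 4) is written packed: the varints are emitted first,
// then a single LEN-prefixed record. The decoder below also accepts the
// unpacked form (one VARINT record per element), as the protobuf wire format
// requires. A sketch of the tag arithmetic (the helper name is illustrative):
func locationAttributeIndicesTags() (packed, unpacked byte) {
const fieldNum = 4
const wireTypeLen, wireTypeVarint = 2, 0
packed = byte(fieldNum<<3 | wireTypeLen) // 0x22, as in MarshalProto above
unpacked = byte(fieldNum<<3 | wireTypeVarint) // 0x20, accepted by UnmarshalProto below
return packed, unpacked
}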
func (orig *Location) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MappingIndex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Address = uint64(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Lines", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Lines = append(orig.Lines, NewLine())
err = orig.Lines[len(orig.Lines)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 4:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestLocation() *Location {
orig := NewLocation()
orig.MappingIndex = int32(13)
orig.Address = uint64(13)
orig.Lines = []*Line{{}, GenTestLine()}
orig.AttributeIndices = []int32{int32(0), int32(13)}
return orig
}
func GenTestLocationPtrSlice() []*Location {
orig := make([]*Location, 5)
orig[0] = NewLocation()
orig[1] = GenTestLocation()
orig[2] = NewLocation()
orig[3] = GenTestLocation()
orig[4] = NewLocation()
return orig
}
func GenTestLocationSlice() []Location {
orig := make([]Location, 5)
orig[1] = *GenTestLocation()
orig[3] = *GenTestLocation()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
type LogRecord struct {
TimeUnixNano uint64
ObservedTimeUnixNano uint64
SeverityNumber SeverityNumber
SeverityText string
Body AnyValue
Attributes []KeyValue
DroppedAttributesCount uint32
Flags uint32
TraceId TraceID
SpanId SpanID
EventName string
}
var (
protoPoolLogRecord = sync.Pool{
New: func() any {
return &LogRecord{}
},
}
)
func NewLogRecord() *LogRecord {
if !UseProtoPooling.IsEnabled() {
return &LogRecord{}
}
return protoPoolLogRecord.Get().(*LogRecord)
}
func DeleteLogRecord(orig *LogRecord, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteAnyValue(&orig.Body, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
orig.Reset()
if nullable {
protoPoolLogRecord.Put(orig)
}
}
func CopyLogRecord(dest, src *LogRecord) *LogRecord {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLogRecord()
}
dest.TimeUnixNano = src.TimeUnixNano
dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano
dest.SeverityNumber = src.SeverityNumber
dest.SeverityText = src.SeverityText
CopyAnyValue(&dest.Body, &src.Body)
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Flags = src.Flags
CopyTraceID(&dest.TraceId, &src.TraceId)
CopySpanID(&dest.SpanId, &src.SpanId)
dest.EventName = src.EventName
return dest
}
func CopyLogRecordSlice(dest, src []LogRecord) []LogRecord {
var newDest []LogRecord
if cap(dest) < len(src) {
newDest = make([]LogRecord, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogRecord(&dest[i], false)
}
}
for i := range src {
CopyLogRecord(&newDest[i], &src[i])
}
return newDest
}
func CopyLogRecordPtrSlice(dest, src []*LogRecord) []*LogRecord {
var newDest []*LogRecord
if cap(dest) < len(src) {
newDest = make([]*LogRecord, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogRecord()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogRecord(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogRecord()
}
}
for i := range src {
CopyLogRecord(newDest[i], src[i])
}
return newDest
}
func (orig *LogRecord) Reset() {
*orig = LogRecord{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *LogRecord) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.ObservedTimeUnixNano != uint64(0) {
dest.WriteObjectField("observedTimeUnixNano")
dest.WriteUint64(orig.ObservedTimeUnixNano)
}
if int32(orig.SeverityNumber) != 0 {
dest.WriteObjectField("severityNumber")
dest.WriteInt32(int32(orig.SeverityNumber))
}
if orig.SeverityText != "" {
dest.WriteObjectField("severityText")
dest.WriteString(orig.SeverityText)
}
dest.WriteObjectField("body")
orig.Body.MarshalJSON(dest)
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
orig.TraceId.MarshalJSON(dest)
}
if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
orig.SpanId.MarshalJSON(dest)
}
if orig.EventName != "" {
dest.WriteObjectField("eventName")
dest.WriteString(orig.EventName)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *LogRecord) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "observedTimeUnixNano", "observed_time_unix_nano":
orig.ObservedTimeUnixNano = iter.ReadUint64()
case "severityNumber", "severity_number":
orig.SeverityNumber = SeverityNumber(iter.ReadEnumValue(SeverityNumber_value))
case "severityText", "severity_text":
orig.SeverityText = iter.ReadString()
case "body":
orig.Body.UnmarshalJSON(iter)
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "flags":
orig.Flags = iter.ReadUint32()
case "traceId", "trace_id":
orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
orig.SpanId.UnmarshalJSON(iter)
case "eventName", "event_name":
orig.EventName = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *LogRecord) SizeProto() int {
var n int
var l int
_ = l
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.ObservedTimeUnixNano != 0 {
n += 9
}
if orig.SeverityNumber != 0 {
n += 1 + proto.Sov(uint64(orig.SeverityNumber))
}
l = len(orig.SeverityText)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.Body.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
if orig.Flags != 0 {
n += 5
}
l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.EventName)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
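// MarshalProto encodes orig into the tail of buf. The fixed-width fields use
// the I64/I32 wire types, so SizeProto above charges a constant 9 (tag + 8
// bytes) or 5 (tag + 4 bytes) for them, and the tags below follow
// tag = fieldNumber<<3 | wireType:
//   TimeUnixNano          field 1,  I64 (1) -> 1<<3|1  = 0x9
//   ObservedTimeUnixNano  field 11, I64 (1) -> 11<<3|1 = 0x59
//   Flags                 field 8,  I32 (5) -> 8<<3|5  = 0x45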
func (orig *LogRecord) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x9
}
if orig.ObservedTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.ObservedTimeUnixNano))
pos--
buf[pos] = 0x59
}
if orig.SeverityNumber != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.SeverityNumber))
pos--
buf[pos] = 0x10
}
l = len(orig.SeverityText)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SeverityText)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = orig.Body.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x38
}
if orig.Flags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x45
}
l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
l = len(orig.EventName)
if l > 0 {
pos -= l
copy(buf[pos:], orig.EventName)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x62
}
return len(buf) - pos
}
func (orig *LogRecord) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Advance to the next field tag; this also applies while parsing inside a group.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 11:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.ObservedTimeUnixNano = uint64(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.SeverityNumber = SeverityNumber(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SeverityText = string(buf[startPos:pos])
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Body.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 7:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 8:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.EventName = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestLogRecord() *LogRecord {
orig := NewLogRecord()
orig.TimeUnixNano = uint64(13)
orig.ObservedTimeUnixNano = uint64(13)
orig.SeverityNumber = SeverityNumber(13)
orig.SeverityText = "test_severitytext"
orig.Body = *GenTestAnyValue()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
orig.Flags = uint32(13)
orig.TraceId = *GenTestTraceID()
orig.SpanId = *GenTestSpanID()
orig.EventName = "test_eventname"
return orig
}
func GenTestLogRecordPtrSlice() []*LogRecord {
orig := make([]*LogRecord, 5)
orig[0] = NewLogRecord()
orig[1] = GenTestLogRecord()
orig[2] = NewLogRecord()
orig[3] = GenTestLogRecord()
orig[4] = NewLogRecord()
return orig
}
func GenTestLogRecordSlice() []LogRecord {
orig := make([]LogRecord, 5)
orig[1] = *GenTestLogRecord()
orig[3] = *GenTestLogRecord()
return orig
}
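// The GenTest* helpers deliberately interleave zero-value and fully populated
// elements so that the Copy*/Delete* paths are exercised on both shapes.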
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// LogsData represents the logs data that can be stored in persistent storage,
// or can be embedded by other protocols that transfer OTLP logs data but do
// not implement the OTLP protocol.
type LogsData struct {
ResourceLogs []*ResourceLogs
}
var (
protoPoolLogsData = sync.Pool{
New: func() any {
return &LogsData{}
},
}
)
func NewLogsData() *LogsData {
if !UseProtoPooling.IsEnabled() {
return &LogsData{}
}
return protoPoolLogsData.Get().(*LogsData)
}
func DeleteLogsData(orig *LogsData, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceLogs {
DeleteResourceLogs(orig.ResourceLogs[i], true)
}
orig.Reset()
if nullable {
protoPoolLogsData.Put(orig)
}
}
func CopyLogsData(dest, src *LogsData) *LogsData {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLogsData()
}
dest.ResourceLogs = CopyResourceLogsPtrSlice(dest.ResourceLogs, src.ResourceLogs)
return dest
}
func CopyLogsDataSlice(dest, src []LogsData) []LogsData {
var newDest []LogsData
if cap(dest) < len(src) {
newDest = make([]LogsData, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogsData(&dest[i], false)
}
}
for i := range src {
CopyLogsData(&newDest[i], &src[i])
}
return newDest
}
func CopyLogsDataPtrSlice(dest, src []*LogsData) []*LogsData {
var newDest []*LogsData
if cap(dest) < len(src) {
newDest = make([]*LogsData, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogsData()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogsData(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogsData()
}
}
for i := range src {
CopyLogsData(newDest[i], src[i])
}
return newDest
}
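// Both copy helpers reuse destination storage when capacity allows, releasing
// surplus elements through DeleteLogsData and growing with NewLogsData when
// the destination is too short. A minimal reuse sketch (hypothetical helper):
func reuseLogsDataScratch(scratch, src []*LogsData) []*LogsData {
// When cap(scratch) >= len(src), the backing array and existing element
// pointers are reused; otherwise a fresh slice is allocated.
return CopyLogsDataPtrSlice(scratch, src)
}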
func (orig *LogsData) Reset() {
*orig = LogsData{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *LogsData) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceLogs) > 0 {
dest.WriteObjectField("resourceLogs")
dest.WriteArrayStart()
orig.ResourceLogs[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceLogs); i++ {
dest.WriteMore()
orig.ResourceLogs[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *LogsData) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceLogs", "resource_logs":
for iter.ReadArray() {
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *LogsData) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceLogs {
l = orig.ResourceLogs[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *LogsData) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceLogs) - 1; i >= 0; i-- {
l = orig.ResourceLogs[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *LogsData) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceLogs = append(orig.ResourceLogs, NewResourceLogs())
err = orig.ResourceLogs[len(orig.ResourceLogs)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
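// The three wire-format methods follow a size-then-marshal convention:
// SizeProto reports the exact encoded size, MarshalProto fills the buffer
// backwards from its end and returns the byte count written, and
// UnmarshalProto parses the result. A minimal round-trip sketch (hypothetical
// helper, not part of the generated API):
func roundTripLogsData(src *LogsData) (*LogsData, error) {
buf := make([]byte, src.SizeProto())
n := src.MarshalProto(buf)
dest := NewLogsData()
if err := dest.UnmarshalProto(buf[len(buf)-n:]); err != nil {
return nil, err
}
return dest, nil
}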
func GenTestLogsData() *LogsData {
orig := NewLogsData()
orig.ResourceLogs = []*ResourceLogs{{}, GenTestResourceLogs()}
return orig
}
func GenTestLogsDataPtrSlice() []*LogsData {
orig := make([]*LogsData, 5)
orig[0] = NewLogsData()
orig[1] = GenTestLogsData()
orig[2] = NewLogsData()
orig[3] = GenTestLogsData()
orig[4] = NewLogsData()
return orig
}
func GenTestLogsDataSlice() []LogsData {
orig := make([]LogsData, 5)
orig[1] = *GenTestLogsData()
orig[3] = *GenTestLogsData()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type LogsRequest struct {
RequestContext *RequestContext
LogsData LogsData
FormatVersion uint32
}
var (
protoPoolLogsRequest = sync.Pool{
New: func() any {
return &LogsRequest{}
},
}
)
func NewLogsRequest() *LogsRequest {
if !UseProtoPooling.IsEnabled() {
return &LogsRequest{}
}
return protoPoolLogsRequest.Get().(*LogsRequest)
}
func DeleteLogsRequest(orig *LogsRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteRequestContext(orig.RequestContext, true)
DeleteLogsData(&orig.LogsData, false)
orig.Reset()
if nullable {
protoPoolLogsRequest.Put(orig)
}
}
func CopyLogsRequest(dest, src *LogsRequest) *LogsRequest {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewLogsRequest()
}
dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
CopyLogsData(&dest.LogsData, &src.LogsData)
dest.FormatVersion = src.FormatVersion
return dest
}
func CopyLogsRequestSlice(dest, src []LogsRequest) []LogsRequest {
var newDest []LogsRequest
if cap(dest) < len(src) {
newDest = make([]LogsRequest, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogsRequest(&dest[i], false)
}
}
for i := range src {
CopyLogsRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyLogsRequestPtrSlice(dest, src []*LogsRequest) []*LogsRequest {
var newDest []*LogsRequest
if cap(dest) < len(src) {
newDest = make([]*LogsRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogsRequest()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteLogsRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewLogsRequest()
}
}
for i := range src {
CopyLogsRequest(newDest[i], src[i])
}
return newDest
}
func (orig *LogsRequest) Reset() {
*orig = LogsRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *LogsRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RequestContext != nil {
dest.WriteObjectField("requestContext")
orig.RequestContext.MarshalJSON(dest)
}
dest.WriteObjectField("logsData")
orig.LogsData.MarshalJSON(dest)
if orig.FormatVersion != uint32(0) {
dest.WriteObjectField("formatVersion")
dest.WriteUint32(orig.FormatVersion)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *LogsRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "requestContext", "request_context":
orig.RequestContext = NewRequestContext()
orig.RequestContext.UnmarshalJSON(iter)
case "logsData", "logs_data":
orig.LogsData.UnmarshalJSON(iter)
case "formatVersion", "format_version":
orig.FormatVersion = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *LogsRequest) SizeProto() int {
var n int
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.LogsData.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.FormatVersion != 0 {
n += 5
}
return n
}
func (orig *LogsRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = orig.LogsData.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
if orig.FormatVersion != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
pos--
buf[pos] = 0xd
}
return len(buf) - pos
}
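// The single-byte keys written above come from the protobuf tag formula
// (fieldNumber<<3 | wireType). A small sanity-check sketch (hypothetical
// helper; the wire-type constants mirror the standard protobuf values):
func logsRequestTagBytes() [3]byte {
const (
wireTypeLen = 2
wireTypeI32 = 5
)
return [3]byte{
2<<3 | wireTypeLen, // 0x12: RequestContext, field 2, LEN
3<<3 | wireTypeLen, // 0x1a: LogsData, field 3, LEN
1<<3 | wireTypeI32, // 0x0d: FormatVersion, field 1, I32 (fixed32)
}
}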
func (orig *LogsRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.RequestContext = NewRequestContext()
err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LogsData", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.LogsData.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 1:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.FormatVersion = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestLogsRequest() *LogsRequest {
orig := NewLogsRequest()
orig.RequestContext = GenTestRequestContext()
orig.LogsData = *GenTestLogsData()
orig.FormatVersion = uint32(13)
return orig
}
func GenTestLogsRequestPtrSlice() []*LogsRequest {
orig := make([]*LogsRequest, 5)
orig[0] = NewLogsRequest()
orig[1] = GenTestLogsRequest()
orig[2] = NewLogsRequest()
orig[3] = GenTestLogsRequest()
orig[4] = NewLogsRequest()
return orig
}
func GenTestLogsRequestSlice() []LogsRequest {
orig := make([]LogsRequest, 5)
orig[1] = *GenTestLogsRequest()
orig[3] = *GenTestLogsRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID
type Mapping struct {
MemoryStart uint64
MemoryLimit uint64
FileOffset uint64
FilenameStrindex int32
AttributeIndices []int32
}
var (
protoPoolMapping = sync.Pool{
New: func() any {
return &Mapping{}
},
}
)
func NewMapping() *Mapping {
if !UseProtoPooling.IsEnabled() {
return &Mapping{}
}
return protoPoolMapping.Get().(*Mapping)
}
func DeleteMapping(orig *Mapping, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolMapping.Put(orig)
}
}
func CopyMapping(dest, src *Mapping) *Mapping {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewMapping()
}
dest.MemoryStart = src.MemoryStart
dest.MemoryLimit = src.MemoryLimit
dest.FileOffset = src.FileOffset
dest.FilenameStrindex = src.FilenameStrindex
dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
return dest
}
func CopyMappingSlice(dest, src []Mapping) []Mapping {
var newDest []Mapping
if cap(dest) < len(src) {
newDest = make([]Mapping, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMapping(&dest[i], false)
}
}
for i := range src {
CopyMapping(&newDest[i], &src[i])
}
return newDest
}
func CopyMappingPtrSlice(dest, src []*Mapping) []*Mapping {
var newDest []*Mapping
if cap(dest) < len(src) {
newDest = make([]*Mapping, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMapping()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMapping(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMapping()
}
}
for i := range src {
CopyMapping(newDest[i], src[i])
}
return newDest
}
func (orig *Mapping) Reset() {
*orig = Mapping{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Mapping) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.MemoryStart != uint64(0) {
dest.WriteObjectField("memoryStart")
dest.WriteUint64(orig.MemoryStart)
}
if orig.MemoryLimit != uint64(0) {
dest.WriteObjectField("memoryLimit")
dest.WriteUint64(orig.MemoryLimit)
}
if orig.FileOffset != uint64(0) {
dest.WriteObjectField("fileOffset")
dest.WriteUint64(orig.FileOffset)
}
if orig.FilenameStrindex != int32(0) {
dest.WriteObjectField("filenameStrindex")
dest.WriteInt32(orig.FilenameStrindex)
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Mapping) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "memoryStart", "memory_start":
orig.MemoryStart = iter.ReadUint64()
case "memoryLimit", "memory_limit":
orig.MemoryLimit = iter.ReadUint64()
case "fileOffset", "file_offset":
orig.FileOffset = iter.ReadUint64()
case "filenameStrindex", "filename_strindex":
orig.FilenameStrindex = iter.ReadInt32()
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func (orig *Mapping) SizeProto() int {
var n int
var l int
_ = l
if orig.MemoryStart != 0 {
n += 1 + proto.Sov(uint64(orig.MemoryStart))
}
if orig.MemoryLimit != 0 {
n += 1 + proto.Sov(uint64(orig.MemoryLimit))
}
if orig.FileOffset != 0 {
n += 1 + proto.Sov(uint64(orig.FileOffset))
}
if orig.FilenameStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.FilenameStrindex))
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Mapping) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.MemoryStart != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryStart))
pos--
buf[pos] = 0x8
}
if orig.MemoryLimit != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.MemoryLimit))
pos--
buf[pos] = 0x10
}
if orig.FileOffset != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FileOffset))
pos--
buf[pos] = 0x18
}
if orig.FilenameStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.FilenameStrindex))
pos--
buf[pos] = 0x20
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x2a
}
return len(buf) - pos
}
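// AttributeIndices is written packed: the varint-encoded values back to back,
// prefixed by a single LEN key (0x2a = field 5, wire type 2) and the payload
// length. A hand-rolled sketch of the same layout for []int32{1, 300}
// (hypothetical helper; assumes non-negative values):
func examplePackedAttributeIndices() []byte {
// varint(1) = 0x01; varint(300) = 0xac 0x02
payload := []byte{0x01, 0xac, 0x02}
return append([]byte{0x2a, byte(len(payload))}, payload...)
}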
func (orig *Mapping) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MemoryStart = uint64(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.MemoryLimit = uint64(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FileOffset = uint64(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.FilenameStrindex = int32(num)
case 5:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
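// Case 5 above accepts both the packed (LEN) and unpacked (VARINT) encodings
// for AttributeIndices, matching proto3 decoding rules. Decoding the packed
// bytes from the sketch above (hypothetical helper):
func decodePackedMapping() (*Mapping, error) {
m := NewMapping()
if err := m.UnmarshalProto(examplePackedAttributeIndices()); err != nil {
return nil, err
}
return m, nil // m.AttributeIndices == []int32{1, 300}
}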
func GenTestMapping() *Mapping {
orig := NewMapping()
orig.MemoryStart = uint64(13)
orig.MemoryLimit = uint64(13)
orig.FileOffset = uint64(13)
orig.FilenameStrindex = int32(13)
orig.AttributeIndices = []int32{int32(0), int32(13)}
return orig
}
func GenTestMappingPtrSlice() []*Mapping {
orig := make([]*Mapping, 5)
orig[0] = NewMapping()
orig[1] = GenTestMapping()
orig[2] = NewMapping()
orig[3] = GenTestMapping()
orig[4] = NewMapping()
return orig
}
func GenTestMappingSlice() []Mapping {
orig := make([]Mapping, 5)
orig[1] = *GenTestMapping()
orig[3] = *GenTestMapping()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *Metric) GetData() any {
if m != nil {
return m.Data
}
return nil
}
type Metric_Gauge struct {
Gauge *Gauge
}
func (m *Metric) GetGauge() *Gauge {
if v, ok := m.GetData().(*Metric_Gauge); ok {
return v.Gauge
}
return nil
}
type Metric_Sum struct {
Sum *Sum
}
func (m *Metric) GetSum() *Sum {
if v, ok := m.GetData().(*Metric_Sum); ok {
return v.Sum
}
return nil
}
type Metric_Histogram struct {
Histogram *Histogram
}
func (m *Metric) GetHistogram() *Histogram {
if v, ok := m.GetData().(*Metric_Histogram); ok {
return v.Histogram
}
return nil
}
type Metric_ExponentialHistogram struct {
ExponentialHistogram *ExponentialHistogram
}
func (m *Metric) GetExponentialHistogram() *ExponentialHistogram {
if v, ok := m.GetData().(*Metric_ExponentialHistogram); ok {
return v.ExponentialHistogram
}
return nil
}
type Metric_Summary struct {
Summary *Summary
}
func (m *Metric) GetSummary() *Summary {
if v, ok := m.GetData().(*Metric_Summary); ok {
return v.Summary
}
return nil
}
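// The Data oneof is modeled as small wrapper structs stored in an `any`
// field; the typed getters above return the payload or nil. A type-switch
// usage sketch (hypothetical helper):
func metricDataKind(m *Metric) string {
switch m.GetData().(type) {
case *Metric_Gauge:
return "gauge"
case *Metric_Sum:
return "sum"
case *Metric_Histogram:
return "histogram"
case *Metric_ExponentialHistogram:
return "exponential_histogram"
case *Metric_Summary:
return "summary"
default:
return "none"
}
}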
// Metric represents one metric as a collection of datapoints.
// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto
type Metric struct {
Name string
Description string
Unit string
Data any
Metadata []KeyValue
}
var (
protoPoolMetric = sync.Pool{
New: func() any {
return &Metric{}
},
}
ProtoPoolMetric_Gauge = sync.Pool{
New: func() any {
return &Metric_Gauge{}
},
}
ProtoPoolMetric_Sum = sync.Pool{
New: func() any {
return &Metric_Sum{}
},
}
ProtoPoolMetric_Histogram = sync.Pool{
New: func() any {
return &Metric_Histogram{}
},
}
ProtoPoolMetric_ExponentialHistogram = sync.Pool{
New: func() any {
return &Metric_ExponentialHistogram{}
},
}
ProtoPoolMetric_Summary = sync.Pool{
New: func() any {
return &Metric_Summary{}
},
}
)
func NewMetric() *Metric {
if !UseProtoPooling.IsEnabled() {
return &Metric{}
}
return protoPoolMetric.Get().(*Metric)
}
func DeleteMetric(orig *Metric, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
switch ov := orig.Data.(type) {
case *Metric_Gauge:
DeleteGauge(ov.Gauge, true)
ov.Gauge = nil
ProtoPoolMetric_Gauge.Put(ov)
case *Metric_Sum:
DeleteSum(ov.Sum, true)
ov.Sum = nil
ProtoPoolMetric_Sum.Put(ov)
case *Metric_Histogram:
DeleteHistogram(ov.Histogram, true)
ov.Histogram = nil
ProtoPoolMetric_Histogram.Put(ov)
case *Metric_ExponentialHistogram:
DeleteExponentialHistogram(ov.ExponentialHistogram, true)
ov.ExponentialHistogram = nil
ProtoPoolMetric_ExponentialHistogram.Put(ov)
case *Metric_Summary:
DeleteSummary(ov.Summary, true)
ov.Summary = nil
ProtoPoolMetric_Summary.Put(ov)
}
for i := range orig.Metadata {
DeleteKeyValue(&orig.Metadata[i], false)
}
orig.Reset()
if nullable {
protoPoolMetric.Put(orig)
}
}
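// NewMetric and DeleteMetric pair around the UseProtoPooling feature gate:
// when pooling is enabled, DeleteMetric first returns the oneof wrapper and
// nested values to their sync.Pools, then optionally pools the Metric itself.
// A minimal lifecycle sketch (hypothetical helper):
func withPooledMetric(fill func(*Metric)) {
m := NewMetric() // drawn from protoPoolMetric when pooling is enabled
fill(m)
DeleteMetric(m, true) // nullable=true: m itself may go back to the pool
}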
func CopyMetric(dest, src *Metric) *Metric {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewMetric()
}
dest.Name = src.Name
dest.Description = src.Description
dest.Unit = src.Unit
switch t := src.Data.(type) {
case *Metric_Gauge:
var ov *Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
}
ov.Gauge = NewGauge()
CopyGauge(ov.Gauge, t.Gauge)
dest.Data = ov
case *Metric_Sum:
var ov *Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
}
ov.Sum = NewSum()
CopySum(ov.Sum, t.Sum)
dest.Data = ov
case *Metric_Histogram:
var ov *Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
}
ov.Histogram = NewHistogram()
CopyHistogram(ov.Histogram, t.Histogram)
dest.Data = ov
case *Metric_ExponentialHistogram:
var ov *Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
CopyExponentialHistogram(ov.ExponentialHistogram, t.ExponentialHistogram)
dest.Data = ov
case *Metric_Summary:
var ov *Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
}
ov.Summary = NewSummary()
CopySummary(ov.Summary, t.Summary)
dest.Data = ov
default:
dest.Data = nil
}
dest.Metadata = CopyKeyValueSlice(dest.Metadata, src.Metadata)
return dest
}
func CopyMetricSlice(dest, src []Metric) []Metric {
var newDest []Metric
if cap(dest) < len(src) {
newDest = make([]Metric, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetric(&dest[i], false)
}
}
for i := range src {
CopyMetric(&newDest[i], &src[i])
}
return newDest
}
func CopyMetricPtrSlice(dest, src []*Metric) []*Metric {
var newDest []*Metric
if cap(dest) < len(src) {
newDest = make([]*Metric, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetric()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetric(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetric()
}
}
for i := range src {
CopyMetric(newDest[i], src[i])
}
return newDest
}
func (orig *Metric) Reset() {
*orig = Metric{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Metric) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if orig.Description != "" {
dest.WriteObjectField("description")
dest.WriteString(orig.Description)
}
if orig.Unit != "" {
dest.WriteObjectField("unit")
dest.WriteString(orig.Unit)
}
switch orig := orig.Data.(type) {
case *Metric_Gauge:
if orig.Gauge != nil {
dest.WriteObjectField("gauge")
orig.Gauge.MarshalJSON(dest)
}
case *Metric_Sum:
if orig.Sum != nil {
dest.WriteObjectField("sum")
orig.Sum.MarshalJSON(dest)
}
case *Metric_Histogram:
if orig.Histogram != nil {
dest.WriteObjectField("histogram")
orig.Histogram.MarshalJSON(dest)
}
case *Metric_ExponentialHistogram:
if orig.ExponentialHistogram != nil {
dest.WriteObjectField("exponentialHistogram")
orig.ExponentialHistogram.MarshalJSON(dest)
}
case *Metric_Summary:
if orig.Summary != nil {
dest.WriteObjectField("summary")
orig.Summary.MarshalJSON(dest)
}
}
if len(orig.Metadata) > 0 {
dest.WriteObjectField("metadata")
dest.WriteArrayStart()
orig.Metadata[0].MarshalJSON(dest)
for i := 1; i < len(orig.Metadata); i++ {
dest.WriteMore()
orig.Metadata[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
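// For a gauge metric named "m", the stream above produces JSON shaped like
// {"name":"m","gauge":{...},"metadata":[...]}; empty strings, an unset oneof,
// and an empty metadata slice are omitted entirely.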
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Metric) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "name":
orig.Name = iter.ReadString()
case "description":
orig.Description = iter.ReadString()
case "unit":
orig.Unit = iter.ReadString()
case "gauge":
{
var ov *Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
}
ov.Gauge = NewGauge()
ov.Gauge.UnmarshalJSON(iter)
orig.Data = ov
}
case "sum":
{
var ov *Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
}
ov.Sum = NewSum()
ov.Sum.UnmarshalJSON(iter)
orig.Data = ov
}
case "histogram":
{
var ov *Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
}
ov.Histogram = NewHistogram()
ov.Histogram.UnmarshalJSON(iter)
orig.Data = ov
}
case "exponentialHistogram", "exponential_histogram":
{
var ov *Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
ov.ExponentialHistogram.UnmarshalJSON(iter)
orig.Data = ov
}
case "summary":
{
var ov *Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
}
ov.Summary = NewSummary()
ov.Summary.UnmarshalJSON(iter)
orig.Data = ov
}
case "metadata":
for iter.ReadArray() {
orig.Metadata = append(orig.Metadata, KeyValue{})
orig.Metadata[len(orig.Metadata)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *Metric) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Description)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Unit)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
switch orig := orig.Data.(type) {
case nil:
_ = orig
case *Metric_Gauge:
if orig.Gauge != nil {
l = orig.Gauge.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *Metric_Sum:
if orig.Sum != nil {
l = orig.Sum.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *Metric_Histogram:
if orig.Histogram != nil {
l = orig.Histogram.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *Metric_ExponentialHistogram:
if orig.ExponentialHistogram != nil {
l = orig.ExponentialHistogram.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *Metric_Summary:
if orig.Summary != nil {
l = orig.Summary.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
}
for i := range orig.Metadata {
l = orig.Metadata[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Metric) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Description)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Description)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.Unit)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Unit)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
switch orig := orig.Data.(type) {
case *Metric_Gauge:
if orig.Gauge != nil {
l = orig.Gauge.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
case *Metric_Sum:
if orig.Sum != nil {
l = orig.Sum.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
case *Metric_Histogram:
if orig.Histogram != nil {
l = orig.Histogram.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
case *Metric_ExponentialHistogram:
if orig.ExponentialHistogram != nil {
l = orig.ExponentialHistogram.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
}
case *Metric_Summary:
if orig.Summary != nil {
l = orig.Summary.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
}
for i := len(orig.Metadata) - 1; i >= 0; i-- {
l = orig.Metadata[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x62
}
return len(buf) - pos
}
func (orig *Metric) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Description = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Unit = string(buf[startPos:pos])
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *Metric_Gauge
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Gauge{}
} else {
ov = ProtoPoolMetric_Gauge.Get().(*Metric_Gauge)
}
ov.Gauge = NewGauge()
err = ov.Gauge.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *Metric_Sum
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Sum{}
} else {
ov = ProtoPoolMetric_Sum.Get().(*Metric_Sum)
}
ov.Sum = NewSum()
err = ov.Sum.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *Metric_Histogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Histogram{}
} else {
ov = ProtoPoolMetric_Histogram.Get().(*Metric_Histogram)
}
ov.Histogram = NewHistogram()
err = ov.Histogram.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *Metric_ExponentialHistogram
if !UseProtoPooling.IsEnabled() {
ov = &Metric_ExponentialHistogram{}
} else {
ov = ProtoPoolMetric_ExponentialHistogram.Get().(*Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = NewExponentialHistogram()
err = ov.ExponentialHistogram.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *Metric_Summary
if !UseProtoPooling.IsEnabled() {
ov = &Metric_Summary{}
} else {
ov = ProtoPoolMetric_Summary.Get().(*Metric_Summary)
}
ov.Summary = NewSummary()
err = ov.Summary.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.Data = ov
case 12:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Metadata = append(orig.Metadata, KeyValue{})
err = orig.Metadata[len(orig.Metadata)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestMetric() *Metric {
orig := NewMetric()
orig.Name = "test_name"
orig.Description = "test_description"
orig.Unit = "test_unit"
orig.Data = &Metric_Gauge{Gauge: GenTestGauge()}
orig.Metadata = []KeyValue{{}, *GenTestKeyValue()}
return orig
}
func GenTestMetricPtrSlice() []*Metric {
orig := make([]*Metric, 5)
orig[0] = NewMetric()
orig[1] = GenTestMetric()
orig[2] = NewMetric()
orig[3] = GenTestMetric()
orig[4] = NewMetric()
return orig
}
func GenTestMetricSlice() []Metric {
orig := make([]Metric, 5)
orig[1] = *GenTestMetric()
orig[3] = *GenTestMetric()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// MetricsData represents the metrics data that can be stored in persistent
// storage, or can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
type MetricsData struct {
ResourceMetrics []*ResourceMetrics
}
var (
protoPoolMetricsData = sync.Pool{
New: func() any {
return &MetricsData{}
},
}
)
func NewMetricsData() *MetricsData {
if !UseProtoPooling.IsEnabled() {
return &MetricsData{}
}
return protoPoolMetricsData.Get().(*MetricsData)
}
func DeleteMetricsData(orig *MetricsData, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceMetrics {
DeleteResourceMetrics(orig.ResourceMetrics[i], true)
}
orig.Reset()
if nullable {
protoPoolMetricsData.Put(orig)
}
}
func CopyMetricsData(dest, src *MetricsData) *MetricsData {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewMetricsData()
}
dest.ResourceMetrics = CopyResourceMetricsPtrSlice(dest.ResourceMetrics, src.ResourceMetrics)
return dest
}
func CopyMetricsDataSlice(dest, src []MetricsData) []MetricsData {
var newDest []MetricsData
if cap(dest) < len(src) {
newDest = make([]MetricsData, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetricsData(&dest[i], false)
}
}
for i := range src {
CopyMetricsData(&newDest[i], &src[i])
}
return newDest
}
func CopyMetricsDataPtrSlice(dest, src []*MetricsData) []*MetricsData {
var newDest []*MetricsData
if cap(dest) < len(src) {
newDest = make([]*MetricsData, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetricsData()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetricsData(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetricsData()
}
}
for i := range src {
CopyMetricsData(newDest[i], src[i])
}
return newDest
}
func (orig *MetricsData) Reset() {
*orig = MetricsData{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *MetricsData) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceMetrics) > 0 {
dest.WriteObjectField("resourceMetrics")
dest.WriteArrayStart()
orig.ResourceMetrics[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceMetrics); i++ {
dest.WriteMore()
orig.ResourceMetrics[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *MetricsData) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceMetrics", "resource_metrics":
for iter.ReadArray() {
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *MetricsData) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceMetrics {
l = orig.ResourceMetrics[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *MetricsData) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceMetrics) - 1; i >= 0; i-- {
l = orig.ResourceMetrics[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *MetricsData) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceMetrics = append(orig.ResourceMetrics, NewResourceMetrics())
err = orig.ResourceMetrics[len(orig.ResourceMetrics)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestMetricsData() *MetricsData {
orig := NewMetricsData()
orig.ResourceMetrics = []*ResourceMetrics{{}, GenTestResourceMetrics()}
return orig
}
func GenTestMetricsDataPtrSlice() []*MetricsData {
orig := make([]*MetricsData, 5)
orig[0] = NewMetricsData()
orig[1] = GenTestMetricsData()
orig[2] = NewMetricsData()
orig[3] = GenTestMetricsData()
orig[4] = NewMetricsData()
return orig
}
func GenTestMetricsDataSlice() []MetricsData {
orig := make([]MetricsData, 5)
orig[1] = *GenTestMetricsData()
orig[3] = *GenTestMetricsData()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type MetricsRequest struct {
RequestContext *RequestContext
MetricsData MetricsData
FormatVersion uint32
}
var (
protoPoolMetricsRequest = sync.Pool{
New: func() any {
return &MetricsRequest{}
},
}
)
func NewMetricsRequest() *MetricsRequest {
if !UseProtoPooling.IsEnabled() {
return &MetricsRequest{}
}
return protoPoolMetricsRequest.Get().(*MetricsRequest)
}
func DeleteMetricsRequest(orig *MetricsRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteRequestContext(orig.RequestContext, true)
DeleteMetricsData(&orig.MetricsData, false)
orig.Reset()
if nullable {
protoPoolMetricsRequest.Put(orig)
}
}
func CopyMetricsRequest(dest, src *MetricsRequest) *MetricsRequest {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewMetricsRequest()
}
dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
CopyMetricsData(&dest.MetricsData, &src.MetricsData)
dest.FormatVersion = src.FormatVersion
return dest
}
func CopyMetricsRequestSlice(dest, src []MetricsRequest) []MetricsRequest {
var newDest []MetricsRequest
if cap(dest) < len(src) {
newDest = make([]MetricsRequest, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetricsRequest(&dest[i], false)
}
}
for i := range src {
CopyMetricsRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyMetricsRequestPtrSlice(dest, src []*MetricsRequest) []*MetricsRequest {
var newDest []*MetricsRequest
if cap(dest) < len(src) {
newDest = make([]*MetricsRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetricsRequest()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteMetricsRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewMetricsRequest()
}
}
for i := range src {
CopyMetricsRequest(newDest[i], src[i])
}
return newDest
}
func (orig *MetricsRequest) Reset() {
*orig = MetricsRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *MetricsRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RequestContext != nil {
dest.WriteObjectField("requestContext")
orig.RequestContext.MarshalJSON(dest)
}
dest.WriteObjectField("metricsData")
orig.MetricsData.MarshalJSON(dest)
if orig.FormatVersion != uint32(0) {
dest.WriteObjectField("formatVersion")
dest.WriteUint32(orig.FormatVersion)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *MetricsRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "requestContext", "request_context":
orig.RequestContext = NewRequestContext()
orig.RequestContext.UnmarshalJSON(iter)
case "metricsData", "metrics_data":
orig.MetricsData.UnmarshalJSON(iter)
case "formatVersion", "format_version":
orig.FormatVersion = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *MetricsRequest) SizeProto() int {
var n int
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.MetricsData.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.FormatVersion != 0 {
n += 5
}
return n
}
func (orig *MetricsRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = orig.MetricsData.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
if orig.FormatVersion != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
pos--
buf[pos] = 0xd
}
return len(buf) - pos
}
func (orig *MetricsRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.RequestContext = NewRequestContext()
err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field MetricsData", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.MetricsData.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 1:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.FormatVersion = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestMetricsRequest() *MetricsRequest {
orig := NewMetricsRequest()
orig.RequestContext = GenTestRequestContext()
orig.MetricsData = *GenTestMetricsData()
orig.FormatVersion = uint32(13)
return orig
}
func GenTestMetricsRequestPtrSlice() []*MetricsRequest {
orig := make([]*MetricsRequest, 5)
orig[0] = NewMetricsRequest()
orig[1] = GenTestMetricsRequest()
orig[2] = NewMetricsRequest()
orig[3] = GenTestMetricsRequest()
orig[4] = NewMetricsRequest()
return orig
}
func GenTestMetricsRequestSlice() []MetricsRequest {
orig := make([]MetricsRequest, 5)
orig[1] = *GenTestMetricsRequest()
orig[3] = *GenTestMetricsRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *NumberDataPoint) GetValue() any {
if m != nil {
return m.Value
}
return nil
}
type NumberDataPoint_AsDouble struct {
AsDouble float64
}
func (m *NumberDataPoint) GetAsDouble() float64 {
if v, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok {
return v.AsDouble
}
return float64(0)
}
type NumberDataPoint_AsInt struct {
AsInt int64
}
func (m *NumberDataPoint) GetAsInt() int64 {
if v, ok := m.GetValue().(*NumberDataPoint_AsInt); ok {
return v.AsInt
}
return int64(0)
}
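// NumberDataPoint carries its value as a two-case oneof; the getters above
// collapse it to a plain float64 or int64 with zero-value defaults. A usage
// sketch that normalizes either case to a float64 (hypothetical helper):
func numberDataPointAsFloat(p *NumberDataPoint) float64 {
if _, ok := p.GetValue().(*NumberDataPoint_AsInt); ok {
return float64(p.GetAsInt())
}
return p.GetAsDouble() // covers AsDouble and the unset case (0)
}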
// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric.
type NumberDataPoint struct {
Attributes []KeyValue
StartTimeUnixNano uint64
TimeUnixNano uint64
Value any
Exemplars []Exemplar
Flags uint32
}
var (
protoPoolNumberDataPoint = sync.Pool{
New: func() any {
return &NumberDataPoint{}
},
}
ProtoPoolNumberDataPoint_AsDouble = sync.Pool{
New: func() any {
return &NumberDataPoint_AsDouble{}
},
}
ProtoPoolNumberDataPoint_AsInt = sync.Pool{
New: func() any {
return &NumberDataPoint_AsInt{}
},
}
)
func NewNumberDataPoint() *NumberDataPoint {
if !UseProtoPooling.IsEnabled() {
return &NumberDataPoint{}
}
return protoPoolNumberDataPoint.Get().(*NumberDataPoint)
}
func DeleteNumberDataPoint(orig *NumberDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
switch ov := orig.Value.(type) {
case *NumberDataPoint_AsDouble:
if UseProtoPooling.IsEnabled() {
ov.AsDouble = float64(0)
ProtoPoolNumberDataPoint_AsDouble.Put(ov)
}
case *NumberDataPoint_AsInt:
if UseProtoPooling.IsEnabled() {
ov.AsInt = int64(0)
ProtoPoolNumberDataPoint_AsInt.Put(ov)
}
}
for i := range orig.Exemplars {
DeleteExemplar(&orig.Exemplars[i], false)
}
orig.Reset()
if nullable {
protoPoolNumberDataPoint.Put(orig)
}
}
func CopyNumberDataPoint(dest, src *NumberDataPoint) *NumberDataPoint {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewNumberDataPoint()
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
switch t := src.Value.(type) {
case *NumberDataPoint_AsDouble:
var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = t.AsDouble
dest.Value = ov
case *NumberDataPoint_AsInt:
var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = t.AsInt
dest.Value = ov
default:
dest.Value = nil
}
dest.Exemplars = CopyExemplarSlice(dest.Exemplars, src.Exemplars)
dest.Flags = src.Flags
return dest
}
func CopyNumberDataPointSlice(dest, src []NumberDataPoint) []NumberDataPoint {
var newDest []NumberDataPoint
if cap(dest) < len(src) {
newDest = make([]NumberDataPoint, len(src))
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteNumberDataPoint(&dest[i], false)
}
}
for i := range src {
CopyNumberDataPoint(&newDest[i], &src[i])
}
return newDest
}
func CopyNumberDataPointPtrSlice(dest, src []*NumberDataPoint) []*NumberDataPoint {
var newDest []*NumberDataPoint
if cap(dest) < len(src) {
newDest = make([]*NumberDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewNumberDataPoint()
}
} else {
newDest = dest[:len(src)]
// Cleanup the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteNumberDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewNumberDataPoint()
}
}
for i := range src {
CopyNumberDataPoint(newDest[i], src[i])
}
return newDest
}
func (orig *NumberDataPoint) Reset() {
*orig = NumberDataPoint{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *NumberDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
switch orig := orig.Value.(type) {
case *NumberDataPoint_AsDouble:
dest.WriteObjectField("asDouble")
dest.WriteFloat64(orig.AsDouble)
case *NumberDataPoint_AsInt:
dest.WriteObjectField("asInt")
dest.WriteInt64(orig.AsInt)
}
if len(orig.Exemplars) > 0 {
dest.WriteObjectField("exemplars")
dest.WriteArrayStart()
orig.Exemplars[0].MarshalJSON(dest)
for i := 1; i < len(orig.Exemplars); i++ {
dest.WriteMore()
orig.Exemplars[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *NumberDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "asDouble", "as_double":
{
var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = iter.ReadFloat64()
orig.Value = ov
}
case "asInt", "as_int":
{
var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = iter.ReadInt64()
orig.Value = ov
}
case "exemplars":
for iter.ReadArray() {
orig.Exemplars = append(orig.Exemplars, Exemplar{})
orig.Exemplars[len(orig.Exemplars)-1].UnmarshalJSON(iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *NumberDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
switch orig := orig.Value.(type) {
case nil:
_ = orig
case *NumberDataPoint_AsDouble:
n += 9
case *NumberDataPoint_AsInt:
n += 9
}
for i := range orig.Exemplars {
l = orig.Exemplars[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
return n
}
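// Note (editorial, not generated): both oneof branches of Value are fixed64 on
// the wire (asDouble as IEEE-754 bits, asInt as a fixed64), so each
// contributes a flat 9 bytes (1 tag byte + 8 payload bytes) to the size above.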
func (orig *NumberDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
switch orig := orig.Value.(type) {
case *NumberDataPoint_AsDouble:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.AsDouble))
pos--
buf[pos] = 0x21
case *NumberDataPoint_AsInt:
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.AsInt))
pos--
buf[pos] = 0x31
}
for i := len(orig.Exemplars) - 1; i >= 0; i-- {
l = orig.Exemplars[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x40
}
return len(buf) - pos
}
func (orig *NumberDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *NumberDataPoint_AsDouble
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsDouble{}
} else {
ov = ProtoPoolNumberDataPoint_AsDouble.Get().(*NumberDataPoint_AsDouble)
}
ov.AsDouble = math.Float64frombits(num)
orig.Value = ov
case 6:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
var ov *NumberDataPoint_AsInt
if !UseProtoPooling.IsEnabled() {
ov = &NumberDataPoint_AsInt{}
} else {
ov = ProtoPoolNumberDataPoint_AsInt.Get().(*NumberDataPoint_AsInt)
}
ov.AsInt = int64(num)
orig.Value = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Exemplars = append(orig.Exemplars, Exemplar{})
err = orig.Exemplars[len(orig.Exemplars)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 8:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
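// Illustrative sketch (not generated code): a MarshalProto/UnmarshalProto
// round trip. MarshalProto fills the buffer backwards from the end and returns
// the byte count written, so with a buffer sized by SizeProto the encoded
// message is exactly the trailing n bytes.
func exampleNumberDataPointRoundTrip() error {
src := GenTestNumberDataPoint()
buf := make([]byte, src.SizeProto())
n := src.MarshalProto(buf)
dst := NewNumberDataPoint()
return dst.UnmarshalProto(buf[len(buf)-n:])
}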
func GenTestNumberDataPoint() *NumberDataPoint {
orig := NewNumberDataPoint()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.StartTimeUnixNano = uint64(13)
orig.TimeUnixNano = uint64(13)
orig.Value = &NumberDataPoint_AsDouble{AsDouble: float64(3.1415926)}
orig.Exemplars = []Exemplar{{}, *GenTestExemplar()}
orig.Flags = uint32(13)
return orig
}
func GenTestNumberDataPointPtrSlice() []*NumberDataPoint {
orig := make([]*NumberDataPoint, 5)
orig[0] = NewNumberDataPoint()
orig[1] = GenTestNumberDataPoint()
orig[2] = NewNumberDataPoint()
orig[3] = GenTestNumberDataPoint()
orig[4] = NewNumberDataPoint()
return orig
}
func GenTestNumberDataPointSlice() []NumberDataPoint {
orig := make([]NumberDataPoint, 5)
orig[1] = *GenTestNumberDataPoint()
orig[3] = *GenTestNumberDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Profile is an implementation of the pprofextended data model.
type Profile struct {
SampleType ValueType
Samples []*Sample
TimeUnixNano uint64
DurationNano uint64
PeriodType ValueType
Period int64
ProfileId ProfileID
DroppedAttributesCount uint32
OriginalPayloadFormat string
OriginalPayload []byte
AttributeIndices []int32
}
var (
protoPoolProfile = sync.Pool{
New: func() any {
return &Profile{}
},
}
)
func NewProfile() *Profile {
if !UseProtoPooling.IsEnabled() {
return &Profile{}
}
return protoPoolProfile.Get().(*Profile)
}
func DeleteProfile(orig *Profile, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteValueType(&orig.SampleType, false)
for i := range orig.Samples {
DeleteSample(orig.Samples[i], true)
}
DeleteValueType(&orig.PeriodType, false)
DeleteProfileID(&orig.ProfileId, false)
orig.Reset()
if nullable {
protoPoolProfile.Put(orig)
}
}
func CopyProfile(dest, src *Profile) *Profile {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewProfile()
}
CopyValueType(&dest.SampleType, &src.SampleType)
dest.Samples = CopySamplePtrSlice(dest.Samples, src.Samples)
dest.TimeUnixNano = src.TimeUnixNano
dest.DurationNano = src.DurationNano
CopyValueType(&dest.PeriodType, &src.PeriodType)
dest.Period = src.Period
CopyProfileID(&dest.ProfileId, &src.ProfileId)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.OriginalPayloadFormat = src.OriginalPayloadFormat
dest.OriginalPayload = src.OriginalPayload
dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
return dest
}
func CopyProfileSlice(dest, src []Profile) []Profile {
var newDest []Profile
if cap(dest) < len(src) {
newDest = make([]Profile, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfile(&dest[i], false)
}
}
for i := range src {
CopyProfile(&newDest[i], &src[i])
}
return newDest
}
func CopyProfilePtrSlice(dest, src []*Profile) []*Profile {
var newDest []*Profile
if cap(dest) < len(src) {
newDest = make([]*Profile, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfile()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfile(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfile()
}
}
for i := range src {
CopyProfile(newDest[i], src[i])
}
return newDest
}
func (orig *Profile) Reset() {
*orig = Profile{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Profile) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("sampleType")
orig.SampleType.MarshalJSON(dest)
if len(orig.Samples) > 0 {
dest.WriteObjectField("samples")
dest.WriteArrayStart()
orig.Samples[0].MarshalJSON(dest)
for i := 1; i < len(orig.Samples); i++ {
dest.WriteMore()
orig.Samples[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.DurationNano != uint64(0) {
dest.WriteObjectField("durationNano")
dest.WriteUint64(orig.DurationNano)
}
dest.WriteObjectField("periodType")
orig.PeriodType.MarshalJSON(dest)
if orig.Period != int64(0) {
dest.WriteObjectField("period")
dest.WriteInt64(orig.Period)
}
if !orig.ProfileId.IsEmpty() {
dest.WriteObjectField("profileId")
orig.ProfileId.MarshalJSON(dest)
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.OriginalPayloadFormat != "" {
dest.WriteObjectField("originalPayloadFormat")
dest.WriteString(orig.OriginalPayloadFormat)
}
if len(orig.OriginalPayload) > 0 {
dest.WriteObjectField("originalPayload")
dest.WriteBytes(orig.OriginalPayload)
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *Profile) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "sampleType", "sample_type":
orig.SampleType.UnmarshalJSON(iter)
case "samples":
for iter.ReadArray() {
orig.Samples = append(orig.Samples, NewSample())
orig.Samples[len(orig.Samples)-1].UnmarshalJSON(iter)
}
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "durationNano", "duration_nano":
orig.DurationNano = iter.ReadUint64()
case "periodType", "period_type":
orig.PeriodType.UnmarshalJSON(iter)
case "period":
orig.Period = iter.ReadInt64()
case "profileId", "profile_id":
orig.ProfileId.UnmarshalJSON(iter)
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "originalPayloadFormat", "original_payload_format":
orig.OriginalPayloadFormat = iter.ReadString()
case "originalPayload", "original_payload":
orig.OriginalPayload = iter.ReadBytes()
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func (orig *Profile) SizeProto() int {
var n int
var l int
_ = l
l = orig.SampleType.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Samples {
l = orig.Samples[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.DurationNano != 0 {
n += 1 + proto.Sov(uint64(orig.DurationNano))
}
l = orig.PeriodType.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.Period != 0 {
n += 1 + proto.Sov(uint64(orig.Period))
}
l = orig.ProfileId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
l = len(orig.OriginalPayloadFormat)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.OriginalPayload)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Profile) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.SampleType.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Samples) - 1; i >= 0; i-- {
l = orig.Samples[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.DurationNano != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DurationNano))
pos--
buf[pos] = 0x20
}
l = orig.PeriodType.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
if orig.Period != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Period))
pos--
buf[pos] = 0x30
}
l = orig.ProfileId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x40
}
l = len(orig.OriginalPayloadFormat)
if l > 0 {
pos -= l
copy(buf[pos:], orig.OriginalPayloadFormat)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
l = len(orig.OriginalPayload)
if l > 0 {
pos -= l
copy(buf[pos:], orig.OriginalPayload)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x52
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x5a
}
return len(buf) - pos
}
func (orig *Profile) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SampleType.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Samples = append(orig.Samples, NewSample())
err = orig.Samples[len(orig.Samples)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DurationNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DurationNano = uint64(num)
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.PeriodType.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Period = int64(num)
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.ProfileId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 8:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.OriginalPayloadFormat = string(buf[startPos:pos])
case 10:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
if length != 0 {
orig.OriginalPayload = make([]byte, length)
copy(orig.OriginalPayload, buf[startPos:pos])
}
case 11:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
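// Illustrative sketch (not generated code): AttributeIndices is written as a
// single packed length-delimited field (tag 0x5a) by MarshalProto, while the
// decoder above also accepts the unpacked varint form. The usual round trip
// therefore exercises the packed path.
func exampleProfileRoundTrip() error {
src := GenTestProfile()
buf := make([]byte, src.SizeProto())
n := src.MarshalProto(buf)
dst := NewProfile()
return dst.UnmarshalProto(buf[len(buf)-n:])
}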
func GenTestProfile() *Profile {
orig := NewProfile()
orig.SampleType = *GenTestValueType()
orig.Samples = []*Sample{{}, GenTestSample()}
orig.TimeUnixNano = uint64(13)
orig.DurationNano = uint64(13)
orig.PeriodType = *GenTestValueType()
orig.Period = int64(13)
orig.ProfileId = *GenTestProfileID()
orig.DroppedAttributesCount = uint32(13)
orig.OriginalPayloadFormat = "test_originalpayloadformat"
orig.OriginalPayload = []byte{1, 2, 3}
orig.AttributeIndices = []int32{int32(0), int32(13)}
return orig
}
func GenTestProfilePtrSlice() []*Profile {
orig := make([]*Profile, 5)
orig[0] = NewProfile()
orig[1] = GenTestProfile()
orig[2] = NewProfile()
orig[3] = GenTestProfile()
orig[4] = NewProfile()
return orig
}
func GenTestProfileSlice() []Profile {
orig := make([]Profile, 5)
orig[1] = *GenTestProfile()
orig[3] = *GenTestProfile()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ProfilesData represents the profiles data that can be stored in persistent storage,
// OR can be embedded by other protocols that transfer OTLP profiles data but do not
// implement the OTLP protocol.
type ProfilesData struct {
ResourceProfiles []*ResourceProfiles
Dictionary ProfilesDictionary
}
var (
protoPoolProfilesData = sync.Pool{
New: func() any {
return &ProfilesData{}
},
}
)
func NewProfilesData() *ProfilesData {
if !UseProtoPooling.IsEnabled() {
return &ProfilesData{}
}
return protoPoolProfilesData.Get().(*ProfilesData)
}
func DeleteProfilesData(orig *ProfilesData, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceProfiles {
DeleteResourceProfiles(orig.ResourceProfiles[i], true)
}
DeleteProfilesDictionary(&orig.Dictionary, false)
orig.Reset()
if nullable {
protoPoolProfilesData.Put(orig)
}
}
func CopyProfilesData(dest, src *ProfilesData) *ProfilesData {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewProfilesData()
}
dest.ResourceProfiles = CopyResourceProfilesPtrSlice(dest.ResourceProfiles, src.ResourceProfiles)
CopyProfilesDictionary(&dest.Dictionary, &src.Dictionary)
return dest
}
func CopyProfilesDataSlice(dest, src []ProfilesData) []ProfilesData {
var newDest []ProfilesData
if cap(dest) < len(src) {
newDest = make([]ProfilesData, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesData(&dest[i], false)
}
}
for i := range src {
CopyProfilesData(&newDest[i], &src[i])
}
return newDest
}
func CopyProfilesDataPtrSlice(dest, src []*ProfilesData) []*ProfilesData {
var newDest []*ProfilesData
if cap(dest) < len(src) {
newDest = make([]*ProfilesData, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesData()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesData(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesData()
}
}
for i := range src {
CopyProfilesData(newDest[i], src[i])
}
return newDest
}
func (orig *ProfilesData) Reset() {
*orig = ProfilesData{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ProfilesData) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceProfiles) > 0 {
dest.WriteObjectField("resourceProfiles")
dest.WriteArrayStart()
orig.ResourceProfiles[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceProfiles); i++ {
dest.WriteMore()
orig.ResourceProfiles[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectField("dictionary")
orig.Dictionary.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ProfilesData) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceProfiles", "resource_profiles":
for iter.ReadArray() {
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalJSON(iter)
}
case "dictionary":
orig.Dictionary.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *ProfilesData) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceProfiles {
l = orig.ResourceProfiles[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.Dictionary.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *ProfilesData) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceProfiles) - 1; i >= 0; i-- {
l = orig.ResourceProfiles[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = orig.Dictionary.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
return len(buf) - pos
}
func (orig *ProfilesData) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceProfiles = append(orig.ResourceProfiles, NewResourceProfiles())
err = orig.ResourceProfiles[len(orig.ResourceProfiles)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Dictionary", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Dictionary.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
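// Illustrative sketch (not generated code): CopyProfilesData returns its
// destination, allocating one when dest is nil, so callers should use the
// returned pointer rather than rely on in-place mutation alone.
func exampleCopyProfilesData() *ProfilesData {
return CopyProfilesData(nil, GenTestProfilesData())
}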
func GenTestProfilesData() *ProfilesData {
orig := NewProfilesData()
orig.ResourceProfiles = []*ResourceProfiles{{}, GenTestResourceProfiles()}
orig.Dictionary = *GenTestProfilesDictionary()
return orig
}
func GenTestProfilesDataPtrSlice() []*ProfilesData {
orig := make([]*ProfilesData, 5)
orig[0] = NewProfilesData()
orig[1] = GenTestProfilesData()
orig[2] = NewProfilesData()
orig[3] = GenTestProfilesData()
orig[4] = NewProfilesData()
return orig
}
func GenTestProfilesDataSlice() []ProfilesData {
orig := make([]ProfilesData, 5)
orig[1] = *GenTestProfilesData()
orig[3] = *GenTestProfilesData()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
type ProfilesDictionary struct {
MappingTable []*Mapping
LocationTable []*Location
FunctionTable []*Function
LinkTable []*Link
StringTable []string
AttributeTable []*KeyValueAndUnit
StackTable []*Stack
}
var (
protoPoolProfilesDictionary = sync.Pool{
New: func() any {
return &ProfilesDictionary{}
},
}
)
func NewProfilesDictionary() *ProfilesDictionary {
if !UseProtoPooling.IsEnabled() {
return &ProfilesDictionary{}
}
return protoPoolProfilesDictionary.Get().(*ProfilesDictionary)
}
func DeleteProfilesDictionary(orig *ProfilesDictionary, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.MappingTable {
DeleteMapping(orig.MappingTable[i], true)
}
for i := range orig.LocationTable {
DeleteLocation(orig.LocationTable[i], true)
}
for i := range orig.FunctionTable {
DeleteFunction(orig.FunctionTable[i], true)
}
for i := range orig.LinkTable {
DeleteLink(orig.LinkTable[i], true)
}
for i := range orig.AttributeTable {
DeleteKeyValueAndUnit(orig.AttributeTable[i], true)
}
for i := range orig.StackTable {
DeleteStack(orig.StackTable[i], true)
}
orig.Reset()
if nullable {
protoPoolProfilesDictionary.Put(orig)
}
}
func CopyProfilesDictionary(dest, src *ProfilesDictionary) *ProfilesDictionary {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewProfilesDictionary()
}
dest.MappingTable = CopyMappingPtrSlice(dest.MappingTable, src.MappingTable)
dest.LocationTable = CopyLocationPtrSlice(dest.LocationTable, src.LocationTable)
dest.FunctionTable = CopyFunctionPtrSlice(dest.FunctionTable, src.FunctionTable)
dest.LinkTable = CopyLinkPtrSlice(dest.LinkTable, src.LinkTable)
dest.StringTable = append(dest.StringTable[:0], src.StringTable...)
dest.AttributeTable = CopyKeyValueAndUnitPtrSlice(dest.AttributeTable, src.AttributeTable)
dest.StackTable = CopyStackPtrSlice(dest.StackTable, src.StackTable)
return dest
}
func CopyProfilesDictionarySlice(dest, src []ProfilesDictionary) []ProfilesDictionary {
var newDest []ProfilesDictionary
if cap(dest) < len(src) {
newDest = make([]ProfilesDictionary, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesDictionary(&dest[i], false)
}
}
for i := range src {
CopyProfilesDictionary(&newDest[i], &src[i])
}
return newDest
}
func CopyProfilesDictionaryPtrSlice(dest, src []*ProfilesDictionary) []*ProfilesDictionary {
var newDest []*ProfilesDictionary
if cap(dest) < len(src) {
newDest = make([]*ProfilesDictionary, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesDictionary()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesDictionary(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesDictionary()
}
}
for i := range src {
CopyProfilesDictionary(newDest[i], src[i])
}
return newDest
}
func (orig *ProfilesDictionary) Reset() {
*orig = ProfilesDictionary{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ProfilesDictionary) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.MappingTable) > 0 {
dest.WriteObjectField("mappingTable")
dest.WriteArrayStart()
orig.MappingTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.MappingTable); i++ {
dest.WriteMore()
orig.MappingTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.LocationTable) > 0 {
dest.WriteObjectField("locationTable")
dest.WriteArrayStart()
orig.LocationTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.LocationTable); i++ {
dest.WriteMore()
orig.LocationTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.FunctionTable) > 0 {
dest.WriteObjectField("functionTable")
dest.WriteArrayStart()
orig.FunctionTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.FunctionTable); i++ {
dest.WriteMore()
orig.FunctionTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.LinkTable) > 0 {
dest.WriteObjectField("linkTable")
dest.WriteArrayStart()
orig.LinkTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.LinkTable); i++ {
dest.WriteMore()
orig.LinkTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.StringTable) > 0 {
dest.WriteObjectField("stringTable")
dest.WriteArrayStart()
dest.WriteString(orig.StringTable[0])
for i := 1; i < len(orig.StringTable); i++ {
dest.WriteMore()
dest.WriteString(orig.StringTable[i])
}
dest.WriteArrayEnd()
}
if len(orig.AttributeTable) > 0 {
dest.WriteObjectField("attributeTable")
dest.WriteArrayStart()
orig.AttributeTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.AttributeTable); i++ {
dest.WriteMore()
orig.AttributeTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if len(orig.StackTable) > 0 {
dest.WriteObjectField("stackTable")
dest.WriteArrayStart()
orig.StackTable[0].MarshalJSON(dest)
for i := 1; i < len(orig.StackTable); i++ {
dest.WriteMore()
orig.StackTable[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ProfilesDictionary) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "mappingTable", "mapping_table":
for iter.ReadArray() {
orig.MappingTable = append(orig.MappingTable, NewMapping())
orig.MappingTable[len(orig.MappingTable)-1].UnmarshalJSON(iter)
}
case "locationTable", "location_table":
for iter.ReadArray() {
orig.LocationTable = append(orig.LocationTable, NewLocation())
orig.LocationTable[len(orig.LocationTable)-1].UnmarshalJSON(iter)
}
case "functionTable", "function_table":
for iter.ReadArray() {
orig.FunctionTable = append(orig.FunctionTable, NewFunction())
orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalJSON(iter)
}
case "linkTable", "link_table":
for iter.ReadArray() {
orig.LinkTable = append(orig.LinkTable, NewLink())
orig.LinkTable[len(orig.LinkTable)-1].UnmarshalJSON(iter)
}
case "stringTable", "string_table":
for iter.ReadArray() {
orig.StringTable = append(orig.StringTable, iter.ReadString())
}
case "attributeTable", "attribute_table":
for iter.ReadArray() {
orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalJSON(iter)
}
case "stackTable", "stack_table":
for iter.ReadArray() {
orig.StackTable = append(orig.StackTable, NewStack())
orig.StackTable[len(orig.StackTable)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ProfilesDictionary) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.MappingTable {
l = orig.MappingTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.LocationTable {
l = orig.LocationTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.FunctionTable {
l = orig.FunctionTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.LinkTable {
l = orig.LinkTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for _, s := range orig.StringTable {
l = len(s)
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.AttributeTable {
l = orig.AttributeTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.StackTable {
l = orig.StackTable[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ProfilesDictionary) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.MappingTable) - 1; i >= 0; i-- {
l = orig.MappingTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
for i := len(orig.LocationTable) - 1; i >= 0; i-- {
l = orig.LocationTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.FunctionTable) - 1; i >= 0; i-- {
l = orig.FunctionTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.LinkTable) - 1; i >= 0; i-- {
l = orig.LinkTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
for i := len(orig.StringTable) - 1; i >= 0; i-- {
l = len(orig.StringTable[i])
pos -= l
copy(buf[pos:], orig.StringTable[i])
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
for i := len(orig.AttributeTable) - 1; i >= 0; i-- {
l = orig.AttributeTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
for i := len(orig.StackTable) - 1; i >= 0; i-- {
l = orig.StackTable[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
return len(buf) - pos
}
func (orig *ProfilesDictionary) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.MappingTable = append(orig.MappingTable, NewMapping())
err = orig.MappingTable[len(orig.MappingTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LocationTable = append(orig.LocationTable, NewLocation())
err = orig.LocationTable[len(orig.LocationTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.FunctionTable = append(orig.FunctionTable, NewFunction())
err = orig.FunctionTable[len(orig.FunctionTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LinkTable = append(orig.LinkTable, NewLink())
err = orig.LinkTable[len(orig.LinkTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.StringTable = append(orig.StringTable, string(buf[startPos:pos]))
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.AttributeTable = append(orig.AttributeTable, NewKeyValueAndUnit())
err = orig.AttributeTable[len(orig.AttributeTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field StackTable", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.StackTable = append(orig.StackTable, NewStack())
err = orig.StackTable[len(orig.StackTable)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
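// Illustrative sketch (not generated code): other profile messages refer into
// these tables by index. Following the convention used by
// GenTestProfilesDictionary below, StringTable index 0 is the empty string, so
// 0 works as an "unset" index; bounds are checked defensively here.
func exampleStringTableLookup(dict *ProfilesDictionary, idx int32) string {
if idx < 0 || int(idx) >= len(dict.StringTable) {
return ""
}
return dict.StringTable[idx]
}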
func GenTestProfilesDictionary() *ProfilesDictionary {
orig := NewProfilesDictionary()
orig.MappingTable = []*Mapping{{}, GenTestMapping()}
orig.LocationTable = []*Location{{}, GenTestLocation()}
orig.FunctionTable = []*Function{{}, GenTestFunction()}
orig.LinkTable = []*Link{{}, GenTestLink()}
orig.StringTable = []string{"", "test_stringtable"}
orig.AttributeTable = []*KeyValueAndUnit{{}, GenTestKeyValueAndUnit()}
orig.StackTable = []*Stack{{}, GenTestStack()}
return orig
}
func GenTestProfilesDictionaryPtrSlice() []*ProfilesDictionary {
orig := make([]*ProfilesDictionary, 5)
orig[0] = NewProfilesDictionary()
orig[1] = GenTestProfilesDictionary()
orig[2] = NewProfilesDictionary()
orig[3] = GenTestProfilesDictionary()
orig[4] = NewProfilesDictionary()
return orig
}
func GenTestProfilesDictionarySlice() []ProfilesDictionary {
orig := make([]ProfilesDictionary, 5)
orig[1] = *GenTestProfilesDictionary()
orig[3] = *GenTestProfilesDictionary()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type ProfilesRequest struct {
RequestContext *RequestContext
ProfilesData ProfilesData
FormatVersion uint32
}
var (
protoPoolProfilesRequest = sync.Pool{
New: func() any {
return &ProfilesRequest{}
},
}
)
func NewProfilesRequest() *ProfilesRequest {
if !UseProtoPooling.IsEnabled() {
return &ProfilesRequest{}
}
return protoPoolProfilesRequest.Get().(*ProfilesRequest)
}
func DeleteProfilesRequest(orig *ProfilesRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteRequestContext(orig.RequestContext, true)
DeleteProfilesData(&orig.ProfilesData, false)
orig.Reset()
if nullable {
protoPoolProfilesRequest.Put(orig)
}
}
func CopyProfilesRequest(dest, src *ProfilesRequest) *ProfilesRequest {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewProfilesRequest()
}
dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
CopyProfilesData(&dest.ProfilesData, &src.ProfilesData)
dest.FormatVersion = src.FormatVersion
return dest
}
func CopyProfilesRequestSlice(dest, src []ProfilesRequest) []ProfilesRequest {
var newDest []ProfilesRequest
if cap(dest) < len(src) {
newDest = make([]ProfilesRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesRequest(&dest[i], false)
}
}
for i := range src {
CopyProfilesRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyProfilesRequestPtrSlice(dest, src []*ProfilesRequest) []*ProfilesRequest {
var newDest []*ProfilesRequest
if cap(dest) < len(src) {
newDest = make([]*ProfilesRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteProfilesRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewProfilesRequest()
}
}
for i := range src {
CopyProfilesRequest(newDest[i], src[i])
}
return newDest
}
func (orig *ProfilesRequest) Reset() {
*orig = ProfilesRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ProfilesRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RequestContext != nil {
dest.WriteObjectField("requestContext")
orig.RequestContext.MarshalJSON(dest)
}
dest.WriteObjectField("profilesData")
orig.ProfilesData.MarshalJSON(dest)
if orig.FormatVersion != uint32(0) {
dest.WriteObjectField("formatVersion")
dest.WriteUint32(orig.FormatVersion)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ProfilesRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "requestContext", "request_context":
orig.RequestContext = NewRequestContext()
orig.RequestContext.UnmarshalJSON(iter)
case "profilesData", "profiles_data":
orig.ProfilesData.UnmarshalJSON(iter)
case "formatVersion", "format_version":
orig.FormatVersion = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *ProfilesRequest) SizeProto() int {
var n int
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.ProfilesData.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.FormatVersion != 0 {
n += 5
}
return n
}
func (orig *ProfilesRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = orig.ProfilesData.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
if orig.FormatVersion != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
pos--
buf[pos] = 0xd
}
return len(buf) - pos
}
func (orig *ProfilesRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.RequestContext = NewRequestContext()
err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ProfilesData", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.ProfilesData.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 1:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.FormatVersion = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
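// Illustrative sketch (not generated code): FormatVersion is a fixed32 field
// (field 1, tag 0xd), so when non-zero it always costs 5 bytes on the wire,
// which is why SizeProto above adds a flat 5 instead of a varint size.
func exampleProfilesRequestSize() int {
return GenTestProfilesRequest().SizeProto()
}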
func GenTestProfilesRequest() *ProfilesRequest {
orig := NewProfilesRequest()
orig.RequestContext = GenTestRequestContext()
orig.ProfilesData = *GenTestProfilesData()
orig.FormatVersion = uint32(13)
return orig
}
func GenTestProfilesRequestPtrSlice() []*ProfilesRequest {
orig := make([]*ProfilesRequest, 5)
orig[0] = NewProfilesRequest()
orig[1] = GenTestProfilesRequest()
orig[2] = NewProfilesRequest()
orig[3] = GenTestProfilesRequest()
orig[4] = NewProfilesRequest()
return orig
}
func GenTestProfilesRequestSlice() []ProfilesRequest {
orig := make([]ProfilesRequest, 5)
orig[1] = *GenTestProfilesRequest()
orig[3] = *GenTestProfilesRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
func (m *RequestContext) GetClientAddress() any {
if m != nil {
return m.ClientAddress
}
return nil
}
type RequestContext_IP struct {
IP *IPAddr
}
func (m *RequestContext) GetIP() *IPAddr {
if v, ok := m.GetClientAddress().(*RequestContext_IP); ok {
return v.IP
}
return nil
}
type RequestContext_TCP struct {
TCP *TCPAddr
}
func (m *RequestContext) GetTCP() *TCPAddr {
if v, ok := m.GetClientAddress().(*RequestContext_TCP); ok {
return v.TCP
}
return nil
}
type RequestContext_UDP struct {
UDP *UDPAddr
}
func (m *RequestContext) GetUDP() *UDPAddr {
if v, ok := m.GetClientAddress().(*RequestContext_UDP); ok {
return v.UDP
}
return nil
}
type RequestContext_Unix struct {
Unix *UnixAddr
}
func (m *RequestContext) GetUnix() *UnixAddr {
if v, ok := m.GetClientAddress().(*RequestContext_Unix); ok {
return v.Unix
}
return nil
}
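// Illustrative sketch (not generated code): the ClientAddress oneof can be
// read through the typed getters above, or with a type switch over the
// wrapper structs when all variants need handling.
func exampleClientAddressKind(rc *RequestContext) string {
switch rc.GetClientAddress().(type) {
case *RequestContext_IP:
return "ip"
case *RequestContext_TCP:
return "tcp"
case *RequestContext_UDP:
return "udp"
case *RequestContext_Unix:
return "unix"
default:
return "unset"
}
}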
type RequestContext struct {
SpanContext *SpanContext
ClientMetadata []KeyValue
ClientAddress any
}
var (
protoPoolRequestContext = sync.Pool{
New: func() any {
return &RequestContext{}
},
}
ProtoPoolRequestContext_IP = sync.Pool{
New: func() any {
return &RequestContext_IP{}
},
}
ProtoPoolRequestContext_TCP = sync.Pool{
New: func() any {
return &RequestContext_TCP{}
},
}
ProtoPoolRequestContext_UDP = sync.Pool{
New: func() any {
return &RequestContext_UDP{}
},
}
ProtoPoolRequestContext_Unix = sync.Pool{
New: func() any {
return &RequestContext_Unix{}
},
}
)
func NewRequestContext() *RequestContext {
if !UseProtoPooling.IsEnabled() {
return &RequestContext{}
}
return protoPoolRequestContext.Get().(*RequestContext)
}
func DeleteRequestContext(orig *RequestContext, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteSpanContext(orig.SpanContext, true)
for i := range orig.ClientMetadata {
DeleteKeyValue(&orig.ClientMetadata[i], false)
}
switch ov := orig.ClientAddress.(type) {
case *RequestContext_IP:
DeleteIPAddr(ov.IP, true)
ov.IP = nil
ProtoPoolRequestContext_IP.Put(ov)
case *RequestContext_TCP:
DeleteTCPAddr(ov.TCP, true)
ov.TCP = nil
ProtoPoolRequestContext_TCP.Put(ov)
case *RequestContext_UDP:
DeleteUDPAddr(ov.UDP, true)
ov.UDP = nil
ProtoPoolRequestContext_UDP.Put(ov)
case *RequestContext_Unix:
DeleteUnixAddr(ov.Unix, true)
ov.Unix = nil
ProtoPoolRequestContext_Unix.Put(ov)
}
orig.Reset()
if nullable {
protoPoolRequestContext.Put(orig)
}
}
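// Illustrative sketch (not generated code): when UseProtoPooling is enabled,
// New* draws from a sync.Pool and Delete* with nullable=true returns the
// object to it, so pairing the calls keeps allocations flat on hot paths.
func exampleRequestContextLifecycle() {
rc := NewRequestContext()
// ... populate and use rc ...
DeleteRequestContext(rc, true)
}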
func CopyRequestContext(dest, src *RequestContext) *RequestContext {
// If copying to the same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewRequestContext()
}
dest.SpanContext = CopySpanContext(dest.SpanContext, src.SpanContext)
dest.ClientMetadata = CopyKeyValueSlice(dest.ClientMetadata, src.ClientMetadata)
switch t := src.ClientAddress.(type) {
case *RequestContext_IP:
var ov *RequestContext_IP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_IP{}
} else {
ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
}
ov.IP = NewIPAddr()
CopyIPAddr(ov.IP, t.IP)
dest.ClientAddress = ov
case *RequestContext_TCP:
var ov *RequestContext_TCP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_TCP{}
} else {
ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
}
ov.TCP = NewTCPAddr()
CopyTCPAddr(ov.TCP, t.TCP)
dest.ClientAddress = ov
case *RequestContext_UDP:
var ov *RequestContext_UDP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_UDP{}
} else {
ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
}
ov.UDP = NewUDPAddr()
CopyUDPAddr(ov.UDP, t.UDP)
dest.ClientAddress = ov
case *RequestContext_Unix:
var ov *RequestContext_Unix
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_Unix{}
} else {
ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
}
ov.Unix = NewUnixAddr()
CopyUnixAddr(ov.Unix, t.Unix)
dest.ClientAddress = ov
default:
dest.ClientAddress = nil
}
return dest
}
func CopyRequestContextSlice(dest, src []RequestContext) []RequestContext {
var newDest []RequestContext
if cap(dest) < len(src) {
newDest = make([]RequestContext, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteRequestContext(&dest[i], false)
}
}
for i := range src {
CopyRequestContext(&newDest[i], &src[i])
}
return newDest
}
func CopyRequestContextPtrSlice(dest, src []*RequestContext) []*RequestContext {
var newDest []*RequestContext
if cap(dest) < len(src) {
newDest = make([]*RequestContext, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewRequestContext()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteRequestContext(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewRequestContext()
}
}
for i := range src {
CopyRequestContext(newDest[i], src[i])
}
return newDest
}
func (orig *RequestContext) Reset() {
*orig = RequestContext{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *RequestContext) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.SpanContext != nil {
dest.WriteObjectField("spanContext")
orig.SpanContext.MarshalJSON(dest)
}
if len(orig.ClientMetadata) > 0 {
dest.WriteObjectField("clientMetadata")
dest.WriteArrayStart()
orig.ClientMetadata[0].MarshalJSON(dest)
for i := 1; i < len(orig.ClientMetadata); i++ {
dest.WriteMore()
orig.ClientMetadata[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
switch orig := orig.ClientAddress.(type) {
case *RequestContext_IP:
if orig.IP != nil {
dest.WriteObjectField("iP")
orig.IP.MarshalJSON(dest)
}
case *RequestContext_TCP:
if orig.TCP != nil {
dest.WriteObjectField("tCP")
orig.TCP.MarshalJSON(dest)
}
case *RequestContext_UDP:
if orig.UDP != nil {
dest.WriteObjectField("uDP")
orig.UDP.MarshalJSON(dest)
}
case *RequestContext_Unix:
if orig.Unix != nil {
dest.WriteObjectField("unix")
orig.Unix.MarshalJSON(dest)
}
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *RequestContext) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "spanContext", "span_context":
orig.SpanContext = NewSpanContext()
orig.SpanContext.UnmarshalJSON(iter)
case "clientMetadata", "client_metadata":
for iter.ReadArray() {
orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalJSON(iter)
}
case "iP":
{
var ov *RequestContext_IP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_IP{}
} else {
ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
}
ov.IP = NewIPAddr()
ov.IP.UnmarshalJSON(iter)
orig.ClientAddress = ov
}
case "tCP":
{
var ov *RequestContext_TCP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_TCP{}
} else {
ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
}
ov.TCP = NewTCPAddr()
ov.TCP.UnmarshalJSON(iter)
orig.ClientAddress = ov
}
case "uDP":
{
var ov *RequestContext_UDP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_UDP{}
} else {
ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
}
ov.UDP = NewUDPAddr()
ov.UDP.UnmarshalJSON(iter)
orig.ClientAddress = ov
}
case "unix":
{
var ov *RequestContext_Unix
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_Unix{}
} else {
ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
}
ov.Unix = NewUnixAddr()
ov.Unix.UnmarshalJSON(iter)
orig.ClientAddress = ov
}
default:
iter.Skip()
}
}
}
func (orig *RequestContext) SizeProto() int {
var n int
var l int
_ = l
if orig.SpanContext != nil {
l = orig.SpanContext.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.ClientMetadata {
l = orig.ClientMetadata[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
switch orig := orig.ClientAddress.(type) {
case nil:
_ = orig
case *RequestContext_IP:
if orig.IP != nil {
l = orig.IP.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *RequestContext_TCP:
if orig.TCP != nil {
l = orig.TCP.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *RequestContext_UDP:
if orig.UDP != nil {
l = orig.UDP.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
case *RequestContext_Unix:
if orig.Unix != nil {
l = orig.Unix.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
}
return n
}
func (orig *RequestContext) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.SpanContext != nil {
l = orig.SpanContext.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
for i := len(orig.ClientMetadata) - 1; i >= 0; i-- {
l = orig.ClientMetadata[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
switch orig := orig.ClientAddress.(type) {
case *RequestContext_IP:
if orig.IP != nil {
l = orig.IP.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
case *RequestContext_TCP:
if orig.TCP != nil {
l = orig.TCP.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
case *RequestContext_UDP:
if orig.UDP != nil {
l = orig.UDP.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
case *RequestContext_Unix:
if orig.Unix != nil {
l = orig.Unix.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
}
return len(buf) - pos
}
func (orig *RequestContext) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanContext", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SpanContext = NewSpanContext()
err = orig.SpanContext.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ClientMetadata = append(orig.ClientMetadata, KeyValue{})
err = orig.ClientMetadata[len(orig.ClientMetadata)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *RequestContext_IP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_IP{}
} else {
ov = ProtoPoolRequestContext_IP.Get().(*RequestContext_IP)
}
ov.IP = NewIPAddr()
err = ov.IP.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.ClientAddress = ov
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TCP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *RequestContext_TCP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_TCP{}
} else {
ov = ProtoPoolRequestContext_TCP.Get().(*RequestContext_TCP)
}
ov.TCP = NewTCPAddr()
err = ov.TCP.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.ClientAddress = ov
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field UDP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *RequestContext_UDP
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_UDP{}
} else {
ov = ProtoPoolRequestContext_UDP.Get().(*RequestContext_UDP)
}
ov.UDP = NewUDPAddr()
err = ov.UDP.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.ClientAddress = ov
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Unix", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var ov *RequestContext_Unix
if !UseProtoPooling.IsEnabled() {
ov = &RequestContext_Unix{}
} else {
ov = ProtoPoolRequestContext_Unix.Get().(*RequestContext_Unix)
}
ov.Unix = NewUnixAddr()
err = ov.Unix.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
orig.ClientAddress = ov
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
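// Note (explanatory, not generated): fields 3 through 6 above all populate the
// ClientAddress oneof, so the last variant seen on the wire wins; the wrapper
// structs are taken from their sync.Pools when pooling is enabled.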
func GenTestRequestContext() *RequestContext {
orig := NewRequestContext()
orig.SpanContext = GenTestSpanContext()
orig.ClientMetadata = []KeyValue{{}, *GenTestKeyValue()}
orig.ClientAddress = &RequestContext_IP{IP: GenTestIPAddr()}
return orig
}
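// Note (explanatory, not generated): the GenTest* helpers build deterministic
// fixtures for tests; the slice variants below interleave empty and populated
// elements, presumably to exercise copy and delete paths over partially
// filled slices.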
func GenTestRequestContextPtrSlice() []*RequestContext {
orig := make([]*RequestContext, 5)
orig[0] = NewRequestContext()
orig[1] = GenTestRequestContext()
orig[2] = NewRequestContext()
orig[3] = GenTestRequestContext()
orig[4] = NewRequestContext()
return orig
}
func GenTestRequestContextSlice() []RequestContext {
orig := make([]RequestContext, 5)
orig[1] = *GenTestRequestContext()
orig[3] = *GenTestRequestContext()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Resource is a message representing resource information.
type Resource struct {
Attributes []KeyValue
DroppedAttributesCount uint32
EntityRefs []*EntityRef
}
var (
protoPoolResource = sync.Pool{
New: func() any {
return &Resource{}
},
}
)
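// Note (explanatory, not generated): when the UseProtoPooling feature gate is
// enabled, NewResource below draws instances from protoPoolResource and
// DeleteResource returns them; the "nullable" flag marks pooled pointers
// (which go back to the pool) as opposed to by-value struct members (which
// are only reset in place).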
func NewResource() *Resource {
if !UseProtoPooling.IsEnabled() {
return &Resource{}
}
return protoPoolResource.Get().(*Resource)
}
func DeleteResource(orig *Resource, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.EntityRefs {
DeleteEntityRef(orig.EntityRefs[i], true)
}
orig.Reset()
if nullable {
protoPoolResource.Put(orig)
}
}
func CopyResource(dest, src *Resource) *Resource {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewResource()
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.EntityRefs = CopyEntityRefPtrSlice(dest.EntityRefs, src.EntityRefs)
return dest
}
func CopyResourceSlice(dest, src []Resource) []Resource {
var newDest []Resource
if cap(dest) < len(src) {
newDest = make([]Resource, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResource(&dest[i], false)
}
}
for i := range src {
CopyResource(&newDest[i], &src[i])
}
return newDest
}
func CopyResourcePtrSlice(dest, src []*Resource) []*Resource {
var newDest []*Resource
if cap(dest) < len(src) {
newDest = make([]*Resource, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResource()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResource(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResource()
}
}
for i := range src {
CopyResource(newDest[i], src[i])
}
return newDest
}
func (orig *Resource) Reset() {
*orig = Resource{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Resource) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if len(orig.EntityRefs) > 0 {
dest.WriteObjectField("entityRefs")
dest.WriteArrayStart()
orig.EntityRefs[0].MarshalJSON(dest)
for i := 1; i < len(orig.EntityRefs); i++ {
dest.WriteMore()
orig.EntityRefs[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Resource) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "entityRefs", "entity_refs":
for iter.ReadArray() {
orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *Resource) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
for i := range orig.EntityRefs {
l = orig.EntityRefs[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Resource) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x10
}
for i := len(orig.EntityRefs) - 1; i >= 0; i-- {
l = orig.EntityRefs[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *Resource) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field EntityRefs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.EntityRefs = append(orig.EntityRefs, NewEntityRef())
err = orig.EntityRefs[len(orig.EntityRefs)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
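// exampleResourceRoundTrip is a hand-written sketch (not generated code)
// showing how the Size/Marshal/Unmarshal trio fits together: SizeProto
// reports the exact encoded size, MarshalProto fills the buffer backwards
// from its end, and with an exactly sized buffer the returned count covers
// the whole buffer.
func exampleResourceRoundTrip(orig *Resource) (*Resource, error) {
buf := make([]byte, orig.SizeProto())
if n := orig.MarshalProto(buf); n != len(buf) {
return nil, fmt.Errorf("short marshal: wrote %d of %d bytes", n, len(buf))
}
dest := NewResource()
if err := dest.UnmarshalProto(buf); err != nil {
return nil, err
}
return dest, nil
}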
func GenTestResource() *Resource {
orig := NewResource()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
orig.EntityRefs = []*EntityRef{{}, GenTestEntityRef()}
return orig
}
func GenTestResourcePtrSlice() []*Resource {
orig := make([]*Resource, 5)
orig[0] = NewResource()
orig[1] = GenTestResource()
orig[2] = NewResource()
orig[3] = GenTestResource()
orig[4] = NewResource()
return orig
}
func GenTestResourceSlice() []Resource {
orig := make([]Resource, 5)
orig[1] = *GenTestResource()
orig[3] = *GenTestResource()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ResourceLogs is a collection of logs from a Resource.
type ResourceLogs struct {
Resource Resource
ScopeLogs []*ScopeLogs
SchemaUrl string
DeprecatedScopeLogs []*ScopeLogs
}
var (
protoPoolResourceLogs = sync.Pool{
New: func() any {
return &ResourceLogs{}
},
}
)
func NewResourceLogs() *ResourceLogs {
if !UseProtoPooling.IsEnabled() {
return &ResourceLogs{}
}
return protoPoolResourceLogs.Get().(*ResourceLogs)
}
func DeleteResourceLogs(orig *ResourceLogs, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeLogs {
DeleteScopeLogs(orig.ScopeLogs[i], true)
}
for i := range orig.DeprecatedScopeLogs {
DeleteScopeLogs(orig.DeprecatedScopeLogs[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceLogs.Put(orig)
}
}
func CopyResourceLogs(dest, src *ResourceLogs) *ResourceLogs {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewResourceLogs()
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeLogs = CopyScopeLogsPtrSlice(dest.ScopeLogs, src.ScopeLogs)
dest.SchemaUrl = src.SchemaUrl
dest.DeprecatedScopeLogs = CopyScopeLogsPtrSlice(dest.DeprecatedScopeLogs, src.DeprecatedScopeLogs)
return dest
}
func CopyResourceLogsSlice(dest, src []ResourceLogs) []ResourceLogs {
var newDest []ResourceLogs
if cap(dest) < len(src) {
newDest = make([]ResourceLogs, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceLogs(&dest[i], false)
}
}
for i := range src {
CopyResourceLogs(&newDest[i], &src[i])
}
return newDest
}
func CopyResourceLogsPtrSlice(dest, src []*ResourceLogs) []*ResourceLogs {
var newDest []*ResourceLogs
if cap(dest) < len(src) {
newDest = make([]*ResourceLogs, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceLogs()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceLogs(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceLogs()
}
}
for i := range src {
CopyResourceLogs(newDest[i], src[i])
}
return newDest
}
func (orig *ResourceLogs) Reset() {
*orig = ResourceLogs{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ResourceLogs) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
orig.Resource.MarshalJSON(dest)
if len(orig.ScopeLogs) > 0 {
dest.WriteObjectField("scopeLogs")
dest.WriteArrayStart()
orig.ScopeLogs[0].MarshalJSON(dest)
for i := 1; i < len(orig.ScopeLogs); i++ {
dest.WriteMore()
orig.ScopeLogs[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
if len(orig.DeprecatedScopeLogs) > 0 {
dest.WriteObjectField("deprecatedScopeLogs")
dest.WriteArrayStart()
orig.DeprecatedScopeLogs[0].MarshalJSON(dest)
for i := 1; i < len(orig.DeprecatedScopeLogs); i++ {
dest.WriteMore()
orig.DeprecatedScopeLogs[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ResourceLogs) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
orig.Resource.UnmarshalJSON(iter)
case "scopeLogs", "scope_logs":
for iter.ReadArray() {
orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
case "deprecatedScopeLogs", "deprecated_scope_logs":
for iter.ReadArray() {
orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ResourceLogs) SizeProto() int {
var n int
var l int
_ = l
l = orig.Resource.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeLogs {
l = orig.ScopeLogs[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.DeprecatedScopeLogs {
l = orig.DeprecatedScopeLogs[i].SizeProto()
n += 2 + proto.Sov(uint64(l)) + l
}
return n
}
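// Note (explanatory, not generated): DeprecatedScopeLogs is proto field 1000,
// whose tag (1000<<3 | 2 = 8002) needs a two-byte varint, 0xc2 0x3e. Since
// MarshalProto below writes backwards, it emits 0x3e and then 0xc2, so the
// bytes read forward as 0xc2 0x3e; SizeProto above accordingly charges two
// tag bytes ("2 + ...") for this field instead of one.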
func (orig *ResourceLogs) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Resource.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeLogs) - 1; i >= 0; i-- {
l = orig.ScopeLogs[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.DeprecatedScopeLogs) - 1; i >= 0; i-- {
l = orig.DeprecatedScopeLogs[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3e
pos--
buf[pos] = 0xc2
}
return len(buf) - pos
}
func (orig *ResourceLogs) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Resource.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeLogs = append(orig.ScopeLogs, NewScopeLogs())
err = orig.ScopeLogs[len(orig.ScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
case 1000:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeLogs", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DeprecatedScopeLogs = append(orig.DeprecatedScopeLogs, NewScopeLogs())
err = orig.DeprecatedScopeLogs[len(orig.DeprecatedScopeLogs)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestResourceLogs() *ResourceLogs {
orig := NewResourceLogs()
orig.Resource = *GenTestResource()
orig.ScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
orig.SchemaUrl = "test_schemaurl"
orig.DeprecatedScopeLogs = []*ScopeLogs{{}, GenTestScopeLogs()}
return orig
}
func GenTestResourceLogsPtrSlice() []*ResourceLogs {
orig := make([]*ResourceLogs, 5)
orig[0] = NewResourceLogs()
orig[1] = GenTestResourceLogs()
orig[2] = NewResourceLogs()
orig[3] = GenTestResourceLogs()
orig[4] = NewResourceLogs()
return orig
}
func GenTestResourceLogsSlice() []ResourceLogs {
orig := make([]ResourceLogs, 5)
orig[1] = *GenTestResourceLogs()
orig[3] = *GenTestResourceLogs()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ResourceMetrics is a collection of metrics from a Resource.
type ResourceMetrics struct {
Resource Resource
ScopeMetrics []*ScopeMetrics
SchemaUrl string
DeprecatedScopeMetrics []*ScopeMetrics
}
var (
protoPoolResourceMetrics = sync.Pool{
New: func() any {
return &ResourceMetrics{}
},
}
)
func NewResourceMetrics() *ResourceMetrics {
if !UseProtoPooling.IsEnabled() {
return &ResourceMetrics{}
}
return protoPoolResourceMetrics.Get().(*ResourceMetrics)
}
func DeleteResourceMetrics(orig *ResourceMetrics, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeMetrics {
DeleteScopeMetrics(orig.ScopeMetrics[i], true)
}
for i := range orig.DeprecatedScopeMetrics {
DeleteScopeMetrics(orig.DeprecatedScopeMetrics[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceMetrics.Put(orig)
}
}
func CopyResourceMetrics(dest, src *ResourceMetrics) *ResourceMetrics {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewResourceMetrics()
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeMetrics = CopyScopeMetricsPtrSlice(dest.ScopeMetrics, src.ScopeMetrics)
dest.SchemaUrl = src.SchemaUrl
dest.DeprecatedScopeMetrics = CopyScopeMetricsPtrSlice(dest.DeprecatedScopeMetrics, src.DeprecatedScopeMetrics)
return dest
}
func CopyResourceMetricsSlice(dest, src []ResourceMetrics) []ResourceMetrics {
var newDest []ResourceMetrics
if cap(dest) < len(src) {
newDest = make([]ResourceMetrics, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceMetrics(&dest[i], false)
}
}
for i := range src {
CopyResourceMetrics(&newDest[i], &src[i])
}
return newDest
}
func CopyResourceMetricsPtrSlice(dest, src []*ResourceMetrics) []*ResourceMetrics {
var newDest []*ResourceMetrics
if cap(dest) < len(src) {
newDest = make([]*ResourceMetrics, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceMetrics()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceMetrics(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceMetrics()
}
}
for i := range src {
CopyResourceMetrics(newDest[i], src[i])
}
return newDest
}
func (orig *ResourceMetrics) Reset() {
*orig = ResourceMetrics{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ResourceMetrics) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
orig.Resource.MarshalJSON(dest)
if len(orig.ScopeMetrics) > 0 {
dest.WriteObjectField("scopeMetrics")
dest.WriteArrayStart()
orig.ScopeMetrics[0].MarshalJSON(dest)
for i := 1; i < len(orig.ScopeMetrics); i++ {
dest.WriteMore()
orig.ScopeMetrics[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
if len(orig.DeprecatedScopeMetrics) > 0 {
dest.WriteObjectField("deprecatedScopeMetrics")
dest.WriteArrayStart()
orig.DeprecatedScopeMetrics[0].MarshalJSON(dest)
for i := 1; i < len(orig.DeprecatedScopeMetrics); i++ {
dest.WriteMore()
orig.DeprecatedScopeMetrics[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ResourceMetrics) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
orig.Resource.UnmarshalJSON(iter)
case "scopeMetrics", "scope_metrics":
for iter.ReadArray() {
orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
case "deprecatedScopeMetrics", "deprecated_scope_metrics":
for iter.ReadArray() {
orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ResourceMetrics) SizeProto() int {
var n int
var l int
_ = l
l = orig.Resource.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeMetrics {
l = orig.ScopeMetrics[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.DeprecatedScopeMetrics {
l = orig.DeprecatedScopeMetrics[i].SizeProto()
n += 2 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ResourceMetrics) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Resource.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeMetrics) - 1; i >= 0; i-- {
l = orig.ScopeMetrics[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.DeprecatedScopeMetrics) - 1; i >= 0; i-- {
l = orig.DeprecatedScopeMetrics[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3e
pos--
buf[pos] = 0xc2
}
return len(buf) - pos
}
func (orig *ResourceMetrics) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Resource.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeMetrics = append(orig.ScopeMetrics, NewScopeMetrics())
err = orig.ScopeMetrics[len(orig.ScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
case 1000:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DeprecatedScopeMetrics = append(orig.DeprecatedScopeMetrics, NewScopeMetrics())
err = orig.DeprecatedScopeMetrics[len(orig.DeprecatedScopeMetrics)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestResourceMetrics() *ResourceMetrics {
orig := NewResourceMetrics()
orig.Resource = *GenTestResource()
orig.ScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
orig.SchemaUrl = "test_schemaurl"
orig.DeprecatedScopeMetrics = []*ScopeMetrics{{}, GenTestScopeMetrics()}
return orig
}
func GenTestResourceMetricsPtrSlice() []*ResourceMetrics {
orig := make([]*ResourceMetrics, 5)
orig[0] = NewResourceMetrics()
orig[1] = GenTestResourceMetrics()
orig[2] = NewResourceMetrics()
orig[3] = GenTestResourceMetrics()
orig[4] = NewResourceMetrics()
return orig
}
func GenTestResourceMetricsSlice() []ResourceMetrics {
orig := make([]ResourceMetrics, 5)
orig[1] = *GenTestResourceMetrics()
orig[3] = *GenTestResourceMetrics()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ResourceProfiles is a collection of profiles from a Resource.
type ResourceProfiles struct {
Resource Resource
ScopeProfiles []*ScopeProfiles
SchemaUrl string
}
var (
protoPoolResourceProfiles = sync.Pool{
New: func() any {
return &ResourceProfiles{}
},
}
)
func NewResourceProfiles() *ResourceProfiles {
if !UseProtoPooling.IsEnabled() {
return &ResourceProfiles{}
}
return protoPoolResourceProfiles.Get().(*ResourceProfiles)
}
func DeleteResourceProfiles(orig *ResourceProfiles, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeProfiles {
DeleteScopeProfiles(orig.ScopeProfiles[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceProfiles.Put(orig)
}
}
func CopyResourceProfiles(dest, src *ResourceProfiles) *ResourceProfiles {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewResourceProfiles()
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeProfiles = CopyScopeProfilesPtrSlice(dest.ScopeProfiles, src.ScopeProfiles)
dest.SchemaUrl = src.SchemaUrl
return dest
}
func CopyResourceProfilesSlice(dest, src []ResourceProfiles) []ResourceProfiles {
var newDest []ResourceProfiles
if cap(dest) < len(src) {
newDest = make([]ResourceProfiles, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceProfiles(&dest[i], false)
}
}
for i := range src {
CopyResourceProfiles(&newDest[i], &src[i])
}
return newDest
}
func CopyResourceProfilesPtrSlice(dest, src []*ResourceProfiles) []*ResourceProfiles {
var newDest []*ResourceProfiles
if cap(dest) < len(src) {
newDest = make([]*ResourceProfiles, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceProfiles()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceProfiles(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceProfiles()
}
}
for i := range src {
CopyResourceProfiles(newDest[i], src[i])
}
return newDest
}
func (orig *ResourceProfiles) Reset() {
*orig = ResourceProfiles{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ResourceProfiles) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
orig.Resource.MarshalJSON(dest)
if len(orig.ScopeProfiles) > 0 {
dest.WriteObjectField("scopeProfiles")
dest.WriteArrayStart()
orig.ScopeProfiles[0].MarshalJSON(dest)
for i := 1; i < len(orig.ScopeProfiles); i++ {
dest.WriteMore()
orig.ScopeProfiles[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ResourceProfiles) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
orig.Resource.UnmarshalJSON(iter)
case "scopeProfiles", "scope_profiles":
for iter.ReadArray() {
orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ResourceProfiles) SizeProto() int {
var n int
var l int
_ = l
l = orig.Resource.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeProfiles {
l = orig.ScopeProfiles[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ResourceProfiles) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Resource.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeProfiles) - 1; i >= 0; i-- {
l = orig.ScopeProfiles[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *ResourceProfiles) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Resource.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeProfiles = append(orig.ScopeProfiles, NewScopeProfiles())
err = orig.ScopeProfiles[len(orig.ScopeProfiles)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestResourceProfiles() *ResourceProfiles {
orig := NewResourceProfiles()
orig.Resource = *GenTestResource()
orig.ScopeProfiles = []*ScopeProfiles{{}, GenTestScopeProfiles()}
orig.SchemaUrl = "test_schemaurl"
return orig
}
func GenTestResourceProfilesPtrSlice() []*ResourceProfiles {
orig := make([]*ResourceProfiles, 5)
orig[0] = NewResourceProfiles()
orig[1] = GenTestResourceProfiles()
orig[2] = NewResourceProfiles()
orig[3] = GenTestResourceProfiles()
orig[4] = NewResourceProfiles()
return orig
}
func GenTestResourceProfilesSlice() []ResourceProfiles {
orig := make([]ResourceProfiles, 5)
orig[1] = *GenTestResourceProfiles()
orig[3] = *GenTestResourceProfiles()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ResourceSpans is a collection of spans from a Resource.
type ResourceSpans struct {
Resource Resource
ScopeSpans []*ScopeSpans
SchemaUrl string
DeprecatedScopeSpans []*ScopeSpans
}
var (
protoPoolResourceSpans = sync.Pool{
New: func() any {
return &ResourceSpans{}
},
}
)
func NewResourceSpans() *ResourceSpans {
if !UseProtoPooling.IsEnabled() {
return &ResourceSpans{}
}
return protoPoolResourceSpans.Get().(*ResourceSpans)
}
func DeleteResourceSpans(orig *ResourceSpans, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteResource(&orig.Resource, false)
for i := range orig.ScopeSpans {
DeleteScopeSpans(orig.ScopeSpans[i], true)
}
for i := range orig.DeprecatedScopeSpans {
DeleteScopeSpans(orig.DeprecatedScopeSpans[i], true)
}
orig.Reset()
if nullable {
protoPoolResourceSpans.Put(orig)
}
}
func CopyResourceSpans(dest, src *ResourceSpans) *ResourceSpans {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewResourceSpans()
}
CopyResource(&dest.Resource, &src.Resource)
dest.ScopeSpans = CopyScopeSpansPtrSlice(dest.ScopeSpans, src.ScopeSpans)
dest.SchemaUrl = src.SchemaUrl
dest.DeprecatedScopeSpans = CopyScopeSpansPtrSlice(dest.DeprecatedScopeSpans, src.DeprecatedScopeSpans)
return dest
}
func CopyResourceSpansSlice(dest, src []ResourceSpans) []ResourceSpans {
var newDest []ResourceSpans
if cap(dest) < len(src) {
newDest = make([]ResourceSpans, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceSpans(&dest[i], false)
}
}
for i := range src {
CopyResourceSpans(&newDest[i], &src[i])
}
return newDest
}
func CopyResourceSpansPtrSlice(dest, src []*ResourceSpans) []*ResourceSpans {
var newDest []*ResourceSpans
if cap(dest) < len(src) {
newDest = make([]*ResourceSpans, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceSpans()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteResourceSpans(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewResourceSpans()
}
}
for i := range src {
CopyResourceSpans(newDest[i], src[i])
}
return newDest
}
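// exampleCopyResourceSpans is a hand-written sketch (not generated code)
// showing the Copy* contract used above: passing a nil dest allocates a
// fresh instance, copying a value onto itself is a no-op, and a nil src
// yields nil.
func exampleCopyResourceSpans(src *ResourceSpans) *ResourceSpans {
// A nil dest is allocated via NewResourceSpans and deep-copied from src.
dest := CopyResourceSpans(nil, src)
// Copying onto the same pointer returns immediately.
_ = CopyResourceSpans(dest, dest)
return dest
}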
func (orig *ResourceSpans) Reset() {
*orig = ResourceSpans{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ResourceSpans) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("resource")
orig.Resource.MarshalJSON(dest)
if len(orig.ScopeSpans) > 0 {
dest.WriteObjectField("scopeSpans")
dest.WriteArrayStart()
orig.ScopeSpans[0].MarshalJSON(dest)
for i := 1; i < len(orig.ScopeSpans); i++ {
dest.WriteMore()
orig.ScopeSpans[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
if len(orig.DeprecatedScopeSpans) > 0 {
dest.WriteObjectField("deprecatedScopeSpans")
dest.WriteArrayStart()
orig.DeprecatedScopeSpans[0].MarshalJSON(dest)
for i := 1; i < len(orig.DeprecatedScopeSpans); i++ {
dest.WriteMore()
orig.DeprecatedScopeSpans[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ResourceSpans) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resource":
orig.Resource.UnmarshalJSON(iter)
case "scopeSpans", "scope_spans":
for iter.ReadArray() {
orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
case "deprecatedScopeSpans", "deprecated_scope_spans":
for iter.ReadArray() {
orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *ResourceSpans) SizeProto() int {
var n int
var l int
_ = l
l = orig.Resource.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.ScopeSpans {
l = orig.ScopeSpans[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.DeprecatedScopeSpans {
l = orig.DeprecatedScopeSpans[i].SizeProto()
n += 2 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ResourceSpans) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Resource.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.ScopeSpans) - 1; i >= 0; i-- {
l = orig.ScopeSpans[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.DeprecatedScopeSpans) - 1; i >= 0; i-- {
l = orig.DeprecatedScopeSpans[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3e
pos--
buf[pos] = 0xc2
}
return len(buf) - pos
}
func (orig *ResourceSpans) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Resource.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ScopeSpans = append(orig.ScopeSpans, NewScopeSpans())
err = orig.ScopeSpans[len(orig.ScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
case 1000:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DeprecatedScopeSpans = append(orig.DeprecatedScopeSpans, NewScopeSpans())
err = orig.DeprecatedScopeSpans[len(orig.DeprecatedScopeSpans)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestResourceSpans() *ResourceSpans {
orig := NewResourceSpans()
orig.Resource = *GenTestResource()
orig.ScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
orig.SchemaUrl = "test_schemaurl"
orig.DeprecatedScopeSpans = []*ScopeSpans{{}, GenTestScopeSpans()}
return orig
}
func GenTestResourceSpansPtrSlice() []*ResourceSpans {
orig := make([]*ResourceSpans, 5)
orig[0] = NewResourceSpans()
orig[1] = GenTestResourceSpans()
orig[2] = NewResourceSpans()
orig[3] = GenTestResourceSpans()
orig[4] = NewResourceSpans()
return orig
}
func GenTestResourceSpansSlice() []ResourceSpans {
orig := make([]ResourceSpans, 5)
orig[1] = *GenTestResourceSpans()
orig[3] = *GenTestResourceSpans()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Sample represents a record of values encountered within a profiled program.
type Sample struct {
StackIndex int32
Values []int64
AttributeIndices []int32
LinkIndex int32
TimestampsUnixNano []uint64
}
var (
protoPoolSample = sync.Pool{
New: func() any {
return &Sample{}
},
}
)
func NewSample() *Sample {
if !UseProtoPooling.IsEnabled() {
return &Sample{}
}
return protoPoolSample.Get().(*Sample)
}
func DeleteSample(orig *Sample, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolSample.Put(orig)
}
}
func CopySample(dest, src *Sample) *Sample {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSample()
}
dest.StackIndex = src.StackIndex
dest.Values = append(dest.Values[:0], src.Values...)
dest.AttributeIndices = append(dest.AttributeIndices[:0], src.AttributeIndices...)
dest.LinkIndex = src.LinkIndex
dest.TimestampsUnixNano = append(dest.TimestampsUnixNano[:0], src.TimestampsUnixNano...)
return dest
}
func CopySampleSlice(dest, src []Sample) []Sample {
var newDest []Sample
if cap(dest) < len(src) {
newDest = make([]Sample, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSample(&dest[i], false)
}
}
for i := range src {
CopySample(&newDest[i], &src[i])
}
return newDest
}
func CopySamplePtrSlice(dest, src []*Sample) []*Sample {
var newDest []*Sample
if cap(dest) < len(src) {
newDest = make([]*Sample, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSample()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSample(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSample()
}
}
for i := range src {
CopySample(newDest[i], src[i])
}
return newDest
}
func (orig *Sample) Reset() {
*orig = Sample{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Sample) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.StackIndex != int32(0) {
dest.WriteObjectField("stackIndex")
dest.WriteInt32(orig.StackIndex)
}
if len(orig.Values) > 0 {
dest.WriteObjectField("values")
dest.WriteArrayStart()
dest.WriteInt64(orig.Values[0])
for i := 1; i < len(orig.Values); i++ {
dest.WriteMore()
dest.WriteInt64(orig.Values[i])
}
dest.WriteArrayEnd()
}
if len(orig.AttributeIndices) > 0 {
dest.WriteObjectField("attributeIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.AttributeIndices[0])
for i := 1; i < len(orig.AttributeIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.AttributeIndices[i])
}
dest.WriteArrayEnd()
}
if orig.LinkIndex != int32(0) {
dest.WriteObjectField("linkIndex")
dest.WriteInt32(orig.LinkIndex)
}
if len(orig.TimestampsUnixNano) > 0 {
dest.WriteObjectField("timestampsUnixNano")
dest.WriteArrayStart()
dest.WriteUint64(orig.TimestampsUnixNano[0])
for i := 1; i < len(orig.TimestampsUnixNano); i++ {
dest.WriteMore()
dest.WriteUint64(orig.TimestampsUnixNano[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Sample) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "stackIndex", "stack_index":
orig.StackIndex = iter.ReadInt32()
case "values":
for iter.ReadArray() {
orig.Values = append(orig.Values, iter.ReadInt64())
}
case "attributeIndices", "attribute_indices":
for iter.ReadArray() {
orig.AttributeIndices = append(orig.AttributeIndices, iter.ReadInt32())
}
case "linkIndex", "link_index":
orig.LinkIndex = iter.ReadInt32()
case "timestampsUnixNano", "timestamps_unix_nano":
for iter.ReadArray() {
orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, iter.ReadUint64())
}
default:
iter.Skip()
}
}
}
func (orig *Sample) SizeProto() int {
var n int
var l int
_ = l
if orig.StackIndex != 0 {
n += 1 + proto.Sov(uint64(orig.StackIndex))
}
if len(orig.Values) > 0 {
l = 0
for _, e := range orig.Values {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
if len(orig.AttributeIndices) > 0 {
l = 0
for _, e := range orig.AttributeIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.LinkIndex != 0 {
n += 1 + proto.Sov(uint64(orig.LinkIndex))
}
l = len(orig.TimestampsUnixNano)
if l > 0 {
l *= 8
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
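// Note (explanatory, not generated): Values and AttributeIndices are encoded
// packed, so SizeProto above sums the per-element varint sizes into a single
// length-delimited payload; TimestampsUnixNano is packed fixed64, which is
// why its payload length is exactly 8 bytes per element (the "l *= 8").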
func (orig *Sample) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.StackIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.StackIndex))
pos--
buf[pos] = 0x8
}
l = len(orig.Values)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Values[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x12
}
l = len(orig.AttributeIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AttributeIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0x1a
}
if orig.LinkIndex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.LinkIndex))
pos--
buf[pos] = 0x20
}
l = len(orig.TimestampsUnixNano)
if l > 0 {
for i := l - 1; i >= 0; i-- {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimestampsUnixNano[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(l*8))
pos--
buf[pos] = 0x2a
}
return len(buf) - pos
}
func (orig *Sample) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field StackIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.StackIndex = int32(num)
case 2:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.Values = append(orig.Values, int64(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field Values", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Values = append(orig.Values, int64(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
}
case 3:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field AttributeIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AttributeIndices = append(orig.AttributeIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType)
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.LinkIndex = int32(num)
case 5:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
size := length / 8
orig.TimestampsUnixNano = make([]uint64, size)
var num uint64
for i := 0; i < size; i++ {
num, startPos, err = proto.ConsumeI64(buf[:pos], startPos)
if err != nil {
return err
}
orig.TimestampsUnixNano[i] = num
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field TimestampsUnixNano", pos-startPos)
}
case proto.WireTypeI64:
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimestampsUnixNano = append(orig.TimestampsUnixNano, num)
default:
return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
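// Note (explanatory, not generated): UnmarshalProto above accepts both the
// packed (LEN) and unpacked (VARINT or I64) encodings for the repeated
// fields, matching proto3's rule that parsers must handle either form.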
func GenTestSample() *Sample {
orig := NewSample()
orig.StackIndex = int32(13)
orig.Values = []int64{int64(0), int64(13)}
orig.AttributeIndices = []int32{int32(0), int32(13)}
orig.LinkIndex = int32(13)
orig.TimestampsUnixNano = []uint64{uint64(0), uint64(13)}
return orig
}
func GenTestSamplePtrSlice() []*Sample {
orig := make([]*Sample, 5)
orig[0] = NewSample()
orig[1] = GenTestSample()
orig[2] = NewSample()
orig[3] = GenTestSample()
orig[4] = NewSample()
return orig
}
func GenTestSampleSlice() []Sample {
orig := make([]Sample, 5)
orig[1] = *GenTestSample()
orig[3] = *GenTestSample()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ScopeLogs is a collection of logs from an InstrumentationScope.
type ScopeLogs struct {
Scope InstrumentationScope
LogRecords []*LogRecord
SchemaUrl string
}
var (
protoPoolScopeLogs = sync.Pool{
New: func() any {
return &ScopeLogs{}
},
}
)
func NewScopeLogs() *ScopeLogs {
if !UseProtoPooling.IsEnabled() {
return &ScopeLogs{}
}
return protoPoolScopeLogs.Get().(*ScopeLogs)
}
func DeleteScopeLogs(orig *ScopeLogs, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.LogRecords {
DeleteLogRecord(orig.LogRecords[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeLogs.Put(orig)
}
}
func CopyScopeLogs(dest, src *ScopeLogs) *ScopeLogs {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewScopeLogs()
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.LogRecords = CopyLogRecordPtrSlice(dest.LogRecords, src.LogRecords)
dest.SchemaUrl = src.SchemaUrl
return dest
}
func CopyScopeLogsSlice(dest, src []ScopeLogs) []ScopeLogs {
var newDest []ScopeLogs
if cap(dest) < len(src) {
newDest = make([]ScopeLogs, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeLogs(&dest[i], false)
}
}
for i := range src {
CopyScopeLogs(&newDest[i], &src[i])
}
return newDest
}
func CopyScopeLogsPtrSlice(dest, src []*ScopeLogs) []*ScopeLogs {
var newDest []*ScopeLogs
if cap(dest) < len(src) {
newDest = make([]*ScopeLogs, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeLogs()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeLogs(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeLogs()
}
}
for i := range src {
CopyScopeLogs(newDest[i], src[i])
}
return newDest
}
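// Reuse sketch (illustrative): the copy helpers reuse dest's backing array
// and element pointers whenever capacity allows, so callers reassign the
// result instead of assuming dest is grown in place:
//
//	dst = CopyScopeLogsPtrSlice(dst, src)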
func (orig *ScopeLogs) Reset() {
*orig = ScopeLogs{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ScopeLogs) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
orig.Scope.MarshalJSON(dest)
if len(orig.LogRecords) > 0 {
dest.WriteObjectField("logRecords")
dest.WriteArrayStart()
orig.LogRecords[0].MarshalJSON(dest)
for i := 1; i < len(orig.LogRecords); i++ {
dest.WriteMore()
orig.LogRecords[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ScopeLogs) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
orig.Scope.UnmarshalJSON(iter)
case "logRecords", "log_records":
for iter.ReadArray() {
orig.LogRecords = append(orig.LogRecords, NewLogRecord())
orig.LogRecords[len(orig.LogRecords)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
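// Note: as the cases above show, unmarshaling accepts both the canonical
// lowerCamelCase JSON names and their snake_case proto forms, while
// MarshalJSON always emits lowerCamelCase.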
func (orig *ScopeLogs) SizeProto() int {
var n int
var l int
_ = l
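// Each length-delimited field below costs one tag byte, a varint holding
// the payload length (proto.Sov), and the payload itself.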
l = orig.Scope.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.LogRecords {
l = orig.LogRecords[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ScopeLogs) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Scope.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.LogRecords) - 1; i >= 0; i-- {
l = orig.LogRecords[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
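// Usage sketch (illustrative, not generated API): MarshalProto writes
// backwards from the end of buf and returns the number of bytes written,
// so the encoded message occupies the tail of a buffer sized by SizeProto:
//
//	sl := GenTestScopeLogs()
//	buf := make([]byte, sl.SizeProto())
//	n := sl.MarshalProto(buf)
//	encoded := buf[len(buf)-n:]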
func (orig *ScopeLogs) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Scope.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field LogRecords", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.LogRecords = append(orig.LogRecords, NewLogRecord())
err = orig.LogRecords[len(orig.LogRecords)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
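// Round-trip sketch (illustrative), reusing the buffer from the marshaling
// example above:
//
//	dst := NewScopeLogs()
//	if err := dst.UnmarshalProto(encoded); err != nil {
//		// handle malformed input
//	}
//	DeleteScopeLogs(dst, true)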
func GenTestScopeLogs() *ScopeLogs {
orig := NewScopeLogs()
orig.Scope = *GenTestInstrumentationScope()
orig.LogRecords = []*LogRecord{{}, GenTestLogRecord()}
orig.SchemaUrl = "test_schemaurl"
return orig
}
func GenTestScopeLogsPtrSlice() []*ScopeLogs {
orig := make([]*ScopeLogs, 5)
orig[0] = NewScopeLogs()
orig[1] = GenTestScopeLogs()
orig[2] = NewScopeLogs()
orig[3] = GenTestScopeLogs()
orig[4] = NewScopeLogs()
return orig
}
func GenTestScopeLogsSlice() []ScopeLogs {
orig := make([]ScopeLogs, 5)
orig[1] = *GenTestScopeLogs()
orig[3] = *GenTestScopeLogs()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ScopeMetrics is a collection of metrics produced by a single InstrumentationScope.
type ScopeMetrics struct {
Scope InstrumentationScope
Metrics []*Metric
SchemaUrl string
}
var (
protoPoolScopeMetrics = sync.Pool{
New: func() any {
return &ScopeMetrics{}
},
}
)
func NewScopeMetrics() *ScopeMetrics {
if !UseProtoPooling.IsEnabled() {
return &ScopeMetrics{}
}
return protoPoolScopeMetrics.Get().(*ScopeMetrics)
}
func DeleteScopeMetrics(orig *ScopeMetrics, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Metrics {
DeleteMetric(orig.Metrics[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeMetrics.Put(orig)
}
}
func CopyScopeMetrics(dest, src *ScopeMetrics) *ScopeMetrics {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewScopeMetrics()
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Metrics = CopyMetricPtrSlice(dest.Metrics, src.Metrics)
dest.SchemaUrl = src.SchemaUrl
return dest
}
func CopyScopeMetricsSlice(dest, src []ScopeMetrics) []ScopeMetrics {
var newDest []ScopeMetrics
if cap(dest) < len(src) {
newDest = make([]ScopeMetrics, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeMetrics(&dest[i], false)
}
}
for i := range src {
CopyScopeMetrics(&newDest[i], &src[i])
}
return newDest
}
func CopyScopeMetricsPtrSlice(dest, src []*ScopeMetrics) []*ScopeMetrics {
var newDest []*ScopeMetrics
if cap(dest) < len(src) {
newDest = make([]*ScopeMetrics, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeMetrics()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeMetrics(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeMetrics()
}
}
for i := range src {
CopyScopeMetrics(newDest[i], src[i])
}
return newDest
}
func (orig *ScopeMetrics) Reset() {
*orig = ScopeMetrics{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ScopeMetrics) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
orig.Scope.MarshalJSON(dest)
if len(orig.Metrics) > 0 {
dest.WriteObjectField("metrics")
dest.WriteArrayStart()
orig.Metrics[0].MarshalJSON(dest)
for i := 1; i < len(orig.Metrics); i++ {
dest.WriteMore()
orig.Metrics[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ScopeMetrics) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
orig.Scope.UnmarshalJSON(iter)
case "metrics":
for iter.ReadArray() {
orig.Metrics = append(orig.Metrics, NewMetric())
orig.Metrics[len(orig.Metrics)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ScopeMetrics) SizeProto() int {
var n int
var l int
_ = l
l = orig.Scope.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Metrics {
l = orig.Metrics[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ScopeMetrics) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Scope.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Metrics) - 1; i >= 0; i-- {
l = orig.Metrics[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *ScopeMetrics) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Scope.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Metrics = append(orig.Metrics, NewMetric())
err = orig.Metrics[len(orig.Metrics)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestScopeMetrics() *ScopeMetrics {
orig := NewScopeMetrics()
orig.Scope = *GenTestInstrumentationScope()
orig.Metrics = []*Metric{{}, GenTestMetric()}
orig.SchemaUrl = "test_schemaurl"
return orig
}
func GenTestScopeMetricsPtrSlice() []*ScopeMetrics {
orig := make([]*ScopeMetrics, 5)
orig[0] = NewScopeMetrics()
orig[1] = GenTestScopeMetrics()
orig[2] = NewScopeMetrics()
orig[3] = GenTestScopeMetrics()
orig[4] = NewScopeMetrics()
return orig
}
func GenTestScopeMetricsSlice() []ScopeMetrics {
orig := make([]ScopeMetrics, 5)
orig[1] = *GenTestScopeMetrics()
orig[3] = *GenTestScopeMetrics()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ScopeProfiles is a collection of profiles produced by a single InstrumentationScope.
type ScopeProfiles struct {
Scope InstrumentationScope
Profiles []*Profile
SchemaUrl string
}
var (
protoPoolScopeProfiles = sync.Pool{
New: func() any {
return &ScopeProfiles{}
},
}
)
func NewScopeProfiles() *ScopeProfiles {
if !UseProtoPooling.IsEnabled() {
return &ScopeProfiles{}
}
return protoPoolScopeProfiles.Get().(*ScopeProfiles)
}
func DeleteScopeProfiles(orig *ScopeProfiles, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Profiles {
DeleteProfile(orig.Profiles[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeProfiles.Put(orig)
}
}
func CopyScopeProfiles(dest, src *ScopeProfiles) *ScopeProfiles {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewScopeProfiles()
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Profiles = CopyProfilePtrSlice(dest.Profiles, src.Profiles)
dest.SchemaUrl = src.SchemaUrl
return dest
}
func CopyScopeProfilesSlice(dest, src []ScopeProfiles) []ScopeProfiles {
var newDest []ScopeProfiles
if cap(dest) < len(src) {
newDest = make([]ScopeProfiles, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeProfiles(&dest[i], false)
}
}
for i := range src {
CopyScopeProfiles(&newDest[i], &src[i])
}
return newDest
}
func CopyScopeProfilesPtrSlice(dest, src []*ScopeProfiles) []*ScopeProfiles {
var newDest []*ScopeProfiles
if cap(dest) < len(src) {
newDest = make([]*ScopeProfiles, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeProfiles()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeProfiles(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeProfiles()
}
}
for i := range src {
CopyScopeProfiles(newDest[i], src[i])
}
return newDest
}
func (orig *ScopeProfiles) Reset() {
*orig = ScopeProfiles{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ScopeProfiles) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
orig.Scope.MarshalJSON(dest)
if len(orig.Profiles) > 0 {
dest.WriteObjectField("profiles")
dest.WriteArrayStart()
orig.Profiles[0].MarshalJSON(dest)
for i := 1; i < len(orig.Profiles); i++ {
dest.WriteMore()
orig.Profiles[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ScopeProfiles) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
orig.Scope.UnmarshalJSON(iter)
case "profiles":
for iter.ReadArray() {
orig.Profiles = append(orig.Profiles, NewProfile())
orig.Profiles[len(orig.Profiles)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ScopeProfiles) SizeProto() int {
var n int
var l int
_ = l
l = orig.Scope.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Profiles {
l = orig.Profiles[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ScopeProfiles) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Scope.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Profiles) - 1; i >= 0; i-- {
l = orig.Profiles[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *ScopeProfiles) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Scope.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Profiles = append(orig.Profiles, NewProfile())
err = orig.Profiles[len(orig.Profiles)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestScopeProfiles() *ScopeProfiles {
orig := NewScopeProfiles()
orig.Scope = *GenTestInstrumentationScope()
orig.Profiles = []*Profile{{}, GenTestProfile()}
orig.SchemaUrl = "test_schemaurl"
return orig
}
func GenTestScopeProfilesPtrSlice() []*ScopeProfiles {
orig := make([]*ScopeProfiles, 5)
orig[0] = NewScopeProfiles()
orig[1] = GenTestScopeProfiles()
orig[2] = NewScopeProfiles()
orig[3] = GenTestScopeProfiles()
orig[4] = NewScopeProfiles()
return orig
}
func GenTestScopeProfilesSlice() []ScopeProfiles {
orig := make([]ScopeProfiles, 5)
orig[1] = *GenTestScopeProfiles()
orig[3] = *GenTestScopeProfiles()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ScopeSpans is a collection of spans produced by a single InstrumentationScope.
type ScopeSpans struct {
Scope InstrumentationScope
Spans []*Span
SchemaUrl string
}
var (
protoPoolScopeSpans = sync.Pool{
New: func() any {
return &ScopeSpans{}
},
}
)
func NewScopeSpans() *ScopeSpans {
if !UseProtoPooling.IsEnabled() {
return &ScopeSpans{}
}
return protoPoolScopeSpans.Get().(*ScopeSpans)
}
func DeleteScopeSpans(orig *ScopeSpans, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteInstrumentationScope(&orig.Scope, false)
for i := range orig.Spans {
DeleteSpan(orig.Spans[i], true)
}
orig.Reset()
if nullable {
protoPoolScopeSpans.Put(orig)
}
}
func CopyScopeSpans(dest, src *ScopeSpans) *ScopeSpans {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewScopeSpans()
}
CopyInstrumentationScope(&dest.Scope, &src.Scope)
dest.Spans = CopySpanPtrSlice(dest.Spans, src.Spans)
dest.SchemaUrl = src.SchemaUrl
return dest
}
func CopyScopeSpansSlice(dest, src []ScopeSpans) []ScopeSpans {
var newDest []ScopeSpans
if cap(dest) < len(src) {
newDest = make([]ScopeSpans, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeSpans(&dest[i], false)
}
}
for i := range src {
CopyScopeSpans(&newDest[i], &src[i])
}
return newDest
}
func CopyScopeSpansPtrSlice(dest, src []*ScopeSpans) []*ScopeSpans {
var newDest []*ScopeSpans
if cap(dest) < len(src) {
newDest = make([]*ScopeSpans, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeSpans()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteScopeSpans(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewScopeSpans()
}
}
for i := range src {
CopyScopeSpans(newDest[i], src[i])
}
return newDest
}
func (orig *ScopeSpans) Reset() {
*orig = ScopeSpans{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ScopeSpans) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
dest.WriteObjectField("scope")
orig.Scope.MarshalJSON(dest)
if len(orig.Spans) > 0 {
dest.WriteObjectField("spans")
dest.WriteArrayStart()
orig.Spans[0].MarshalJSON(dest)
for i := 1; i < len(orig.Spans); i++ {
dest.WriteMore()
orig.Spans[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.SchemaUrl != "" {
dest.WriteObjectField("schemaUrl")
dest.WriteString(orig.SchemaUrl)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *ScopeSpans) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "scope":
orig.Scope.UnmarshalJSON(iter)
case "spans":
for iter.ReadArray() {
orig.Spans = append(orig.Spans, NewSpan())
orig.Spans[len(orig.Spans)-1].UnmarshalJSON(iter)
}
case "schemaUrl", "schema_url":
orig.SchemaUrl = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *ScopeSpans) SizeProto() int {
var n int
var l int
_ = l
l = orig.Scope.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
for i := range orig.Spans {
l = orig.Spans[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.SchemaUrl)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *ScopeSpans) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.Scope.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
for i := len(orig.Spans) - 1; i >= 0; i-- {
l = orig.Spans[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = len(orig.SchemaUrl)
if l > 0 {
pos -= l
copy(buf[pos:], orig.SchemaUrl)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *ScopeSpans) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Scope.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Spans = append(orig.Spans, NewSpan())
err = orig.Spans[len(orig.Spans)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.SchemaUrl = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestScopeSpans() *ScopeSpans {
orig := NewScopeSpans()
orig.Scope = *GenTestInstrumentationScope()
orig.Spans = []*Span{{}, GenTestSpan()}
orig.SchemaUrl = "test_schemaurl"
return orig
}
func GenTestScopeSpansPtrSlice() []*ScopeSpans {
orig := make([]*ScopeSpans, 5)
orig[0] = NewScopeSpans()
orig[1] = GenTestScopeSpans()
orig[2] = NewScopeSpans()
orig[3] = GenTestScopeSpans()
orig[4] = NewScopeSpans()
return orig
}
func GenTestScopeSpansSlice() []ScopeSpans {
orig := make([]ScopeSpans, 5)
orig[1] = *GenTestScopeSpans()
orig[3] = *GenTestScopeSpans()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Span represents a single operation within a trace.
// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
type Span struct {
TraceId TraceID
SpanId SpanID
TraceState string
ParentSpanId SpanID
Flags uint32
Name string
Kind SpanKind
StartTimeUnixNano uint64
EndTimeUnixNano uint64
Attributes []KeyValue
DroppedAttributesCount uint32
Events []*SpanEvent
DroppedEventsCount uint32
Links []*SpanLink
DroppedLinksCount uint32
Status Status
}
var (
protoPoolSpan = sync.Pool{
New: func() any {
return &Span{}
},
}
)
func NewSpan() *Span {
if !UseProtoPooling.IsEnabled() {
return &Span{}
}
return protoPoolSpan.Get().(*Span)
}
func DeleteSpan(orig *Span, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
DeleteSpanID(&orig.ParentSpanId, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.Events {
DeleteSpanEvent(orig.Events[i], true)
}
for i := range orig.Links {
DeleteSpanLink(orig.Links[i], true)
}
DeleteStatus(&orig.Status, false)
orig.Reset()
if nullable {
protoPoolSpan.Put(orig)
}
}
func CopySpan(dest, src *Span) *Span {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSpan()
}
CopyTraceID(&dest.TraceId, &src.TraceId)
CopySpanID(&dest.SpanId, &src.SpanId)
dest.TraceState = src.TraceState
CopySpanID(&dest.ParentSpanId, &src.ParentSpanId)
dest.Flags = src.Flags
dest.Name = src.Name
dest.Kind = src.Kind
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.EndTimeUnixNano = src.EndTimeUnixNano
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Events = CopySpanEventPtrSlice(dest.Events, src.Events)
dest.DroppedEventsCount = src.DroppedEventsCount
dest.Links = CopySpanLinkPtrSlice(dest.Links, src.Links)
dest.DroppedLinksCount = src.DroppedLinksCount
CopyStatus(&dest.Status, &src.Status)
return dest
}
func CopySpanSlice(dest, src []Span) []Span {
var newDest []Span
if cap(dest) < len(src) {
newDest = make([]Span, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpan(&dest[i], false)
}
}
for i := range src {
CopySpan(&newDest[i], &src[i])
}
return newDest
}
func CopySpanPtrSlice(dest, src []*Span) []*Span {
var newDest []*Span
if cap(dest) < len(src) {
newDest = make([]*Span, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpan(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpan()
}
}
for i := range src {
CopySpan(newDest[i], src[i])
}
return newDest
}
func (orig *Span) Reset() {
*orig = Span{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Span) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
orig.TraceId.MarshalJSON(dest)
}
if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
orig.SpanId.MarshalJSON(dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
if !orig.ParentSpanId.IsEmpty() {
dest.WriteObjectField("parentSpanId")
orig.ParentSpanId.MarshalJSON(dest)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if int32(orig.Kind) != 0 {
dest.WriteObjectField("kind")
dest.WriteInt32(int32(orig.Kind))
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.EndTimeUnixNano != uint64(0) {
dest.WriteObjectField("endTimeUnixNano")
dest.WriteUint64(orig.EndTimeUnixNano)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if len(orig.Events) > 0 {
dest.WriteObjectField("events")
dest.WriteArrayStart()
orig.Events[0].MarshalJSON(dest)
for i := 1; i < len(orig.Events); i++ {
dest.WriteMore()
orig.Events[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedEventsCount != uint32(0) {
dest.WriteObjectField("droppedEventsCount")
dest.WriteUint32(orig.DroppedEventsCount)
}
if len(orig.Links) > 0 {
dest.WriteObjectField("links")
dest.WriteArrayStart()
orig.Links[0].MarshalJSON(dest)
for i := 1; i < len(orig.Links); i++ {
dest.WriteMore()
orig.Links[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedLinksCount != uint32(0) {
dest.WriteObjectField("droppedLinksCount")
dest.WriteUint32(orig.DroppedLinksCount)
}
dest.WriteObjectField("status")
orig.Status.MarshalJSON(dest)
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Span) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
orig.SpanId.UnmarshalJSON(iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "parentSpanId", "parent_span_id":
orig.ParentSpanId.UnmarshalJSON(iter)
case "flags":
orig.Flags = iter.ReadUint32()
case "name":
orig.Name = iter.ReadString()
case "kind":
orig.Kind = SpanKind(iter.ReadEnumValue(SpanKind_value))
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "endTimeUnixNano", "end_time_unix_nano":
orig.EndTimeUnixNano = iter.ReadUint64()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "events":
for iter.ReadArray() {
orig.Events = append(orig.Events, NewSpanEvent())
orig.Events[len(orig.Events)-1].UnmarshalJSON(iter)
}
case "droppedEventsCount", "dropped_events_count":
orig.DroppedEventsCount = iter.ReadUint32()
case "links":
for iter.ReadArray() {
orig.Links = append(orig.Links, NewSpanLink())
orig.Links[len(orig.Links)-1].UnmarshalJSON(iter)
}
case "droppedLinksCount", "dropped_links_count":
orig.DroppedLinksCount = iter.ReadUint32()
case "status":
orig.Status.UnmarshalJSON(iter)
default:
iter.Skip()
}
}
}
func (orig *Span) SizeProto() int {
var n int
var l int
_ = l
l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.ParentSpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.Flags != 0 {
n += 6
}
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Kind != 0 {
n += 1 + proto.Sov(uint64(orig.Kind))
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.EndTimeUnixNano != 0 {
n += 9
}
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
for i := range orig.Events {
l = orig.Events[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedEventsCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedEventsCount))
}
for i := range orig.Links {
l = orig.Links[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedLinksCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedLinksCount))
}
l = orig.Status.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
return n
}
func (orig *Span) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
l = len(orig.TraceState)
if l > 0 {
pos -= l
copy(buf[pos:], orig.TraceState)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
l = orig.ParentSpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
if orig.Flags != 0 {
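// Field 16 needs a two-byte tag: the key (16<<3)|5 encodes as the varint
// 0x85 0x01, written backwards below after the fixed32 payload.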
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x1
pos--
buf[pos] = 0x85
}
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x2a
}
if orig.Kind != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Kind))
pos--
buf[pos] = 0x30
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x39
}
if orig.EndTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.EndTimeUnixNano))
pos--
buf[pos] = 0x41
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x4a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x50
}
for i := len(orig.Events) - 1; i >= 0; i-- {
l = orig.Events[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x5a
}
if orig.DroppedEventsCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedEventsCount))
pos--
buf[pos] = 0x60
}
for i := len(orig.Links) - 1; i >= 0; i-- {
l = orig.Links[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x6a
}
if orig.DroppedLinksCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedLinksCount))
pos--
buf[pos] = 0x70
}
l = orig.Status.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x7a
return len(buf) - pos
}
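// Tag arithmetic (illustrative): every single-byte tag written above is
// fieldNumber<<3 | wireType, for example:
//
//	_ = byte(9<<3 | 2) // 0x4a, Attributes (length-delimited)
//	_ = byte(7<<3 | 1) // 0x39, StartTimeUnixNano (fixed64)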
func (orig *Span) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.TraceState = string(buf[startPos:pos])
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.ParentSpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
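// Field 16 (Flags) is handled next, mirroring its declaration order in the
// OTLP trace.proto; protobuf allows fields to be decoded in any order.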
case 16:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
case 5:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 6:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Kind = SpanKind(num)
case 7:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 8:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.EndTimeUnixNano = uint64(num)
case 9:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 10:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 11:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Events = append(orig.Events, NewSpanEvent())
err = orig.Events[len(orig.Events)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 12:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedEventsCount = uint32(num)
case 13:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Links = append(orig.Links, NewSpanLink())
err = orig.Links[len(orig.Links)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 14:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedLinksCount = uint32(num)
case 15:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.Status.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSpan() *Span {
orig := NewSpan()
orig.TraceId = *GenTestTraceID()
orig.SpanId = *GenTestSpanID()
orig.TraceState = "test_tracestate"
orig.ParentSpanId = *GenTestSpanID()
orig.Flags = uint32(13)
orig.Name = "test_name"
orig.Kind = SpanKind(13)
orig.StartTimeUnixNano = uint64(13)
orig.EndTimeUnixNano = uint64(13)
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
orig.Events = []*SpanEvent{{}, GenTestSpanEvent()}
orig.DroppedEventsCount = uint32(13)
orig.Links = []*SpanLink{{}, GenTestSpanLink()}
orig.DroppedLinksCount = uint32(13)
orig.Status = *GenTestStatus()
return orig
}
func GenTestSpanPtrSlice() []*Span {
orig := make([]*Span, 5)
orig[0] = NewSpan()
orig[1] = GenTestSpan()
orig[2] = NewSpan()
orig[3] = GenTestSpan()
orig[4] = NewSpan()
return orig
}
func GenTestSpanSlice() []Span {
orig := make([]Span, 5)
orig[1] = *GenTestSpan()
orig[3] = *GenTestSpan()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type SpanContext struct {
TraceID TraceID
SpanID SpanID
TraceFlags uint32
TraceState string
Remote bool
}
var (
protoPoolSpanContext = sync.Pool{
New: func() any {
return &SpanContext{}
},
}
)
func NewSpanContext() *SpanContext {
if !UseProtoPooling.IsEnabled() {
return &SpanContext{}
}
return protoPoolSpanContext.Get().(*SpanContext)
}
func DeleteSpanContext(orig *SpanContext, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceID, false)
DeleteSpanID(&orig.SpanID, false)
orig.Reset()
if nullable {
protoPoolSpanContext.Put(orig)
}
}
func CopySpanContext(dest, src *SpanContext) *SpanContext {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSpanContext()
}
CopyTraceID(&dest.TraceID, &src.TraceID)
CopySpanID(&dest.SpanID, &src.SpanID)
dest.TraceFlags = src.TraceFlags
dest.TraceState = src.TraceState
dest.Remote = src.Remote
return dest
}
func CopySpanContextSlice(dest, src []SpanContext) []SpanContext {
var newDest []SpanContext
if cap(dest) < len(src) {
newDest = make([]SpanContext, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanContext(&dest[i], false)
}
}
for i := range src {
CopySpanContext(&newDest[i], &src[i])
}
return newDest
}
func CopySpanContextPtrSlice(dest, src []*SpanContext) []*SpanContext {
var newDest []*SpanContext
if cap(dest) < len(src) {
newDest = make([]*SpanContext, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanContext()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanContext(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanContext()
}
}
for i := range src {
CopySpanContext(newDest[i], src[i])
}
return newDest
}
func (orig *SpanContext) Reset() {
*orig = SpanContext{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *SpanContext) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if !orig.TraceID.IsEmpty() {
dest.WriteObjectField("traceID")
orig.TraceID.MarshalJSON(dest)
}
if !orig.SpanID.IsEmpty() {
dest.WriteObjectField("spanID")
orig.SpanID.MarshalJSON(dest)
}
if orig.TraceFlags != uint32(0) {
dest.WriteObjectField("traceFlags")
dest.WriteUint32(orig.TraceFlags)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
if orig.Remote {
dest.WriteObjectField("remote")
dest.WriteBool(orig.Remote)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *SpanContext) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceID", "trace_id":
orig.TraceID.UnmarshalJSON(iter)
case "spanID", "span_id":
orig.SpanID.UnmarshalJSON(iter)
case "traceFlags", "trace_flags":
orig.TraceFlags = iter.ReadUint32()
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "remote":
orig.Remote = iter.ReadBool()
default:
iter.Skip()
}
}
}
func (orig *SpanContext) SizeProto() int {
var n int
var l int
_ = l
l = orig.TraceID.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanID.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.TraceFlags != 0 {
n += 5
}
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Remote {
n += 2
}
return n
}
func (orig *SpanContext) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.TraceID.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = orig.SpanID.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
if orig.TraceFlags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.TraceFlags))
pos--
buf[pos] = 0x1d
}
l = len(orig.TraceState)
if l > 0 {
pos -= l
copy(buf[pos:], orig.TraceState)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
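// Bools are encoded as a varint 0/1; tag 0x28 == (5<<3)|0 for field Remote.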
if orig.Remote {
pos--
buf[pos] = 1 // the value is always true when this field is written
pos--
buf[pos] = 0x28
}
return len(buf) - pos
}
func (orig *SpanContext) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceID.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanID.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field TraceFlags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.TraceFlags = uint32(num)
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.TraceState = string(buf[startPos:pos])
case 5:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Remote = num != 0
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSpanContext() *SpanContext {
orig := NewSpanContext()
orig.TraceID = *GenTestTraceID()
orig.SpanID = *GenTestSpanID()
orig.TraceFlags = uint32(13)
orig.TraceState = "test_tracestate"
orig.Remote = true
return orig
}
func GenTestSpanContextPtrSlice() []*SpanContext {
orig := make([]*SpanContext, 5)
orig[0] = NewSpanContext()
orig[1] = GenTestSpanContext()
orig[2] = NewSpanContext()
orig[3] = GenTestSpanContext()
orig[4] = NewSpanContext()
return orig
}
func GenTestSpanContextSlice() []SpanContext {
orig := make([]SpanContext, 5)
orig[1] = *GenTestSpanContext()
orig[3] = *GenTestSpanContext()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// SpanEvent is a time-stamped annotation of the span, consisting of a user-supplied
// text description and key-value pairs. See OTLP for the event definition.
type SpanEvent struct {
TimeUnixNano uint64
Name string
Attributes []KeyValue
DroppedAttributesCount uint32
}
var (
protoPoolSpanEvent = sync.Pool{
New: func() any {
return &SpanEvent{}
},
}
)
func NewSpanEvent() *SpanEvent {
if !UseProtoPooling.IsEnabled() {
return &SpanEvent{}
}
return protoPoolSpanEvent.Get().(*SpanEvent)
}
func DeleteSpanEvent(orig *SpanEvent, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolSpanEvent.Put(orig)
}
}
func CopySpanEvent(dest, src *SpanEvent) *SpanEvent {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSpanEvent()
}
dest.TimeUnixNano = src.TimeUnixNano
dest.Name = src.Name
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
return dest
}
func CopySpanEventSlice(dest, src []SpanEvent) []SpanEvent {
var newDest []SpanEvent
if cap(dest) < len(src) {
newDest = make([]SpanEvent, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanEvent(&dest[i], false)
}
}
for i := range src {
CopySpanEvent(&newDest[i], &src[i])
}
return newDest
}
func CopySpanEventPtrSlice(dest, src []*SpanEvent) []*SpanEvent {
var newDest []*SpanEvent
if cap(dest) < len(src) {
newDest = make([]*SpanEvent, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanEvent()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanEvent(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanEvent()
}
}
for i := range src {
CopySpanEvent(newDest[i], src[i])
}
return newDest
}
func (orig *SpanEvent) Reset() {
*orig = SpanEvent{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *SpanEvent) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *SpanEvent) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "name":
orig.Name = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *SpanEvent) SizeProto() int {
var n int
var l int
_ = l
if orig.TimeUnixNano != 0 {
n += 9
}
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
return n
}
func (orig *SpanEvent) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x9
}
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x20
}
return len(buf) - pos
}
func (orig *SpanEvent) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// Read the next field's tag (field number and wire type).
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 4:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSpanEvent() *SpanEvent {
orig := NewSpanEvent()
orig.TimeUnixNano = uint64(13)
orig.Name = "test_name"
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
return orig
}
func GenTestSpanEventPtrSlice() []*SpanEvent {
orig := make([]*SpanEvent, 5)
orig[0] = NewSpanEvent()
orig[1] = GenTestSpanEvent()
orig[2] = NewSpanEvent()
orig[3] = GenTestSpanEvent()
orig[4] = NewSpanEvent()
return orig
}
func GenTestSpanEventSlice() []SpanEvent {
orig := make([]SpanEvent, 5)
orig[1] = *GenTestSpanEvent()
orig[3] = *GenTestSpanEvent()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// SpanLink is a pointer from the current span to another span in the same trace or in a
// different trace.
// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
type SpanLink struct {
TraceId TraceID
SpanId SpanID
TraceState string
Attributes []KeyValue
DroppedAttributesCount uint32
Flags uint32
}
var (
protoPoolSpanLink = sync.Pool{
New: func() any {
return &SpanLink{}
},
}
)
func NewSpanLink() *SpanLink {
if !UseProtoPooling.IsEnabled() {
return &SpanLink{}
}
return protoPoolSpanLink.Get().(*SpanLink)
}
func DeleteSpanLink(orig *SpanLink, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteTraceID(&orig.TraceId, false)
DeleteSpanID(&orig.SpanId, false)
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
orig.Reset()
if nullable {
protoPoolSpanLink.Put(orig)
}
}
func CopySpanLink(dest, src *SpanLink) *SpanLink {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSpanLink()
}
CopyTraceID(&dest.TraceId, &src.TraceId)
CopySpanID(&dest.SpanId, &src.SpanId)
dest.TraceState = src.TraceState
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.DroppedAttributesCount = src.DroppedAttributesCount
dest.Flags = src.Flags
return dest
}
func CopySpanLinkSlice(dest, src []SpanLink) []SpanLink {
var newDest []SpanLink
if cap(dest) < len(src) {
newDest = make([]SpanLink, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanLink(&dest[i], false)
}
}
for i := range src {
CopySpanLink(&newDest[i], &src[i])
}
return newDest
}
func CopySpanLinkPtrSlice(dest, src []*SpanLink) []*SpanLink {
var newDest []*SpanLink
if cap(dest) < len(src) {
newDest = make([]*SpanLink, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanLink()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSpanLink(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSpanLink()
}
}
for i := range src {
CopySpanLink(newDest[i], src[i])
}
return newDest
}
func (orig *SpanLink) Reset() {
*orig = SpanLink{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *SpanLink) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if !orig.TraceId.IsEmpty() {
dest.WriteObjectField("traceId")
orig.TraceId.MarshalJSON(dest)
}
if !orig.SpanId.IsEmpty() {
dest.WriteObjectField("spanId")
orig.SpanId.MarshalJSON(dest)
}
if orig.TraceState != "" {
dest.WriteObjectField("traceState")
dest.WriteString(orig.TraceState)
}
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.DroppedAttributesCount != uint32(0) {
dest.WriteObjectField("droppedAttributesCount")
dest.WriteUint32(orig.DroppedAttributesCount)
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *SpanLink) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "traceId", "trace_id":
orig.TraceId.UnmarshalJSON(iter)
case "spanId", "span_id":
orig.SpanId.UnmarshalJSON(iter)
case "traceState", "trace_state":
orig.TraceState = iter.ReadString()
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "droppedAttributesCount", "dropped_attributes_count":
orig.DroppedAttributesCount = iter.ReadUint32()
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *SpanLink) SizeProto() int {
var n int
var l int
_ = l
l = orig.TraceId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = orig.SpanId.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
l = len(orig.TraceState)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.DroppedAttributesCount != 0 {
n += 1 + proto.Sov(uint64(orig.DroppedAttributesCount))
}
if orig.Flags != 0 {
n += 5
}
return n
}
func (orig *SpanLink) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = orig.TraceId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
l = orig.SpanId.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
l = len(orig.TraceState)
if l > 0 {
pos -= l
copy(buf[pos:], orig.TraceState)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x22
}
if orig.DroppedAttributesCount != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.DroppedAttributesCount))
pos--
buf[pos] = 0x28
}
if orig.Flags != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.Flags))
pos--
buf[pos] = 0x35
}
return len(buf) - pos
}
func (orig *SpanLink) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TraceId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.SpanId.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.TraceState = string(buf[startPos:pos])
case 4:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 5:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.DroppedAttributesCount = uint32(num)
case 6:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSpanLink() *SpanLink {
orig := NewSpanLink()
orig.TraceId = *GenTestTraceID()
orig.SpanId = *GenTestSpanID()
orig.TraceState = "test_tracestate"
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.DroppedAttributesCount = uint32(13)
orig.Flags = uint32(13)
return orig
}
func GenTestSpanLinkPtrSlice() []*SpanLink {
orig := make([]*SpanLink, 5)
orig[0] = NewSpanLink()
orig[1] = GenTestSpanLink()
orig[2] = NewSpanLink()
orig[3] = GenTestSpanLink()
orig[4] = NewSpanLink()
return orig
}
func GenTestSpanLinkSlice() []SpanLink {
orig := make([]SpanLink, 5)
orig[1] = *GenTestSpanLink()
orig[3] = *GenTestSpanLink()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Stack represents a stack trace as a list of locations.
type Stack struct {
LocationIndices []int32
}
var (
protoPoolStack = sync.Pool{
New: func() any {
return &Stack{}
},
}
)
func NewStack() *Stack {
if !UseProtoPooling.IsEnabled() {
return &Stack{}
}
return protoPoolStack.Get().(*Stack)
}
func DeleteStack(orig *Stack, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolStack.Put(orig)
}
}
func CopyStack(dest, src *Stack) *Stack {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewStack()
}
dest.LocationIndices = append(dest.LocationIndices[:0], src.LocationIndices...)
return dest
}
func CopyStackSlice(dest, src []Stack) []Stack {
var newDest []Stack
if cap(dest) < len(src) {
newDest = make([]Stack, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteStack(&dest[i], false)
}
}
for i := range src {
CopyStack(&newDest[i], &src[i])
}
return newDest
}
func CopyStackPtrSlice(dest, src []*Stack) []*Stack {
var newDest []*Stack
if cap(dest) < len(src) {
newDest = make([]*Stack, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStack()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteStack(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStack()
}
}
for i := range src {
CopyStack(newDest[i], src[i])
}
return newDest
}
func (orig *Stack) Reset() {
*orig = Stack{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Stack) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.LocationIndices) > 0 {
dest.WriteObjectField("locationIndices")
dest.WriteArrayStart()
dest.WriteInt32(orig.LocationIndices[0])
for i := 1; i < len(orig.LocationIndices); i++ {
dest.WriteMore()
dest.WriteInt32(orig.LocationIndices[i])
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Stack) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "locationIndices", "location_indices":
for iter.ReadArray() {
orig.LocationIndices = append(orig.LocationIndices, iter.ReadInt32())
}
default:
iter.Skip()
}
}
}
func (orig *Stack) SizeProto() int {
var n int
var l int
_ = l
if len(orig.LocationIndices) > 0 {
l = 0
for _, e := range orig.LocationIndices {
l += proto.Sov(uint64(e))
}
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Stack) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.LocationIndices)
if l > 0 {
endPos := pos
for i := l - 1; i >= 0; i-- {
pos = proto.EncodeVarint(buf, pos, uint64(orig.LocationIndices[i]))
}
pos = proto.EncodeVarint(buf, pos, uint64(endPos-pos))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *Stack) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
switch wireType {
case proto.WireTypeLen:
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
var num uint64
for startPos < pos {
num, startPos, err = proto.ConsumeVarint(buf[:pos], startPos)
if err != nil {
return err
}
orig.LocationIndices = append(orig.LocationIndices, int32(num))
}
if startPos != pos {
return fmt.Errorf("proto: invalid field len = %d for field LocationIndices", pos-startPos)
}
case proto.WireTypeVarint:
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.LocationIndices = append(orig.LocationIndices, int32(num))
default:
return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType)
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestStack() *Stack {
orig := NewStack()
orig.LocationIndices = []int32{int32(0), int32(13)}
return orig
}
func GenTestStackPtrSlice() []*Stack {
orig := make([]*Stack, 5)
orig[0] = NewStack()
orig[1] = GenTestStack()
orig[2] = NewStack()
orig[3] = GenTestStack()
orig[4] = NewStack()
return orig
}
func GenTestStackSlice() []Stack {
orig := make([]Stack, 5)
orig[1] = *GenTestStack()
orig[3] = *GenTestStack()
return orig
}
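// Editorial sketch (not generated): Stack.MarshalProto always emits
// LocationIndices in packed form, a single length-delimited record with tag
// 0xa, while UnmarshalProto above also accepts the unpacked form (one VARINT
// record per element), as proto3 requires for repeated scalar fields.
func stackPackedSketch() (*Stack, error) {
in := &Stack{LocationIndices: []int32{1, 2, 3}}
buf := make([]byte, in.SizeProto()) // encodes as 0x0a 0x03 0x01 0x02 0x03
n := in.MarshalProto(buf)
out := NewStack()
err := out.UnmarshalProto(buf[len(buf)-n:])
return out, err
}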
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Status is an optional final status for this span. Semantically, when Status is not
// set, the span ended without errors and Status.Ok (code = 0) should be assumed.
type Status struct {
Message string
Code StatusCode
}
var (
protoPoolStatus = sync.Pool{
New: func() any {
return &Status{}
},
}
)
func NewStatus() *Status {
if !UseProtoPooling.IsEnabled() {
return &Status{}
}
return protoPoolStatus.Get().(*Status)
}
func DeleteStatus(orig *Status, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolStatus.Put(orig)
}
}
func CopyStatus(dest, src *Status) *Status {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewStatus()
}
dest.Message = src.Message
dest.Code = src.Code
return dest
}
func CopyStatusSlice(dest, src []Status) []Status {
var newDest []Status
if cap(dest) < len(src) {
newDest = make([]Status, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteStatus(&dest[i], false)
}
}
for i := range src {
CopyStatus(&newDest[i], &src[i])
}
return newDest
}
func CopyStatusPtrSlice(dest, src []*Status) []*Status {
var newDest []*Status
if cap(dest) < len(src) {
newDest = make([]*Status, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStatus()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteStatus(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewStatus()
}
}
for i := range src {
CopyStatus(newDest[i], src[i])
}
return newDest
}
func (orig *Status) Reset() {
*orig = Status{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Status) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Message != "" {
dest.WriteObjectField("message")
dest.WriteString(orig.Message)
}
if int32(orig.Code) != 0 {
dest.WriteObjectField("code")
dest.WriteInt32(int32(orig.Code))
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Status) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "message":
orig.Message = iter.ReadString()
case "code":
orig.Code = StatusCode(iter.ReadEnumValue(StatusCode_value))
default:
iter.Skip()
}
}
}
func (orig *Status) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.Message)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Code != 0 {
n += 1 + proto.Sov(uint64(orig.Code))
}
return n
}
func (orig *Status) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Message)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Message)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
if orig.Code != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Code))
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func (orig *Status) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Message = string(buf[startPos:pos])
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Code = StatusCode(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestStatus() *Status {
orig := NewStatus()
orig.Message = "test_message"
orig.Code = StatusCode(13)
return orig
}
func GenTestStatusPtrSlice() []*Status {
orig := make([]*Status, 5)
orig[0] = NewStatus()
orig[1] = GenTestStatus()
orig[2] = NewStatus()
orig[3] = GenTestStatus()
orig[4] = NewStatus()
return orig
}
func GenTestStatusSlice() []Status {
orig := make([]Status, 5)
orig[1] = *GenTestStatus()
orig[3] = *GenTestStatus()
return orig
}
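// Editorial sketch (not generated): the tag bytes written by
// Status.MarshalProto are 0x12 (field 2, Message, wire type LEN) and 0x18
// (field 3, Code, VARINT); field number 1 is never emitted, matching the
// reserved field in the OTLP Status definition. Because fields are written
// back-to-front, the resulting bytes are 0x18 0x01 0x12 0x02 'o' 'k'.
func statusWireSketch() []byte {
st := &Status{Message: "ok", Code: StatusCode(1)}
buf := make([]byte, st.SizeProto())
n := st.MarshalProto(buf)
return buf[len(buf)-n:]
}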
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
type Sum struct {
DataPoints []*NumberDataPoint
AggregationTemporality AggregationTemporality
IsMonotonic bool
}
var (
protoPoolSum = sync.Pool{
New: func() any {
return &Sum{}
},
}
)
func NewSum() *Sum {
if !UseProtoPooling.IsEnabled() {
return &Sum{}
}
return protoPoolSum.Get().(*Sum)
}
func DeleteSum(orig *Sum, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteNumberDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolSum.Put(orig)
}
}
func CopySum(dest, src *Sum) *Sum {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSum()
}
dest.DataPoints = CopyNumberDataPointPtrSlice(dest.DataPoints, src.DataPoints)
dest.AggregationTemporality = src.AggregationTemporality
dest.IsMonotonic = src.IsMonotonic
return dest
}
func CopySumSlice(dest, src []Sum) []Sum {
var newDest []Sum
if cap(dest) < len(src) {
newDest = make([]Sum, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSum(&dest[i], false)
}
}
for i := range src {
CopySum(&newDest[i], &src[i])
}
return newDest
}
func CopySumPtrSlice(dest, src []*Sum) []*Sum {
var newDest []*Sum
if cap(dest) < len(src) {
newDest = make([]*Sum, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSum()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSum(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSum()
}
}
for i := range src {
CopySum(newDest[i], src[i])
}
return newDest
}
func (orig *Sum) Reset() {
*orig = Sum{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Sum) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if int32(orig.AggregationTemporality) != 0 {
dest.WriteObjectField("aggregationTemporality")
dest.WriteInt32(int32(orig.AggregationTemporality))
}
if orig.IsMonotonic {
dest.WriteObjectField("isMonotonic")
dest.WriteBool(orig.IsMonotonic)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Sum) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
case "aggregationTemporality", "aggregation_temporality":
orig.AggregationTemporality = AggregationTemporality(iter.ReadEnumValue(AggregationTemporality_value))
case "isMonotonic", "is_monotonic":
orig.IsMonotonic = iter.ReadBool()
default:
iter.Skip()
}
}
}
func (orig *Sum) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.AggregationTemporality != 0 {
n += 1 + proto.Sov(uint64(orig.AggregationTemporality))
}
if orig.IsMonotonic {
n += 2
}
return n
}
func (orig *Sum) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.AggregationTemporality != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.AggregationTemporality))
pos--
buf[pos] = 0x10
}
if orig.IsMonotonic {
// The field is emitted only when true, so the value byte is always 1.
pos--
buf[pos] = 1
pos--
buf[pos] = 0x18
}
return len(buf) - pos
}
func (orig *Sum) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewNumberDataPoint())
err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.AggregationTemporality = AggregationTemporality(num)
case 3:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.IsMonotonic = num != 0
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSum() *Sum {
orig := NewSum()
orig.DataPoints = []*NumberDataPoint{{}, GenTestNumberDataPoint()}
orig.AggregationTemporality = AggregationTemporality(13)
orig.IsMonotonic = true
return orig
}
func GenTestSumPtrSlice() []*Sum {
orig := make([]*Sum, 5)
orig[0] = NewSum()
orig[1] = GenTestSum()
orig[2] = NewSum()
orig[3] = GenTestSum()
orig[4] = NewSum()
return orig
}
func GenTestSumSlice() []Sum {
orig := make([]Sum, 5)
orig[1] = *GenTestSum()
orig[3] = *GenTestSum()
return orig
}
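// Editorial sketch (not generated) of the pooling lifecycle shared by all
// New*/Delete* pairs in this package: when the UseProtoPooling feature gate
// is enabled, NewSum hands out an object from a sync.Pool, and DeleteSum
// recycles it after recursively releasing its children. The nullable
// argument is true only for standalone pointers, never for structs embedded
// in value slices.
func sumPoolingSketch() {
s := NewSum() // pooled or freshly allocated, depending on the gate
s.IsMonotonic = true
DeleteSum(s, true) // resets s and, if pooling is enabled, returns it to the pool
}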
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// Summary represents the type of a metric that is calculated by aggregating all reported double measurements over a time interval into a summary.
type Summary struct {
DataPoints []*SummaryDataPoint
}
var (
protoPoolSummary = sync.Pool{
New: func() any {
return &Summary{}
},
}
)
func NewSummary() *Summary {
if !UseProtoPooling.IsEnabled() {
return &Summary{}
}
return protoPoolSummary.Get().(*Summary)
}
func DeleteSummary(orig *Summary, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.DataPoints {
DeleteSummaryDataPoint(orig.DataPoints[i], true)
}
orig.Reset()
if nullable {
protoPoolSummary.Put(orig)
}
}
func CopySummary(dest, src *Summary) *Summary {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSummary()
}
dest.DataPoints = CopySummaryDataPointPtrSlice(dest.DataPoints, src.DataPoints)
return dest
}
func CopySummarySlice(dest, src []Summary) []Summary {
var newDest []Summary
if cap(dest) < len(src) {
newDest = make([]Summary, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummary(&dest[i], false)
}
}
for i := range src {
CopySummary(&newDest[i], &src[i])
}
return newDest
}
func CopySummaryPtrSlice(dest, src []*Summary) []*Summary {
var newDest []*Summary
if cap(dest) < len(src) {
newDest = make([]*Summary, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummary()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummary(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummary()
}
}
for i := range src {
CopySummary(newDest[i], src[i])
}
return newDest
}
func (orig *Summary) Reset() {
*orig = Summary{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *Summary) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.DataPoints) > 0 {
dest.WriteObjectField("dataPoints")
dest.WriteArrayStart()
orig.DataPoints[0].MarshalJSON(dest)
for i := 1; i < len(orig.DataPoints); i++ {
dest.WriteMore()
orig.DataPoints[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *Summary) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "dataPoints", "data_points":
for iter.ReadArray() {
orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
orig.DataPoints[len(orig.DataPoints)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *Summary) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.DataPoints {
l = orig.DataPoints[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *Summary) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.DataPoints) - 1; i >= 0; i-- {
l = orig.DataPoints[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *Summary) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.DataPoints = append(orig.DataPoints, NewSummaryDataPoint())
err = orig.DataPoints[len(orig.DataPoints)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSummary() *Summary {
orig := NewSummary()
orig.DataPoints = []*SummaryDataPoint{{}, GenTestSummaryDataPoint()}
return orig
}
func GenTestSummaryPtrSlice() []*Summary {
orig := make([]*Summary, 5)
orig[0] = NewSummary()
orig[1] = GenTestSummary()
orig[2] = NewSummary()
orig[3] = GenTestSummary()
orig[4] = NewSummary()
return orig
}
func GenTestSummarySlice() []Summary {
orig := make([]Summary, 5)
orig[1] = *GenTestSummary()
orig[3] = *GenTestSummary()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.
type SummaryDataPoint struct {
Attributes []KeyValue
StartTimeUnixNano uint64
TimeUnixNano uint64
Count uint64
Sum float64
QuantileValues []*SummaryDataPointValueAtQuantile
Flags uint32
}
var (
protoPoolSummaryDataPoint = sync.Pool{
New: func() any {
return &SummaryDataPoint{}
},
}
)
func NewSummaryDataPoint() *SummaryDataPoint {
if !UseProtoPooling.IsEnabled() {
return &SummaryDataPoint{}
}
return protoPoolSummaryDataPoint.Get().(*SummaryDataPoint)
}
func DeleteSummaryDataPoint(orig *SummaryDataPoint, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.Attributes {
DeleteKeyValue(&orig.Attributes[i], false)
}
for i := range orig.QuantileValues {
DeleteSummaryDataPointValueAtQuantile(orig.QuantileValues[i], true)
}
orig.Reset()
if nullable {
protoPoolSummaryDataPoint.Put(orig)
}
}
func CopySummaryDataPoint(dest, src *SummaryDataPoint) *SummaryDataPoint {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSummaryDataPoint()
}
dest.Attributes = CopyKeyValueSlice(dest.Attributes, src.Attributes)
dest.StartTimeUnixNano = src.StartTimeUnixNano
dest.TimeUnixNano = src.TimeUnixNano
dest.Count = src.Count
dest.Sum = src.Sum
dest.QuantileValues = CopySummaryDataPointValueAtQuantilePtrSlice(dest.QuantileValues, src.QuantileValues)
dest.Flags = src.Flags
return dest
}
func CopySummaryDataPointSlice(dest, src []SummaryDataPoint) []SummaryDataPoint {
var newDest []SummaryDataPoint
if cap(dest) < len(src) {
newDest = make([]SummaryDataPoint, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPoint(&dest[i], false)
}
}
for i := range src {
CopySummaryDataPoint(&newDest[i], &src[i])
}
return newDest
}
func CopySummaryDataPointPtrSlice(dest, src []*SummaryDataPoint) []*SummaryDataPoint {
var newDest []*SummaryDataPoint
if cap(dest) < len(src) {
newDest = make([]*SummaryDataPoint, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPoint(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPoint()
}
}
for i := range src {
CopySummaryDataPoint(newDest[i], src[i])
}
return newDest
}
func (orig *SummaryDataPoint) Reset() {
*orig = SummaryDataPoint{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *SummaryDataPoint) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.Attributes) > 0 {
dest.WriteObjectField("attributes")
dest.WriteArrayStart()
orig.Attributes[0].MarshalJSON(dest)
for i := 1; i < len(orig.Attributes); i++ {
dest.WriteMore()
orig.Attributes[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.StartTimeUnixNano != uint64(0) {
dest.WriteObjectField("startTimeUnixNano")
dest.WriteUint64(orig.StartTimeUnixNano)
}
if orig.TimeUnixNano != uint64(0) {
dest.WriteObjectField("timeUnixNano")
dest.WriteUint64(orig.TimeUnixNano)
}
if orig.Count != uint64(0) {
dest.WriteObjectField("count")
dest.WriteUint64(orig.Count)
}
if orig.Sum != float64(0) {
dest.WriteObjectField("sum")
dest.WriteFloat64(orig.Sum)
}
if len(orig.QuantileValues) > 0 {
dest.WriteObjectField("quantileValues")
dest.WriteArrayStart()
orig.QuantileValues[0].MarshalJSON(dest)
for i := 1; i < len(orig.QuantileValues); i++ {
dest.WriteMore()
orig.QuantileValues[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
if orig.Flags != uint32(0) {
dest.WriteObjectField("flags")
dest.WriteUint32(orig.Flags)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *SummaryDataPoint) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "attributes":
for iter.ReadArray() {
orig.Attributes = append(orig.Attributes, KeyValue{})
orig.Attributes[len(orig.Attributes)-1].UnmarshalJSON(iter)
}
case "startTimeUnixNano", "start_time_unix_nano":
orig.StartTimeUnixNano = iter.ReadUint64()
case "timeUnixNano", "time_unix_nano":
orig.TimeUnixNano = iter.ReadUint64()
case "count":
orig.Count = iter.ReadUint64()
case "sum":
orig.Sum = iter.ReadFloat64()
case "quantileValues", "quantile_values":
for iter.ReadArray() {
orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalJSON(iter)
}
case "flags":
orig.Flags = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *SummaryDataPoint) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.Attributes {
l = orig.Attributes[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.StartTimeUnixNano != 0 {
n += 9
}
if orig.TimeUnixNano != 0 {
n += 9
}
if orig.Count != 0 {
n += 9
}
if orig.Sum != 0 {
n += 9
}
for i := range orig.QuantileValues {
l = orig.QuantileValues[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Flags != 0 {
n += 1 + proto.Sov(uint64(orig.Flags))
}
return n
}
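// Note (editorial): the literal 9 above is 1 tag byte plus 8 payload bytes;
// StartTimeUnixNano, TimeUnixNano, Count, and Sum are all encoded with wire
// type I64 (fixed64/double), as the 0x11/0x19/0x21/0x29 tags in MarshalProto
// below confirm.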
func (orig *SummaryDataPoint) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.Attributes) - 1; i >= 0; i-- {
l = orig.Attributes[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x3a
}
if orig.StartTimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.StartTimeUnixNano))
pos--
buf[pos] = 0x11
}
if orig.TimeUnixNano != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.TimeUnixNano))
pos--
buf[pos] = 0x19
}
if orig.Count != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], uint64(orig.Count))
pos--
buf[pos] = 0x21
}
if orig.Sum != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Sum))
pos--
buf[pos] = 0x29
}
for i := len(orig.QuantileValues) - 1; i >= 0; i-- {
l = orig.QuantileValues[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x32
}
if orig.Flags != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Flags))
pos--
buf[pos] = 0x40
}
return len(buf) - pos
}
func (orig *SummaryDataPoint) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 7:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Attributes = append(orig.Attributes, KeyValue{})
err = orig.Attributes[len(orig.Attributes)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.StartTimeUnixNano = uint64(num)
case 3:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.TimeUnixNano = uint64(num)
case 4:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Count = uint64(num)
case 5:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Sum = math.Float64frombits(num)
case 6:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.QuantileValues = append(orig.QuantileValues, NewSummaryDataPointValueAtQuantile())
err = orig.QuantileValues[len(orig.QuantileValues)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 8:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Flags = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSummaryDataPoint() *SummaryDataPoint {
orig := NewSummaryDataPoint()
orig.Attributes = []KeyValue{{}, *GenTestKeyValue()}
orig.StartTimeUnixNano = uint64(13)
orig.TimeUnixNano = uint64(13)
orig.Count = uint64(13)
orig.Sum = float64(3.1415926)
orig.QuantileValues = []*SummaryDataPointValueAtQuantile{{}, GenTestSummaryDataPointValueAtQuantile()}
orig.Flags = uint32(13)
return orig
}
func GenTestSummaryDataPointPtrSlice() []*SummaryDataPoint {
orig := make([]*SummaryDataPoint, 5)
orig[0] = NewSummaryDataPoint()
orig[1] = GenTestSummaryDataPoint()
orig[2] = NewSummaryDataPoint()
orig[3] = GenTestSummaryDataPoint()
orig[4] = NewSummaryDataPoint()
return orig
}
func GenTestSummaryDataPointSlice() []SummaryDataPoint {
orig := make([]SummaryDataPoint, 5)
orig[1] = *GenTestSummaryDataPoint()
orig[3] = *GenTestSummaryDataPoint()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"math"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
type SummaryDataPointValueAtQuantile struct {
Quantile float64
Value float64
}
var (
protoPoolSummaryDataPointValueAtQuantile = sync.Pool{
New: func() any {
return &SummaryDataPointValueAtQuantile{}
},
}
)
func NewSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
if !UseProtoPooling.IsEnabled() {
return &SummaryDataPointValueAtQuantile{}
}
return protoPoolSummaryDataPointValueAtQuantile.Get().(*SummaryDataPointValueAtQuantile)
}
func DeleteSummaryDataPointValueAtQuantile(orig *SummaryDataPointValueAtQuantile, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolSummaryDataPointValueAtQuantile.Put(orig)
}
}
func CopySummaryDataPointValueAtQuantile(dest, src *SummaryDataPointValueAtQuantile) *SummaryDataPointValueAtQuantile {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewSummaryDataPointValueAtQuantile()
}
dest.Quantile = src.Quantile
dest.Value = src.Value
return dest
}
func CopySummaryDataPointValueAtQuantileSlice(dest, src []SummaryDataPointValueAtQuantile) []SummaryDataPointValueAtQuantile {
var newDest []SummaryDataPointValueAtQuantile
if cap(dest) < len(src) {
newDest = make([]SummaryDataPointValueAtQuantile, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPointValueAtQuantile(&dest[i], false)
}
}
for i := range src {
CopySummaryDataPointValueAtQuantile(&newDest[i], &src[i])
}
return newDest
}
func CopySummaryDataPointValueAtQuantilePtrSlice(dest, src []*SummaryDataPointValueAtQuantile) []*SummaryDataPointValueAtQuantile {
var newDest []*SummaryDataPointValueAtQuantile
if cap(dest) < len(src) {
newDest = make([]*SummaryDataPointValueAtQuantile, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPointValueAtQuantile()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteSummaryDataPointValueAtQuantile(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewSummaryDataPointValueAtQuantile()
}
}
for i := range src {
CopySummaryDataPointValueAtQuantile(newDest[i], src[i])
}
return newDest
}
func (orig *SummaryDataPointValueAtQuantile) Reset() {
*orig = SummaryDataPointValueAtQuantile{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *SummaryDataPointValueAtQuantile) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Quantile != float64(0) {
dest.WriteObjectField("quantile")
dest.WriteFloat64(orig.Quantile)
}
if orig.Value != float64(0) {
dest.WriteObjectField("value")
dest.WriteFloat64(orig.Value)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *SummaryDataPointValueAtQuantile) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "quantile":
orig.Quantile = iter.ReadFloat64()
case "value":
orig.Value = iter.ReadFloat64()
default:
iter.Skip()
}
}
}
func (orig *SummaryDataPointValueAtQuantile) SizeProto() int {
var n int
var l int
_ = l
if orig.Quantile != 0 {
n += 9
}
if orig.Value != 0 {
n += 9
}
return n
}
func (orig *SummaryDataPointValueAtQuantile) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.Quantile != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Quantile))
pos--
buf[pos] = 0x9
}
if orig.Value != 0 {
pos -= 8
binary.LittleEndian.PutUint64(buf[pos:], math.Float64bits(orig.Value))
pos--
buf[pos] = 0x11
}
return len(buf) - pos
}
func (orig *SummaryDataPointValueAtQuantile) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Quantile = math.Float64frombits(num)
case 2:
if wireType != proto.WireTypeI64 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var num uint64
num, pos, err = proto.ConsumeI64(buf, pos)
if err != nil {
return err
}
orig.Value = math.Float64frombits(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestSummaryDataPointValueAtQuantile() *SummaryDataPointValueAtQuantile {
orig := NewSummaryDataPointValueAtQuantile()
orig.Quantile = float64(3.1415926)
orig.Value = float64(3.1415926)
return orig
}
func GenTestSummaryDataPointValueAtQuantilePtrSlice() []*SummaryDataPointValueAtQuantile {
orig := make([]*SummaryDataPointValueAtQuantile, 5)
orig[0] = NewSummaryDataPointValueAtQuantile()
orig[1] = GenTestSummaryDataPointValueAtQuantile()
orig[2] = NewSummaryDataPointValueAtQuantile()
orig[3] = GenTestSummaryDataPointValueAtQuantile()
orig[4] = NewSummaryDataPointValueAtQuantile()
return orig
}
func GenTestSummaryDataPointValueAtQuantileSlice() []SummaryDataPointValueAtQuantile {
orig := make([]SummaryDataPointValueAtQuantile, 5)
orig[1] = *GenTestSummaryDataPointValueAtQuantile()
orig[3] = *GenTestSummaryDataPointValueAtQuantile()
return orig
}
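// Note (editorial): MarshalJSON above omits any field equal to its zero
// value, so a quantile of 0 (the minimum) serializes without a "quantile"
// key. This is consistent with proto3 JSON, where an absent field and an
// explicit zero are equivalent, and UnmarshalJSON restores 0 for the
// missing key.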
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type TCPAddr struct {
IP []byte
Port int64
Zone string
}
var (
protoPoolTCPAddr = sync.Pool{
New: func() any {
return &TCPAddr{}
},
}
)
func NewTCPAddr() *TCPAddr {
if !UseProtoPooling.IsEnabled() {
return &TCPAddr{}
}
return protoPoolTCPAddr.Get().(*TCPAddr)
}
func DeleteTCPAddr(orig *TCPAddr, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolTCPAddr.Put(orig)
}
}
func CopyTCPAddr(dest, src *TCPAddr) *TCPAddr {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewTCPAddr()
}
dest.IP = src.IP
dest.Port = src.Port
dest.Zone = src.Zone
return dest
}
func CopyTCPAddrSlice(dest, src []TCPAddr) []TCPAddr {
var newDest []TCPAddr
if cap(dest) < len(src) {
newDest = make([]TCPAddr, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTCPAddr(&dest[i], false)
}
}
for i := range src {
CopyTCPAddr(&newDest[i], &src[i])
}
return newDest
}
func CopyTCPAddrPtrSlice(dest, src []*TCPAddr) []*TCPAddr {
var newDest []*TCPAddr
if cap(dest) < len(src) {
newDest = make([]*TCPAddr, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTCPAddr()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTCPAddr(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTCPAddr()
}
}
for i := range src {
CopyTCPAddr(newDest[i], src[i])
}
return newDest
}
func (orig *TCPAddr) Reset() {
*orig = TCPAddr{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *TCPAddr) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.IP) > 0 {
dest.WriteObjectField("iP")
dest.WriteBytes(orig.IP)
}
if orig.Port != int64(0) {
dest.WriteObjectField("port")
dest.WriteInt64(orig.Port)
}
if orig.Zone != "" {
dest.WriteObjectField("zone")
dest.WriteString(orig.Zone)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties from the current struct from the source iterator.
func (orig *TCPAddr) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "iP":
orig.IP = iter.ReadBytes()
case "port":
orig.Port = iter.ReadInt64()
case "zone":
orig.Zone = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *TCPAddr) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.IP)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Port != 0 {
n += 1 + proto.Sov(uint64(orig.Port))
}
l = len(orig.Zone)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *TCPAddr) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.IP)
if l > 0 {
pos -= l
copy(buf[pos:], orig.IP)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.Port != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
pos--
buf[pos] = 0x10
}
l = len(orig.Zone)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Zone)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *TCPAddr) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// If parsing a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
if length != 0 {
orig.IP = make([]byte, length)
copy(orig.IP, buf[startPos:pos])
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Port = int64(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Zone = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestTCPAddr() *TCPAddr {
orig := NewTCPAddr()
orig.IP = []byte{1, 2, 3}
orig.Port = int64(13)
orig.Zone = "test_zone"
return orig
}
func GenTestTCPAddrPtrSlice() []*TCPAddr {
orig := make([]*TCPAddr, 5)
orig[0] = NewTCPAddr()
orig[1] = GenTestTCPAddr()
orig[2] = NewTCPAddr()
orig[3] = GenTestTCPAddr()
orig[4] = NewTCPAddr()
return orig
}
func GenTestTCPAddrSlice() []TCPAddr {
orig := make([]TCPAddr, 5)
orig[1] = *GenTestTCPAddr()
orig[3] = *GenTestTCPAddr()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// TracesData represents the traces data that can be stored in persistent storage,
// or can be embedded by other protocols that transfer OTLP traces data but do not
// implement the OTLP protocol.
type TracesData struct {
ResourceSpans []*ResourceSpans
}
var (
protoPoolTracesData = sync.Pool{
New: func() any {
return &TracesData{}
},
}
)
func NewTracesData() *TracesData {
if !UseProtoPooling.IsEnabled() {
return &TracesData{}
}
return protoPoolTracesData.Get().(*TracesData)
}
func DeleteTracesData(orig *TracesData, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
for i := range orig.ResourceSpans {
DeleteResourceSpans(orig.ResourceSpans[i], true)
}
orig.Reset()
if nullable {
protoPoolTracesData.Put(orig)
}
}
func CopyTracesData(dest, src *TracesData) *TracesData {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewTracesData()
}
dest.ResourceSpans = CopyResourceSpansPtrSlice(dest.ResourceSpans, src.ResourceSpans)
return dest
}
func CopyTracesDataSlice(dest, src []TracesData) []TracesData {
var newDest []TracesData
if cap(dest) < len(src) {
newDest = make([]TracesData, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTracesData(&dest[i], false)
}
}
for i := range src {
CopyTracesData(&newDest[i], &src[i])
}
return newDest
}
func CopyTracesDataPtrSlice(dest, src []*TracesData) []*TracesData {
var newDest []*TracesData
if cap(dest) < len(src) {
newDest = make([]*TracesData, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTracesData()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTracesData(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTracesData()
}
}
for i := range src {
CopyTracesData(newDest[i], src[i])
}
return newDest
}
func (orig *TracesData) Reset() {
*orig = TracesData{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *TracesData) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.ResourceSpans) > 0 {
dest.WriteObjectField("resourceSpans")
dest.WriteArrayStart()
orig.ResourceSpans[0].MarshalJSON(dest)
for i := 1; i < len(orig.ResourceSpans); i++ {
dest.WriteMore()
orig.ResourceSpans[i].MarshalJSON(dest)
}
dest.WriteArrayEnd()
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *TracesData) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "resourceSpans", "resource_spans":
for iter.ReadArray() {
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalJSON(iter)
}
default:
iter.Skip()
}
}
}
func (orig *TracesData) SizeProto() int {
var n int
var l int
_ = l
for i := range orig.ResourceSpans {
l = orig.ResourceSpans[i].SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *TracesData) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
for i := len(orig.ResourceSpans) - 1; i >= 0; i-- {
l = orig.ResourceSpans[i].MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
return len(buf) - pos
}
func (orig *TracesData) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// When parsing inside a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.ResourceSpans = append(orig.ResourceSpans, NewResourceSpans())
err = orig.ResourceSpans[len(orig.ResourceSpans)-1].UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
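// exampleTracesDataProtoRoundTrip is an illustrative sketch, not generated
// code: MarshalProto writes backwards from the end of buf and returns the
// number of bytes written, so the encoded message occupies the tail of the
// buffer. Sizing the buffer with SizeProto makes the tail the whole buffer.
func exampleTracesDataProtoRoundTrip() error {
	src := GenTestTracesData()
	buf := make([]byte, src.SizeProto())
	n := src.MarshalProto(buf) // n == len(buf) for an exactly sized buffer
	dst := NewTracesData()
	return dst.UnmarshalProto(buf[len(buf)-n:])
}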
func GenTestTracesData() *TracesData {
orig := NewTracesData()
orig.ResourceSpans = []*ResourceSpans{{}, GenTestResourceSpans()}
return orig
}
func GenTestTracesDataPtrSlice() []*TracesData {
orig := make([]*TracesData, 5)
orig[0] = NewTracesData()
orig[1] = GenTestTracesData()
orig[2] = NewTracesData()
orig[3] = GenTestTracesData()
orig[4] = NewTracesData()
return orig
}
func GenTestTracesDataSlice() []TracesData {
orig := make([]TracesData, 5)
orig[1] = *GenTestTracesData()
orig[3] = *GenTestTracesData()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"encoding/binary"
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type TracesRequest struct {
RequestContext *RequestContext
TracesData TracesData
FormatVersion uint32
}
var (
protoPoolTracesRequest = sync.Pool{
New: func() any {
return &TracesRequest{}
},
}
)
func NewTracesRequest() *TracesRequest {
if !UseProtoPooling.IsEnabled() {
return &TracesRequest{}
}
return protoPoolTracesRequest.Get().(*TracesRequest)
}
func DeleteTracesRequest(orig *TracesRequest, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
DeleteRequestContext(orig.RequestContext, true)
DeleteTracesData(&orig.TracesData, false)
orig.Reset()
if nullable {
protoPoolTracesRequest.Put(orig)
}
}
func CopyTracesRequest(dest, src *TracesRequest) *TracesRequest {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewTracesRequest()
}
dest.RequestContext = CopyRequestContext(dest.RequestContext, src.RequestContext)
CopyTracesData(&dest.TracesData, &src.TracesData)
dest.FormatVersion = src.FormatVersion
return dest
}
func CopyTracesRequestSlice(dest, src []TracesRequest) []TracesRequest {
var newDest []TracesRequest
if cap(dest) < len(src) {
newDest = make([]TracesRequest, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTracesRequest(&dest[i], false)
}
}
for i := range src {
CopyTracesRequest(&newDest[i], &src[i])
}
return newDest
}
func CopyTracesRequestPtrSlice(dest, src []*TracesRequest) []*TracesRequest {
var newDest []*TracesRequest
if cap(dest) < len(src) {
newDest = make([]*TracesRequest, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTracesRequest()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteTracesRequest(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewTracesRequest()
}
}
for i := range src {
CopyTracesRequest(newDest[i], src[i])
}
return newDest
}
func (orig *TracesRequest) Reset() {
*orig = TracesRequest{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *TracesRequest) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.RequestContext != nil {
dest.WriteObjectField("requestContext")
orig.RequestContext.MarshalJSON(dest)
}
dest.WriteObjectField("tracesData")
orig.TracesData.MarshalJSON(dest)
if orig.FormatVersion != uint32(0) {
dest.WriteObjectField("formatVersion")
dest.WriteUint32(orig.FormatVersion)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *TracesRequest) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "requestContext", "request_context":
orig.RequestContext = NewRequestContext()
orig.RequestContext.UnmarshalJSON(iter)
case "tracesData", "traces_data":
orig.TracesData.UnmarshalJSON(iter)
case "formatVersion", "format_version":
orig.FormatVersion = iter.ReadUint32()
default:
iter.Skip()
}
}
}
func (orig *TracesRequest) SizeProto() int {
var n int
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
}
l = orig.TracesData.SizeProto()
n += 1 + proto.Sov(uint64(l)) + l
if orig.FormatVersion != 0 {
n += 5
}
return n
}
func (orig *TracesRequest) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.RequestContext != nil {
l = orig.RequestContext.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
l = orig.TracesData.MarshalProto(buf[:pos])
pos -= l
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
if orig.FormatVersion != 0 {
pos -= 4
binary.LittleEndian.PutUint32(buf[pos:], uint32(orig.FormatVersion))
pos--
buf[pos] = 0xd
}
return len(buf) - pos
}
func (orig *TracesRequest) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// When parsing inside a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field RequestContext", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.RequestContext = NewRequestContext()
err = orig.RequestContext.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field TracesData", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
err = orig.TracesData.UnmarshalProto(buf[startPos:pos])
if err != nil {
return err
}
case 1:
if wireType != proto.WireTypeI32 {
return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType)
}
var num uint32
num, pos, err = proto.ConsumeI32(buf, pos)
if err != nil {
return err
}
orig.FormatVersion = uint32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestTracesRequest() *TracesRequest {
orig := NewTracesRequest()
orig.RequestContext = GenTestRequestContext()
orig.TracesData = *GenTestTracesData()
orig.FormatVersion = uint32(13)
return orig
}
func GenTestTracesRequestPtrSlice() []*TracesRequest {
orig := make([]*TracesRequest, 5)
orig[0] = NewTracesRequest()
orig[1] = GenTestTracesRequest()
orig[2] = NewTracesRequest()
orig[3] = GenTestTracesRequest()
orig[4] = NewTracesRequest()
return orig
}
func GenTestTracesRequestSlice() []TracesRequest {
orig := make([]TracesRequest, 5)
orig[1] = *GenTestTracesRequest()
orig[3] = *GenTestTracesRequest()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type UDPAddr struct {
IP []byte
Port int64
Zone string
}
var (
protoPoolUDPAddr = sync.Pool{
New: func() any {
return &UDPAddr{}
},
}
)
func NewUDPAddr() *UDPAddr {
if !UseProtoPooling.IsEnabled() {
return &UDPAddr{}
}
return protoPoolUDPAddr.Get().(*UDPAddr)
}
func DeleteUDPAddr(orig *UDPAddr, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolUDPAddr.Put(orig)
}
}
func CopyUDPAddr(dest, src *UDPAddr) *UDPAddr {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewUDPAddr()
}
dest.IP = src.IP
dest.Port = src.Port
dest.Zone = src.Zone
return dest
}
func CopyUDPAddrSlice(dest, src []UDPAddr) []UDPAddr {
var newDest []UDPAddr
if cap(dest) < len(src) {
newDest = make([]UDPAddr, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteUDPAddr(&dest[i], false)
}
}
for i := range src {
CopyUDPAddr(&newDest[i], &src[i])
}
return newDest
}
func CopyUDPAddrPtrSlice(dest, src []*UDPAddr) []*UDPAddr {
var newDest []*UDPAddr
if cap(dest) < len(src) {
newDest = make([]*UDPAddr, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewUDPAddr()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteUDPAddr(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewUDPAddr()
}
}
for i := range src {
CopyUDPAddr(newDest[i], src[i])
}
return newDest
}
func (orig *UDPAddr) Reset() {
*orig = UDPAddr{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *UDPAddr) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if len(orig.IP) > 0 {
dest.WriteObjectField("iP")
dest.WriteBytes(orig.IP)
}
if orig.Port != int64(0) {
dest.WriteObjectField("port")
dest.WriteInt64(orig.Port)
}
if orig.Zone != "" {
dest.WriteObjectField("zone")
dest.WriteString(orig.Zone)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *UDPAddr) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "iP":
orig.IP = iter.ReadBytes()
case "port":
orig.Port = iter.ReadInt64()
case "zone":
orig.Zone = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *UDPAddr) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.IP)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
if orig.Port != 0 {
n += 1 + proto.Sov(uint64(orig.Port))
}
l = len(orig.Zone)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *UDPAddr) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.IP)
if l > 0 {
pos -= l
copy(buf[pos:], orig.IP)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
if orig.Port != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.Port))
pos--
buf[pos] = 0x10
}
l = len(orig.Zone)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Zone)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x1a
}
return len(buf) - pos
}
func (orig *UDPAddr) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// When parsing inside a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
if length != 0 {
orig.IP = make([]byte, length)
copy(orig.IP, buf[startPos:pos])
}
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.Port = int64(num)
case 3:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Zone = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestUDPAddr() *UDPAddr {
orig := NewUDPAddr()
orig.IP = []byte{1, 2, 3}
orig.Port = int64(13)
orig.Zone = "test_zone"
return orig
}
func GenTestUDPAddrPtrSlice() []*UDPAddr {
orig := make([]*UDPAddr, 5)
orig[0] = NewUDPAddr()
orig[1] = GenTestUDPAddr()
orig[2] = NewUDPAddr()
orig[3] = GenTestUDPAddr()
orig[4] = NewUDPAddr()
return orig
}
func GenTestUDPAddrSlice() []UDPAddr {
orig := make([]UDPAddr, 5)
orig[1] = *GenTestUDPAddr()
orig[3] = *GenTestUDPAddr()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
type UnixAddr struct {
Name string
Net string
}
var (
protoPoolUnixAddr = sync.Pool{
New: func() any {
return &UnixAddr{}
},
}
)
func NewUnixAddr() *UnixAddr {
if !UseProtoPooling.IsEnabled() {
return &UnixAddr{}
}
return protoPoolUnixAddr.Get().(*UnixAddr)
}
func DeleteUnixAddr(orig *UnixAddr, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolUnixAddr.Put(orig)
}
}
func CopyUnixAddr(dest, src *UnixAddr) *UnixAddr {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewUnixAddr()
}
dest.Name = src.Name
dest.Net = src.Net
return dest
}
func CopyUnixAddrSlice(dest, src []UnixAddr) []UnixAddr {
var newDest []UnixAddr
if cap(dest) < len(src) {
newDest = make([]UnixAddr, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteUnixAddr(&dest[i], false)
}
}
for i := range src {
CopyUnixAddr(&newDest[i], &src[i])
}
return newDest
}
func CopyUnixAddrPtrSlice(dest, src []*UnixAddr) []*UnixAddr {
var newDest []*UnixAddr
if cap(dest) < len(src) {
newDest = make([]*UnixAddr, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewUnixAddr()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteUnixAddr(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewUnixAddr()
}
}
for i := range src {
CopyUnixAddr(newDest[i], src[i])
}
return newDest
}
func (orig *UnixAddr) Reset() {
*orig = UnixAddr{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *UnixAddr) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.Name != "" {
dest.WriteObjectField("name")
dest.WriteString(orig.Name)
}
if orig.Net != "" {
dest.WriteObjectField("net")
dest.WriteString(orig.Net)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *UnixAddr) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "name":
orig.Name = iter.ReadString()
case "net":
orig.Net = iter.ReadString()
default:
iter.Skip()
}
}
}
func (orig *UnixAddr) SizeProto() int {
var n int
var l int
_ = l
l = len(orig.Name)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
l = len(orig.Net)
if l > 0 {
n += 1 + proto.Sov(uint64(l)) + l
}
return n
}
func (orig *UnixAddr) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
l = len(orig.Name)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Name)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0xa
}
l = len(orig.Net)
if l > 0 {
pos -= l
copy(buf[pos:], orig.Net)
pos = proto.EncodeVarint(buf, pos, uint64(l))
pos--
buf[pos] = 0x12
}
return len(buf) - pos
}
func (orig *UnixAddr) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// When parsing inside a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Name = string(buf[startPos:pos])
case 2:
if wireType != proto.WireTypeLen {
return fmt.Errorf("proto: wrong wireType = %d for field Net", wireType)
}
var length int
length, pos, err = proto.ConsumeLen(buf, pos)
if err != nil {
return err
}
startPos := pos - length
orig.Net = string(buf[startPos:pos])
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestUnixAddr() *UnixAddr {
orig := NewUnixAddr()
orig.Name = "test_name"
orig.Net = "test_net"
return orig
}
func GenTestUnixAddrPtrSlice() []*UnixAddr {
orig := make([]*UnixAddr, 5)
orig[0] = NewUnixAddr()
orig[1] = GenTestUnixAddr()
orig[2] = NewUnixAddr()
orig[3] = GenTestUnixAddr()
orig[4] = NewUnixAddr()
return orig
}
func GenTestUnixAddrSlice() []UnixAddr {
orig := make([]UnixAddr, 5)
orig[1] = *GenTestUnixAddr()
orig[3] = *GenTestUnixAddr()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
import (
"fmt"
"sync"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/proto"
)
// ValueType describes the type and units of a value.
type ValueType struct {
TypeStrindex int32
UnitStrindex int32
}
var (
protoPoolValueType = sync.Pool{
New: func() any {
return &ValueType{}
},
}
)
func NewValueType() *ValueType {
if !UseProtoPooling.IsEnabled() {
return &ValueType{}
}
return protoPoolValueType.Get().(*ValueType)
}
func DeleteValueType(orig *ValueType, nullable bool) {
if orig == nil {
return
}
if !UseProtoPooling.IsEnabled() {
orig.Reset()
return
}
orig.Reset()
if nullable {
protoPoolValueType.Put(orig)
}
}
func CopyValueType(dest, src *ValueType) *ValueType {
// If copying to same object, just return.
if src == dest {
return dest
}
if src == nil {
return nil
}
if dest == nil {
dest = NewValueType()
}
dest.TypeStrindex = src.TypeStrindex
dest.UnitStrindex = src.UnitStrindex
return dest
}
func CopyValueTypeSlice(dest, src []ValueType) []ValueType {
var newDest []ValueType
if cap(dest) < len(src) {
newDest = make([]ValueType, len(src))
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteValueType(&dest[i], false)
}
}
for i := range src {
CopyValueType(&newDest[i], &src[i])
}
return newDest
}
func CopyValueTypePtrSlice(dest, src []*ValueType) []*ValueType {
var newDest []*ValueType
if cap(dest) < len(src) {
newDest = make([]*ValueType, len(src))
// Copy old pointers to re-use.
copy(newDest, dest)
// Add new pointers for missing elements from len(dest) to len(src).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewValueType()
}
} else {
newDest = dest[:len(src)]
// Clean up the rest of the elements so the GC can free the memory.
// This can happen when len(src) < len(dest) < cap(dest).
for i := len(src); i < len(dest); i++ {
DeleteValueType(dest[i], true)
dest[i] = nil
}
// Add new pointers for missing elements.
// This can happen when len(dest) < len(src) < cap(dest).
for i := len(dest); i < len(src); i++ {
newDest[i] = NewValueType()
}
}
for i := range src {
CopyValueType(newDest[i], src[i])
}
return newDest
}
func (orig *ValueType) Reset() {
*orig = ValueType{}
}
// MarshalJSON marshals all properties from the current struct to the destination stream.
func (orig *ValueType) MarshalJSON(dest *json.Stream) {
dest.WriteObjectStart()
if orig.TypeStrindex != int32(0) {
dest.WriteObjectField("typeStrindex")
dest.WriteInt32(orig.TypeStrindex)
}
if orig.UnitStrindex != int32(0) {
dest.WriteObjectField("unitStrindex")
dest.WriteInt32(orig.UnitStrindex)
}
dest.WriteObjectEnd()
}
// UnmarshalJSON unmarshals all properties into the current struct from the source iterator.
func (orig *ValueType) UnmarshalJSON(iter *json.Iterator) {
for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
switch f {
case "typeStrindex", "type_strindex":
orig.TypeStrindex = iter.ReadInt32()
case "unitStrindex", "unit_strindex":
orig.UnitStrindex = iter.ReadInt32()
default:
iter.Skip()
}
}
}
func (orig *ValueType) SizeProto() int {
var n int
var l int
_ = l
if orig.TypeStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.TypeStrindex))
}
if orig.UnitStrindex != 0 {
n += 1 + proto.Sov(uint64(orig.UnitStrindex))
}
return n
}
func (orig *ValueType) MarshalProto(buf []byte) int {
pos := len(buf)
var l int
_ = l
if orig.TypeStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.TypeStrindex))
pos--
buf[pos] = 0x8
}
if orig.UnitStrindex != 0 {
pos = proto.EncodeVarint(buf, pos, uint64(orig.UnitStrindex))
pos--
buf[pos] = 0x10
}
return len(buf) - pos
}
func (orig *ValueType) UnmarshalProto(buf []byte) error {
var err error
var fieldNum int32
var wireType proto.WireType
l := len(buf)
pos := 0
for pos < l {
// When parsing inside a group, move to the next tag.
fieldNum, wireType, pos, err = proto.ConsumeTag(buf, pos)
if err != nil {
return err
}
switch fieldNum {
case 1:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.TypeStrindex = int32(num)
case 2:
if wireType != proto.WireTypeVarint {
return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType)
}
var num uint64
num, pos, err = proto.ConsumeVarint(buf, pos)
if err != nil {
return err
}
orig.UnitStrindex = int32(num)
default:
pos, err = proto.ConsumeUnknown(buf, pos, wireType)
if err != nil {
return err
}
}
}
return nil
}
func GenTestValueType() *ValueType {
orig := NewValueType()
orig.TypeStrindex = int32(13)
orig.UnitStrindex = int32(13)
return orig
}
func GenTestValueTypePtrSlice() []*ValueType {
orig := make([]*ValueType, 5)
orig[0] = NewValueType()
orig[1] = GenTestValueType()
orig[2] = NewValueType()
orig[3] = GenTestValueType()
orig[4] = NewValueType()
return orig
}
func GenTestValueTypeSlice() []ValueType {
orig := make([]ValueType, 5)
orig[1] = *GenTestValueType()
orig[3] = *GenTestValueType()
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type SliceWrapper struct {
orig *[]AnyValue
state *State
}
func GetSliceOrig(ms SliceWrapper) *[]AnyValue {
return ms.orig
}
func GetSliceState(ms SliceWrapper) *State {
return ms.state
}
func NewSliceWrapper(orig *[]AnyValue, state *State) SliceWrapper {
return SliceWrapper{orig: orig, state: state}
}
func GenTestSliceWrapper() SliceWrapper {
orig := GenTestAnyValueSlice()
return NewSliceWrapper(&orig, NewState())
}
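// exampleSliceWrapperAccess is an illustrative sketch, not generated code:
// a wrapper pairs a pointer to the raw []AnyValue payload with a shared
// State, and the Get*Orig/Get*State helpers expose both halves to the public
// pdata packages that wrap this internal representation.
func exampleSliceWrapperAccess() int {
	orig := GenTestAnyValueSlice()
	ms := NewSliceWrapper(&orig, NewState())
	return len(*GetSliceOrig(ms))
}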
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type ByteSliceWrapper struct {
orig *[]byte
state *State
}
func GetByteSliceOrig(ms ByteSliceWrapper) *[]byte {
return ms.orig
}
func GetByteSliceState(ms ByteSliceWrapper) *State {
return ms.state
}
func NewByteSliceWrapper(orig *[]byte, state *State) ByteSliceWrapper {
return ByteSliceWrapper{orig: orig, state: state}
}
func GenTestByteSliceWrapper() ByteSliceWrapper {
orig := []byte{1, 2, 3}
return NewByteSliceWrapper(&orig, NewState())
}
func GenTestByteSlice() []byte {
return []byte{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type EntityRefWrapper struct {
orig *EntityRef
state *State
}
func GetEntityRefOrig(ms EntityRefWrapper) *EntityRef {
return ms.orig
}
func GetEntityRefState(ms EntityRefWrapper) *State {
return ms.state
}
func NewEntityRefWrapper(orig *EntityRef, state *State) EntityRefWrapper {
return EntityRefWrapper{orig: orig, state: state}
}
func GenTestEntityRefWrapper() EntityRefWrapper {
return NewEntityRefWrapper(GenTestEntityRef(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type EntityRefSliceWrapper struct {
orig *[]*EntityRef
state *State
}
func GetEntityRefSliceOrig(ms EntityRefSliceWrapper) *[]*EntityRef {
return ms.orig
}
func GetEntityRefSliceState(ms EntityRefSliceWrapper) *State {
return ms.state
}
func NewEntityRefSliceWrapper(orig *[]*EntityRef, state *State) EntityRefSliceWrapper {
return EntityRefSliceWrapper{orig: orig, state: state}
}
func GenTestEntityRefSliceWrapper() EntityRefSliceWrapper {
orig := GenTestEntityRefPtrSlice()
return NewEntityRefSliceWrapper(&orig, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type LogsWrapper struct {
orig *ExportLogsServiceRequest
state *State
}
func GetLogsOrig(ms LogsWrapper) *ExportLogsServiceRequest {
return ms.orig
}
func GetLogsState(ms LogsWrapper) *State {
return ms.state
}
func NewLogsWrapper(orig *ExportLogsServiceRequest, state *State) LogsWrapper {
return LogsWrapper{orig: orig, state: state}
}
func GenTestLogsWrapper() LogsWrapper {
return NewLogsWrapper(GenTestExportLogsServiceRequest(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type MetricsWrapper struct {
orig *ExportMetricsServiceRequest
state *State
}
func GetMetricsOrig(ms MetricsWrapper) *ExportMetricsServiceRequest {
return ms.orig
}
func GetMetricsState(ms MetricsWrapper) *State {
return ms.state
}
func NewMetricsWrapper(orig *ExportMetricsServiceRequest, state *State) MetricsWrapper {
return MetricsWrapper{orig: orig, state: state}
}
func GenTestMetricsWrapper() MetricsWrapper {
return NewMetricsWrapper(GenTestExportMetricsServiceRequest(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type ProfilesWrapper struct {
orig *ExportProfilesServiceRequest
state *State
}
func GetProfilesOrig(ms ProfilesWrapper) *ExportProfilesServiceRequest {
return ms.orig
}
func GetProfilesState(ms ProfilesWrapper) *State {
return ms.state
}
func NewProfilesWrapper(orig *ExportProfilesServiceRequest, state *State) ProfilesWrapper {
return ProfilesWrapper{orig: orig, state: state}
}
func GenTestProfilesWrapper() ProfilesWrapper {
return NewProfilesWrapper(GenTestExportProfilesServiceRequest(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type TracesWrapper struct {
orig *ExportTraceServiceRequest
state *State
}
func GetTracesOrig(ms TracesWrapper) *ExportTraceServiceRequest {
return ms.orig
}
func GetTracesState(ms TracesWrapper) *State {
return ms.state
}
func NewTracesWrapper(orig *ExportTraceServiceRequest, state *State) TracesWrapper {
return TracesWrapper{orig: orig, state: state}
}
func GenTestTracesWrapper() TracesWrapper {
return NewTracesWrapper(GenTestExportTraceServiceRequest(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Float64SliceWrapper struct {
orig *[]float64
state *State
}
func GetFloat64SliceOrig(ms Float64SliceWrapper) *[]float64 {
return ms.orig
}
func GetFloat64SliceState(ms Float64SliceWrapper) *State {
return ms.state
}
func NewFloat64SliceWrapper(orig *[]float64, state *State) Float64SliceWrapper {
return Float64SliceWrapper{orig: orig, state: state}
}
func GenTestFloat64SliceWrapper() Float64SliceWrapper {
orig := []float64{1.1, 2.2, 3.3}
return NewFloat64SliceWrapper(&orig, NewState())
}
func GenTestFloat64Slice() []float64 {
return []float64{1.1, 2.2, 3.3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type InstrumentationScopeWrapper struct {
orig *InstrumentationScope
state *State
}
func GetInstrumentationScopeOrig(ms InstrumentationScopeWrapper) *InstrumentationScope {
return ms.orig
}
func GetInstrumentationScopeState(ms InstrumentationScopeWrapper) *State {
return ms.state
}
func NewInstrumentationScopeWrapper(orig *InstrumentationScope, state *State) InstrumentationScopeWrapper {
return InstrumentationScopeWrapper{orig: orig, state: state}
}
func GenTestInstrumentationScopeWrapper() InstrumentationScopeWrapper {
return NewInstrumentationScopeWrapper(GenTestInstrumentationScope(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Int32SliceWrapper struct {
orig *[]int32
state *State
}
func GetInt32SliceOrig(ms Int32SliceWrapper) *[]int32 {
return ms.orig
}
func GetInt32SliceState(ms Int32SliceWrapper) *State {
return ms.state
}
func NewInt32SliceWrapper(orig *[]int32, state *State) Int32SliceWrapper {
return Int32SliceWrapper{orig: orig, state: state}
}
func GenTestInt32SliceWrapper() Int32SliceWrapper {
orig := []int32{1, 2, 3}
return NewInt32SliceWrapper(&orig, NewState())
}
func GenTestInt32Slice() []int32 {
return []int32{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type Int64SliceWrapper struct {
orig *[]int64
state *State
}
func GetInt64SliceOrig(ms Int64SliceWrapper) *[]int64 {
return ms.orig
}
func GetInt64SliceState(ms Int64SliceWrapper) *State {
return ms.state
}
func NewInt64SliceWrapper(orig *[]int64, state *State) Int64SliceWrapper {
return Int64SliceWrapper{orig: orig, state: state}
}
func GenTestInt64SliceWrapper() Int64SliceWrapper {
orig := []int64{1, 2, 3}
return NewInt64SliceWrapper(&orig, NewState())
}
func GenTestInt64Slice() []int64 {
return []int64{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type ProfilesDataWrapper struct {
orig *ProfilesData
state *State
}
func GetProfilesDataOrig(ms ProfilesDataWrapper) *ProfilesData {
return ms.orig
}
func GetProfilesDataState(ms ProfilesDataWrapper) *State {
return ms.state
}
func NewProfilesDataWrapper(orig *ProfilesData, state *State) ProfilesDataWrapper {
return ProfilesDataWrapper{orig: orig, state: state}
}
func GenTestProfilesDataWrapper() ProfilesDataWrapper {
return NewProfilesDataWrapper(GenTestProfilesData(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type ResourceWrapper struct {
orig *Resource
state *State
}
func GetResourceOrig(ms ResourceWrapper) *Resource {
return ms.orig
}
func GetResourceState(ms ResourceWrapper) *State {
return ms.state
}
func NewResourceWrapper(orig *Resource, state *State) ResourceWrapper {
return ResourceWrapper{orig: orig, state: state}
}
func GenTestResourceWrapper() ResourceWrapper {
return NewResourceWrapper(GenTestResource(), NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type StringSliceWrapper struct {
orig *[]string
state *State
}
func GetStringSliceOrig(ms StringSliceWrapper) *[]string {
return ms.orig
}
func GetStringSliceState(ms StringSliceWrapper) *State {
return ms.state
}
func NewStringSliceWrapper(orig *[]string, state *State) StringSliceWrapper {
return StringSliceWrapper{orig: orig, state: state}
}
func GenTestStringSliceWrapper() StringSliceWrapper {
orig := []string{"a", "b", "c"}
return NewStringSliceWrapper(&orig, NewState())
}
func GenTestStringSlice() []string {
return []string{"a", "b", "c"}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package internal
type UInt64SliceWrapper struct {
orig *[]uint64
state *State
}
func GetUInt64SliceOrig(ms UInt64SliceWrapper) *[]uint64 {
return ms.orig
}
func GetUInt64SliceState(ms UInt64SliceWrapper) *State {
return ms.state
}
func NewUInt64SliceWrapper(orig *[]uint64, state *State) UInt64SliceWrapper {
return UInt64SliceWrapper{orig: orig, state: state}
}
func GenTestUInt64SliceWrapper() UInt64SliceWrapper {
orig := []uint64{1, 2, 3}
return NewUInt64SliceWrapper(&orig, NewState())
}
func GenTestUint64Slice() []uint64 {
return []uint64{1, 2, 3}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package json // import "go.opentelemetry.io/collector/pdata/internal/json"
import (
"encoding/base64"
"strconv"
jsoniter "github.com/json-iterator/go"
)
func BorrowIterator(data []byte) *Iterator {
return &Iterator{
delegate: jsoniter.ConfigFastest.BorrowIterator(data),
}
}
func ReturnIterator(s *Iterator) {
jsoniter.ConfigFastest.ReturnIterator(s.delegate)
}
type Iterator struct {
delegate *jsoniter.Iterator
}
// ReadInt32 unmarshals JSON data into an int32. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadInt32() int32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadInt32()
case jsoniter.StringValue:
val, err := strconv.ParseInt(iter.ReadString(), 10, 32)
if err != nil {
iter.ReportError("ReadInt32", err.Error())
return 0
}
return int32(val)
default:
iter.ReportError("ReadInt32", "unsupported value type")
return 0
}
}
// ReadUint32 unmarshals JSON data into a uint32. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadUint32() uint32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadUint32()
case jsoniter.StringValue:
val, err := strconv.ParseUint(iter.ReadString(), 10, 32)
if err != nil {
iter.ReportError("ReadUint32", err.Error())
return 0
}
return uint32(val)
default:
iter.ReportError("ReadUint32", "unsupported value type")
return 0
}
}
// ReadInt64 unmarshals JSON data into an int64. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadInt64() int64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadInt64()
case jsoniter.StringValue:
val, err := strconv.ParseInt(iter.ReadString(), 10, 64)
if err != nil {
iter.ReportError("ReadInt64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadInt64", "unsupported value type")
return 0
}
}
// ReadUint64 unmarshals JSON data into a uint64. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadUint64() uint64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadUint64()
case jsoniter.StringValue:
val, err := strconv.ParseUint(iter.ReadString(), 10, 64)
if err != nil {
iter.ReportError("ReadUint64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadUint64", "unsupported value type")
return 0
}
}
// ReadFloat32 unmarshals JSON data into a float32. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadFloat32() float32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadFloat32()
case jsoniter.StringValue:
val, err := strconv.ParseFloat(iter.ReadString(), 32)
if err != nil {
iter.ReportError("ReadUint64", err.Error())
return 0
}
return float32(val)
default:
iter.ReportError("ReadUint64", "unsupported value type")
return 0
}
}
// ReadFloat64 unmarshals JSON data into a float64. Accepts both numbers and decimal strings.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadFloat64() float64 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.delegate.ReadFloat64()
case jsoniter.StringValue:
val, err := strconv.ParseFloat(iter.ReadString(), 64)
if err != nil {
iter.ReportError("ReadUint64", err.Error())
return 0
}
return val
default:
iter.ReportError("ReadUint64", "unsupported value type")
return 0
}
}
// ReadBool reads a JSON value as a bool.
func (iter *Iterator) ReadBool() bool {
return iter.delegate.ReadBool()
}
// ReadString reads a string from the iterator.
func (iter *Iterator) ReadString() string {
return iter.delegate.ReadString()
}
// ReadBytes reads base64-encoded bytes from the iterator.
func (iter *Iterator) ReadBytes() []byte {
buf := iter.ReadStringAsSlice()
if len(buf) == 0 {
return nil
}
orig := make([]byte, base64.StdEncoding.DecodedLen(len(buf)))
n, err := base64.StdEncoding.Decode(orig, buf)
if err != nil {
iter.ReportError("base64.Decode", err.Error())
}
return orig[:n]
}
// ReadStringAsSlice reads a string from the iterator without copying it into string form.
// The returned []byte must not be retained, as it is invalidated by the next iterator call.
func (iter *Iterator) ReadStringAsSlice() []byte {
return iter.delegate.ReadStringAsSlice()
}
// ReportError records an error in the iterator instance at the current position.
func (iter *Iterator) ReportError(operation, msg string) {
iter.delegate.ReportError(operation, msg)
}
// Error returns the recorded error, if any; otherwise it returns nil.
func (iter *Iterator) Error() error {
return iter.delegate.Error
}
// Skip skips a JSON value and positions the iterator at the next one.
func (iter *Iterator) Skip() {
iter.delegate.Skip()
}
// ReadArray reads an array element and returns true if the array has more elements to read.
func (iter *Iterator) ReadArray() bool {
return iter.delegate.ReadArray()
}
// ReadObject reads one field from an object.
// If the object has ended, it returns the empty string; otherwise it returns the field name.
func (iter *Iterator) ReadObject() string {
return iter.delegate.ReadObject()
}
// ReadEnumValue returns the enum integer value representation. Accepts both enum names and enum integer values.
// See https://developers.google.com/protocol-buffers/docs/proto3#json.
func (iter *Iterator) ReadEnumValue(valueMap map[string]int32) int32 {
switch iter.delegate.WhatIsNext() {
case jsoniter.NumberValue:
return iter.ReadInt32()
case jsoniter.StringValue:
val, ok := valueMap[iter.ReadString()]
// Same behavior with official protobuf JSON decoder,
// see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81
if !ok {
iter.ReportError("ReadEnumValue", "unknown string value")
return 0
}
return val
default:
iter.ReportError("ReadEnumValue", "unsupported value type")
return 0
}
}
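// exampleReadEnumValue is an illustrative sketch, not generated code, and the
// valueMap below is hypothetical: ReadEnumValue lets a payload carry either
// the enum name ("SPAN_KIND_SERVER") or its integer value (2), matching the
// protobuf JSON rules linked above.
func exampleReadEnumValue(iter *Iterator) int32 {
	valueMap := map[string]int32{
		"SPAN_KIND_UNSPECIFIED": 0,
		"SPAN_KIND_SERVER":      2,
	}
	return iter.ReadEnumValue(valueMap)
}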
// ResetBytes reuses the iterator instance with another byte slice as input.
func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.delegate.ResetBytes(input)
return iter
}
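// exampleReadObjectLoop is an illustrative sketch, not generated code: it
// shows the borrow/iterate/return pattern the generated UnmarshalJSON
// methods follow, with unknown fields skipped rather than rejected.
func exampleReadObjectLoop(data []byte) (port int64, err error) {
	iter := BorrowIterator(data)
	defer ReturnIterator(iter)
	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
		switch f {
		case "port":
			port = iter.ReadInt64()
		default:
			iter.Skip()
		}
	}
	return port, iter.Error()
}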
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package json // import "go.opentelemetry.io/collector/pdata/internal/json"
import (
"encoding/base64"
"errors"
"io"
"math"
"strconv"
jsoniter "github.com/json-iterator/go"
)
func BorrowStream(writer io.Writer) *Stream {
return &Stream{
Stream: jsoniter.ConfigFastest.BorrowStream(writer),
wmTracker: make([]bool, 0, 32),
}
}
func ReturnStream(s *Stream) {
jsoniter.ConfigFastest.ReturnStream(s.Stream)
}
// Stream avoids the need to explicitly call the `Stream.WriteMore` method while marshaling objects by
// checking whether a field was previously written inside the current object and, if so, automatically
// appending a "," before writing the next field.
type Stream struct {
*jsoniter.Stream
// wmTracker acts like a stack: a value is pushed when an object is started and the top is popped
// when the object ends. The value for each object tracks whether any field has already been
// written for that object; if so, a "," is automatically added before any new field.
wmTracker []bool
}
func (ots *Stream) WriteObjectStart() {
ots.Stream.WriteObjectStart()
ots.wmTracker = append(ots.wmTracker, false)
}
func (ots *Stream) WriteObjectField(field string) {
if ots.wmTracker[len(ots.wmTracker)-1] {
ots.WriteMore()
}
ots.Stream.WriteObjectField(field)
ots.wmTracker[len(ots.wmTracker)-1] = true
}
func (ots *Stream) WriteObjectEnd() {
ots.Stream.WriteObjectEnd()
ots.wmTracker = ots.wmTracker[:len(ots.wmTracker)-1]
}
// WriteInt64 writes the values as a decimal string. This is per the protobuf encoding rules for int64, fixed64, uint64.
func (ots *Stream) WriteInt64(val int64) {
ots.WriteString(strconv.FormatInt(val, 10))
}
// WriteUint64 writes the values as a decimal string. This is per the protobuf encoding rules for int64, fixed64, uint64.
func (ots *Stream) WriteUint64(val uint64) {
ots.WriteString(strconv.FormatUint(val, 10))
}
// WriteBytes writes the values as a base64 encoded string. This is per the protobuf encoding rules for bytes.
func (ots *Stream) WriteBytes(val []byte) {
if len(val) == 0 {
ots.WriteString("")
return
}
ots.WriteString(base64.StdEncoding.EncodeToString(val))
}
// WriteFloat64 writes the JSON value that will be a number or one of the special string
// values "NaN", "Infinity", and "-Infinity". Either numbers or strings are accepted.
// Empty strings are invalid. Exponent notation is also accepted.
// See https://protobuf.dev/programming-guides/json/.
func (ots *Stream) WriteFloat64(val float64) {
if math.IsNaN(val) {
ots.WriteString("NaN")
return
}
if math.IsInf(val, 1) {
ots.WriteString("Infinity")
return
}
if math.IsInf(val, -1) {
ots.WriteString("-Infinity")
return
}
ots.Stream.WriteFloat64(val)
}
func (ots *Stream) ReportError(err error) {
ots.Stream.Error = errors.Join(ots.Stream.Error, err)
}
func (ots *Stream) Error() error {
return ots.Stream.Error
}
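// exampleStreamWrite is an illustrative sketch, not generated code: because
// wmTracker remembers whether the current object already has a field, the
// second WriteObjectField gets its "," automatically and no explicit
// WriteMore call is needed.
func exampleStreamWrite(w io.Writer) error {
	s := BorrowStream(w)
	defer ReturnStream(s)
	s.WriteObjectStart()
	s.WriteObjectField("name")
	s.WriteString("example")
	s.WriteObjectField("value") // "," inserted automatically
	s.WriteFloat64(math.Inf(1)) // serialized as the string "Infinity"
	s.WriteObjectEnd()
	if err := s.Flush(); err != nil {
		return err
	}
	return s.Error()
}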
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/mem"
)
var (
defaultBufferPoolSizes = []int{
256,
4 << 10, // 4KB (go page size)
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
512 << 10, // 512KB
1 << 20, // 1MB
4 << 20, // 4MB
16 << 20, // 16MB
}
otelBufferPool = mem.NewTieredBufferPool(defaultBufferPoolSizes...)
)
// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
// created with mem.NewTieredBufferPool that uses a set of default sizes optimized for
// expected telemetry workflows.
func DefaultBufferPool() mem.BufferPool {
return otelBufferPool
}
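// exampleGetBuffer is an illustrative sketch, not generated code, assuming
// mem.NewTieredBufferPool serves each request from the smallest tier that
// fits: with the default sizes above, a 10KB request comes from the 16KB tier.
func exampleGetBuffer() {
	buf := DefaultBufferPool().Get(10 << 10)
	defer DefaultBufferPool().Put(buf)
	_ = len(*buf) // 10KB of usable length backed by a pooled array
}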
// Name is the name registered for the proto compressor.
const Name = "proto"
func init() {
encoding.RegisterCodecV2(&codecV2{delegate: encoding.GetCodecV2(Name)})
}
// codecV2 is a custom proto codec that uses a different tier schema for the TieredBufferPool and
// calls into the custom marshal/unmarshal logic that works with memory pooling.
// If the payload is not an OTLP payload, it falls back to the default grpc/proto codec.
type codecV2 struct {
delegate encoding.CodecV2
}
type otelEncoder interface {
SizeProto() int
MarshalProto([]byte) int
UnmarshalProto([]byte) error
}
func (c *codecV2) Marshal(v any) (mem.BufferSlice, error) {
if m, ok := v.(otelEncoder); ok {
size := m.SizeProto()
buf := otelBufferPool.Get(size)
n := m.MarshalProto((*buf)[:size])
*buf = (*buf)[:n]
return []mem.Buffer{mem.NewBuffer(buf, otelBufferPool)}, nil
}
return c.delegate.Marshal(v)
}
func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
if m, ok := v.(otelEncoder); ok {
// TODO: Upgrade custom Unmarshal logic to support reading from mem.BufferSlice.
buf := data.MaterializeToBuffer(otelBufferPool)
defer buf.Free()
return m.UnmarshalProto(buf.ReadOnlyData())
}
return c.delegate.Unmarshal(data, v)
}
func (c *codecV2) Name() string {
return Name
}
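// exampleCodecRoundTrip is a minimal sketch, not generated code: any message
// implementing otelEncoder takes the pooled fast path through both Marshal
// and Unmarshal, while non-OTLP messages would be delegated instead.
func exampleCodecRoundTrip(src, dst otelEncoder) error {
	c := &codecV2{delegate: encoding.GetCodecV2(Name)}
	data, err := c.Marshal(src)
	if err != nil {
		return err
	}
	defer data.Free()
	return c.Unmarshal(data, dst)
}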
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
)
// LogsServiceClient is the client API for LogsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type LogsServiceClient interface {
Export(context.Context, *internal.ExportLogsServiceRequest, ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error)
}
type logsServiceClient struct {
cc *grpc.ClientConn
}
func NewLogsServiceClient(cc *grpc.ClientConn) LogsServiceClient {
return &logsServiceClient{cc}
}
func (c *logsServiceClient) Export(ctx context.Context, in *internal.ExportLogsServiceRequest, opts ...grpc.CallOption) (*internal.ExportLogsServiceResponse, error) {
out := new(internal.ExportLogsServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// LogsServiceServer is the server API for LogsService service.
type LogsServiceServer interface {
Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error)
}
// UnimplementedLogsServiceServer can be embedded to have forward compatible implementations.
type UnimplementedLogsServiceServer struct{}
func (*UnimplementedLogsServiceServer) Export(context.Context, *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterLogsServiceServer(s *grpc.Server, srv LogsServiceServer) {
s.RegisterService(&logsServiceServiceDesc, srv)
}
// Context cannot be the first parameter of the function because of the gRPC handler signature.
//
//nolint:revive
func logsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
in := new(internal.ExportLogsServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LogsServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export",
}
handler := func(ctx context.Context, req any) (any, error) {
return srv.(LogsServiceServer).Export(ctx, req.(*internal.ExportLogsServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var logsServiceServiceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService",
HandlerType: (*LogsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: logsServiceExportHandler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto",
}
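// exampleLogsServer is an illustrative sketch, not generated code: embedding
// UnimplementedLogsServiceServer keeps a handwritten server forward
// compatible, since any future service methods fall back to the embedded
// Unimplemented stubs instead of breaking the build.
type exampleLogsServer struct {
	UnimplementedLogsServiceServer
}

func exampleRegisterLogsServer(s *grpc.Server) {
	RegisterLogsServiceServer(s, &exampleLogsServer{})
}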
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
)
// MetricsServiceClient is the client API for MetricsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type MetricsServiceClient interface {
Export(context.Context, *internal.ExportMetricsServiceRequest, ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error)
}
type metricsServiceClient struct {
cc *grpc.ClientConn
}
func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
return &metricsServiceClient{cc}
}
func (c *metricsServiceClient) Export(ctx context.Context, in *internal.ExportMetricsServiceRequest, opts ...grpc.CallOption) (*internal.ExportMetricsServiceResponse, error) {
out := new(internal.ExportMetricsServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// MetricsServiceServer is the server API for MetricsService service.
type MetricsServiceServer interface {
Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error)
}
// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
type UnimplementedMetricsServiceServer struct{}
func (*UnimplementedMetricsServiceServer) Export(context.Context, *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
s.RegisterService(&metricsServiceServiceDesc, srv)
}
// Context cannot be the first parameter of the function because of the gRPC handler signature.
//
//nolint:revive
func metricsServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
in := new(internal.ExportMetricsServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MetricsServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
}
handler := func(ctx context.Context, req any) (any, error) {
return srv.(MetricsServiceServer).Export(ctx, req.(*internal.ExportMetricsServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var metricsServiceServiceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
HandlerType: (*MetricsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: metricsServiceExportHandler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
)
// ProfilesServiceClient is the client API for ProfilesService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProfilesServiceClient interface {
Export(context.Context, *internal.ExportProfilesServiceRequest, ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error)
}
type profilesServiceClient struct {
cc *grpc.ClientConn
}
func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient {
return &profilesServiceClient{cc}
}
func (c *profilesServiceClient) Export(ctx context.Context, in *internal.ExportProfilesServiceRequest, opts ...grpc.CallOption) (*internal.ExportProfilesServiceResponse, error) {
out := new(internal.ExportProfilesServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ProfilesServiceServer is the server API for ProfilesService service.
type ProfilesServiceServer interface {
Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error)
}
// UnimplementedProfilesServiceServer can be embedded to have forward compatible implementations.
type UnimplementedProfilesServiceServer struct{}
func (*UnimplementedProfilesServiceServer) Export(context.Context, *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterProfilesServiceServer(s *grpc.Server, srv ProfilesServiceServer) {
s.RegisterService(&profilesServiceServiceDesc, srv)
}
// Context cannot be the first parameter of the function because of the gRPC handler signature.
//
//nolint:revive
func profilesServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
in := new(internal.ExportProfilesServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProfilesServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export",
}
handler := func(ctx context.Context, req any) (any, error) {
return srv.(ProfilesServiceServer).Export(ctx, req.(*internal.ExportProfilesServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var profilesServiceServiceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService",
HandlerType: (*ProfilesServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: profilesServiceExportHandler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto",
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otelgrpc // import "go.opentelemetry.io/collector/pdata/internal/otelgrpc"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
)
// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TraceServiceClient interface {
Export(context.Context, *internal.ExportTraceServiceRequest, ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error)
}
type traceServiceClient struct {
cc *grpc.ClientConn
}
func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
return &traceServiceClient{cc}
}
func (c *traceServiceClient) Export(ctx context.Context, in *internal.ExportTraceServiceRequest, opts ...grpc.CallOption) (*internal.ExportTraceServiceResponse, error) {
out := new(internal.ExportTraceServiceResponse)
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// TraceServiceServer is the server API for TraceService service.
type TraceServiceServer interface {
Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error)
}
// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
type UnimplementedTraceServiceServer struct{}
func (*UnimplementedTraceServiceServer) Export(context.Context, *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
s.RegisterService(&traceServiceServiceDesc, srv)
}
// Context cannot be the first parameter of the function because of the gRPC handler definition.
//
//nolint:revive
func traceServiceExportHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
in := new(internal.ExportTraceServiceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TraceServiceServer).Export(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
}
handler := func(ctx context.Context, req any) (any, error) {
return srv.(TraceServiceServer).Export(ctx, req.(*internal.ExportTraceServiceRequest))
}
return interceptor(ctx, in, info, handler)
}
var traceServiceServiceDesc = grpc.ServiceDesc{
ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Export",
Handler: traceServiceExportHandler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// MigrateLogs implements any translation needed due to deprecation in OTLP logs protocol.
// Any plog.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
func MigrateLogs(rls []*internal.ResourceLogs) {
for _, rl := range rls {
if len(rl.ScopeLogs) == 0 {
rl.ScopeLogs = rl.DeprecatedScopeLogs
}
rl.DeprecatedScopeLogs = nil
}
}
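// migrateLogsSketch is an illustrative sketch (not part of the original
// sources) of the expected call pattern: every OTLP logs unmarshaler invokes
// MigrateLogs right after decoding, so payloads produced with the deprecated
// field layout are promoted before the data is used.
func migrateLogsSketch(rls []*internal.ResourceLogs) []*internal.ResourceLogs {
	// After decoding, DeprecatedScopeLogs may carry the data instead of
	// ScopeLogs; MigrateLogs moves it over and clears the deprecated field.
	MigrateLogs(rls)
	return rls
}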
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// MigrateMetrics implements any translation needed due to deprecation in OTLP metrics protocol.
// Any pmetric.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
func MigrateMetrics(rms []*internal.ResourceMetrics) {
for _, rm := range rms {
if len(rm.ScopeMetrics) == 0 {
rm.ScopeMetrics = rm.DeprecatedScopeMetrics
}
rm.DeprecatedScopeMetrics = nil
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol.
// Any pprofile.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
func MigrateProfiles(_ []*internal.ResourceProfiles) {}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// MigrateTraces implements any translation needed due to deprecation in OTLP traces protocol.
// Any ptrace.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation.
func MigrateTraces(rss []*internal.ResourceSpans) {
for _, rs := range rss {
if len(rs.ScopeSpans) == 0 {
rs.ScopeSpans = rs.DeprecatedScopeSpans
}
rs.DeprecatedScopeSpans = nil
}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"encoding/hex"
"errors"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const profileIDSize = 16
var errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
// ProfileID is a custom data type that is used for all profile_id fields in OTLP
// Protobuf messages.
type ProfileID [profileIDSize]byte
func DeleteProfileID(*ProfileID, bool) {}
func CopyProfileID(dest, src *ProfileID) {
*dest = *src
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (pid ProfileID) IsEmpty() bool {
return pid == [profileIDSize]byte{}
}
// SizeProto returns the size of the data to serialize in proto format.
func (pid ProfileID) SizeProto() int {
if pid.IsEmpty() {
return 0
}
return profileIDSize
}
// MarshalProto converts profile ID into a binary representation. Called by Protobuf serialization.
func (pid ProfileID) MarshalProto(buf []byte) int {
if pid.IsEmpty() {
return 0
}
return copy(buf[len(buf)-profileIDSize:], pid[:])
}
// UnmarshalProto inflates this profile ID from binary representation. Called by Protobuf serialization.
func (pid *ProfileID) UnmarshalProto(buf []byte) error {
if len(buf) == 0 {
*pid = [profileIDSize]byte{}
return nil
}
if len(buf) != profileIDSize {
return errUnmarshalProfileID
}
copy(pid[:], buf)
return nil
}
// MarshalJSON converts ProfileID into a hex string.
//
//nolint:govet
func (pid ProfileID) MarshalJSON(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(pid[:]))
}
// UnmarshalJSON decodes ProfileID from hex string.
//
//nolint:govet
func (pid *ProfileID) UnmarshalJSON(iter *json.Iterator) {
*pid = [profileIDSize]byte{}
unmarshalJSON(pid[:], iter)
}
func GenTestProfileID() *ProfileID {
pid := ProfileID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
return &pid
}
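// profileIDRoundTripSketch is an illustrative sketch (not part of the
// original sources): SizeProto reports the exact number of bytes MarshalProto
// will write, MarshalProto fills the tail of the buffer (matching the
// marshaler's back-to-front buffer filling), and UnmarshalProto restores the ID.
func profileIDRoundTripSketch() bool {
	pid := *GenTestProfileID()
	buf := make([]byte, pid.SizeProto()) // 16 bytes for a non-empty ID
	pid.MarshalProto(buf)
	var out ProfileID
	if err := out.UnmarshalProto(buf); err != nil {
		return false
	}
	return out == pid
}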
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
// EncodeVarint encodes the varint at the end of the buffer.
func EncodeVarint(buf []byte, offset int, v uint64) int {
offset -= Sov(v)
base := offset
for v >= 1<<7 {
//nolint:gosec
buf[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
buf[offset] = uint8(v)
return base
}
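// encodeVarintSketch is an illustrative sketch (not part of the original
// sources): the marshaler fills buffers back to front, so EncodeVarint is
// given the offset one past where the value must end and returns the offset
// where the value begins.
func encodeVarintSketch() []byte {
	const v = 300
	buf := make([]byte, Sov(v)) // Sov(300) == 2
	start := EncodeVarint(buf, len(buf), v)
	_ = start // start == 0; buf is now {0xAC, 0x02}
	return buf
}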
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
import (
"math/bits"
)
func Sov(x uint64) (n int) {
return (bits.Len64(x|1) + 6) / 7
}
func Soz(x uint64) (n int) {
//nolint:gosec
return Sov((x << 1) ^ uint64((int64(x) >> 63)))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package proto // import "go.opentelemetry.io/collector/pdata/internal/proto"
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// WireType represents the proto wire type.
type WireType int8
const (
WireTypeVarint WireType = 0
WireTypeI64 WireType = 1
WireTypeLen WireType = 2
WireTypeStartGroup WireType = 3
WireTypeEndGroup WireType = 4
WireTypeI32 WireType = 5
)
var (
ErrInvalidLength = errors.New("proto: negative length found during unmarshaling")
ErrIntOverflow = errors.New("proto: integer overflow")
ErrUnexpectedEndOfGroup = errors.New("proto: unexpected end of group")
)
// ConsumeUnknown parses buf starting at pos as a wireType field, reporting the new position.
func ConsumeUnknown(buf []byte, pos int, wireType WireType) (int, error) {
var err error
l := len(buf)
depth := 0
for pos < l {
switch wireType {
case WireTypeVarint:
_, pos, err = ConsumeVarint(buf, pos)
return pos, err
case WireTypeI64:
_, pos, err = ConsumeI64(buf, pos)
return pos, err
case WireTypeLen:
_, pos, err = ConsumeLen(buf, pos)
return pos, err
case WireTypeStartGroup:
depth++
case WireTypeEndGroup:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroup
}
depth--
case WireTypeI32:
_, pos, err = ConsumeI32(buf, pos)
return pos, err
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
// We can only reach this point while parsing a group; if the group is done, return, otherwise parse more tags.
if depth == 0 {
return pos, nil
}
// While parsing a group, move to the next tag.
_, wireType, pos, err = ConsumeTag(buf, pos)
if err != nil {
return 0, err
}
}
return 0, io.ErrUnexpectedEOF
}
// ConsumeI64 parses buf starting at pos as a WireTypeI64 field, reporting the value and the new position.
func ConsumeI64(buf []byte, pos int) (uint64, int, error) {
pos += 8
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return binary.LittleEndian.Uint64(buf[pos-8:]), pos, nil
}
// ConsumeLen parses buf starting at pos as a WireTypeLen field, reporting the len and the new position.
func ConsumeLen(buf []byte, pos int) (int, int, error) {
var num uint64
var err error
num, pos, err = ConsumeVarint(buf, pos)
if err != nil {
return 0, 0, err
}
//nolint:gosec
length := int(num)
if length < 0 {
return 0, 0, ErrInvalidLength
}
pos += length
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return length, pos, nil
}
// ConsumeI32 parses buf starting at pos as a WireTypeI32 field, reporting the value and the new position.
func ConsumeI32(buf []byte, pos int) (uint32, int, error) {
pos += 4
if pos < 0 || pos > len(buf) {
return 0, 0, io.ErrUnexpectedEOF
}
return binary.LittleEndian.Uint32(buf[pos-4:]), pos, nil
}
// ConsumeTag parses buf starting at pos as a varint-encoded tag, reporting the new position.
func ConsumeTag(buf []byte, pos int) (int32, WireType, int, error) {
tag, pos, err := ConsumeVarint(buf, pos)
if err != nil {
return 0, 0, 0, err
}
//nolint:gosec
fieldNum := int32(tag >> 3)
//nolint:gosec
wireType := int8(tag & 0x7)
if fieldNum <= 0 {
return 0, 0, 0, fmt.Errorf("proto: illegal field=%d (tag=%d, pos=%d)", fieldNum, tag, pos)
}
return fieldNum, WireType(wireType), pos, nil
}
// ConsumeVarint parses buf starting at pos as a varint-encoded uint64, reporting the new position.
func ConsumeVarint(buf []byte, pos int) (uint64, int, error) {
l := len(buf)
var num uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, 0, ErrIntOverflow
}
if pos >= l {
return 0, 0, io.ErrUnexpectedEOF
}
b := buf[pos]
pos++
num |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
return num, pos, nil
}
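// consumeTagSketch is an illustrative sketch (not part of the original
// sources): encode the tag for field 1 with the varint wire type, followed by
// the value 300, then decode both back with ConsumeTag and ConsumeVarint.
func consumeTagSketch() (int32, WireType, uint64, error) {
	tag := uint64(1)<<3 | uint64(WireTypeVarint) // field number 1, wire type 0
	buf := make([]byte, Sov(tag)+Sov(300))
	pos := EncodeVarint(buf, len(buf), 300) // value goes at the tail
	EncodeVarint(buf, pos, tag)             // tag goes right before it
	fieldNum, wt, next, err := ConsumeTag(buf, 0)
	if err != nil {
		return 0, 0, 0, err
	}
	v, _, err := ConsumeVarint(buf, next) // v == 300
	return fieldNum, wt, v, err
}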
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"encoding/hex"
"errors"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const spanIDSize = 8
var errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length")
// SpanID is a custom data type that is used for all span_id fields in OTLP
// Protobuf messages.
type SpanID [spanIDSize]byte
func DeleteSpanID(*SpanID, bool) {}
func CopySpanID(dest, src *SpanID) {
*dest = *src
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
// SizeProto returns the size of the data to serialize in proto format.
func (sid SpanID) SizeProto() int {
if sid.IsEmpty() {
return 0
}
return spanIDSize
}
// MarshalProto converts span ID into a binary representation. Called by Protobuf serialization.
func (sid SpanID) MarshalProto(buf []byte) int {
if sid.IsEmpty() {
return 0
}
return copy(buf[len(buf)-spanIDSize:], sid[:])
}
// UnmarshalProto inflates this span ID from binary representation. Called by Protobuf serialization.
func (sid *SpanID) UnmarshalProto(data []byte) error {
if len(data) == 0 {
*sid = [spanIDSize]byte{}
return nil
}
if len(data) != spanIDSize {
return errUnmarshalSpanID
}
copy(sid[:], data)
return nil
}
// MarshalJSON converts SpanID into a hex string.
//
//nolint:govet
func (sid SpanID) MarshalJSON(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(sid[:]))
}
// UnmarshalJSON decodes SpanID from hex string.
//
//nolint:govet
func (sid *SpanID) UnmarshalJSON(iter *json.Iterator) {
*sid = [spanIDSize]byte{}
unmarshalJSON(sid[:], iter)
}
func GenTestSpanID() *SpanID {
sid := SpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1})
return &sid
}
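// spanIDOmitSketch is an illustrative sketch (not part of the original
// sources): an all-zero SpanID reports a proto size of 0, so the marshaler
// omits the field entirely rather than writing eight zero bytes.
func spanIDOmitSketch() bool {
	var empty SpanID
	return empty.IsEmpty() && empty.SizeProto() == 0 && empty.MarshalProto(nil) == 0
}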
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"sync/atomic"
"go.opentelemetry.io/collector/featuregate"
)
var _ = featuregate.GlobalRegistry().MustRegister(
"pdata.useCustomProtoEncoding",
featuregate.StageStable,
featuregate.WithRegisterDescription("When enabled, enable custom proto encoding. This is required step to enable featuregate pdata.useProtoPooling."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
featuregate.WithRegisterFromVersion("v0.133.0"),
featuregate.WithRegisterToVersion("v0.137.0"),
)
var UseProtoPooling = featuregate.GlobalRegistry().MustRegister(
"pdata.useProtoPooling",
featuregate.StageAlpha,
featuregate.WithRegisterDescription("When enabled, enable using local memory pools for underlying data that the pdata messages are pushed to."),
featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/13631"),
featuregate.WithRegisterFromVersion("v0.133.0"),
)
// State defines an ownership state of pmetric.Metrics, plog.Logs or ptrace.Traces.
type State struct {
refs atomic.Int32
state uint32
}
const (
defaultState uint32 = 0
stateReadOnlyBit = uint32(1 << 0)
statePipelineOwnedBit = uint32(1 << 1)
)
func NewState() *State {
st := &State{
state: defaultState,
}
st.refs.Store(1)
return st
}
func (st *State) MarkReadOnly() {
st.state |= stateReadOnlyBit
}
func (st *State) IsReadOnly() bool {
return st.state&stateReadOnlyBit != 0
}
// AssertMutable panics if the state is marked read-only.
func (st *State) AssertMutable() {
if st.state&stateReadOnlyBit != 0 {
panic("invalid access to shared data")
}
}
// MarkPipelineOwned marks the data as owned by the pipeline. It returns true if the data
// was not previously owned by the pipeline, otherwise false.
func (st *State) MarkPipelineOwned() bool {
if st.state&statePipelineOwnedBit != 0 {
return false
}
st.state |= statePipelineOwnedBit
return true
}
// Ref adds one to the count of active references.
func (st *State) Ref() {
st.refs.Add(1)
}
// Unref decrements the reference count and returns true when it reaches 0,
// meaning there are no more active references; otherwise it returns false.
func (st *State) Unref() bool {
refs := st.refs.Add(-1)
switch {
case refs > 0:
return false
case refs == 0:
return true
default:
panic("Cannot unref freed data")
}
}
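// stateLifecycleSketch is an illustrative sketch (not part of the original
// sources) of the ownership flow: a fresh State is mutable and holds one
// reference; Ref/Unref track sharing, and MarkReadOnly makes any later
// AssertMutable panic.
func stateLifecycleSketch() {
	st := NewState()
	st.AssertMutable() // fine: a fresh state is mutable
	st.Ref()           // a second holder appears
	if st.Unref() {    // one reference is still held, so this reports false
		panic("unexpected: references remain")
	}
	st.MarkReadOnly()
	_ = st.IsReadOnly() // true; AssertMutable would now panic
	_ = st.Unref()      // true: the last reference was released
}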
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
import (
"encoding/hex"
"errors"
"go.opentelemetry.io/collector/pdata/internal/json"
)
const traceIDSize = 16
var errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length")
// TraceID is a custom data type that is used for all trace_id fields in OTLP
// Protobuf messages.
type TraceID [traceIDSize]byte
func DeleteTraceID(*TraceID, bool) {}
func CopyTraceID(dest, src *TraceID) {
*dest = *src
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
// SizeProto returns the size of the data to serialize in proto format.
func (tid TraceID) SizeProto() int {
if tid.IsEmpty() {
return 0
}
return traceIDSize
}
// MarshalProto converts trace ID into a binary representation. Called by Protobuf serialization.
func (tid TraceID) MarshalProto(buf []byte) int {
if tid.IsEmpty() {
return 0
}
return copy(buf[len(buf)-traceIDSize:], tid[:])
}
// UnmarshalProto inflates this trace ID from binary representation. Called by Protobuf serialization.
func (tid *TraceID) UnmarshalProto(buf []byte) error {
if len(buf) == 0 {
*tid = [traceIDSize]byte{}
return nil
}
if len(buf) != traceIDSize {
return errUnmarshalTraceID
}
copy(tid[:], buf)
return nil
}
// MarshalJSON converts TraceID into a hex string.
//
//nolint:govet
func (tid TraceID) MarshalJSON(dest *json.Stream) {
dest.WriteString(hex.EncodeToString(tid[:]))
}
// UnmarshalJSON decodes TraceID from hex string.
//
//nolint:govet
func (tid *TraceID) UnmarshalJSON(iter *json.Iterator) {
*tid = [traceIDSize]byte{}
unmarshalJSON(tid[:], iter)
}
func GenTestTraceID() *TraceID {
tid := TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})
return &tid
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
// LogsToProto internal helper to convert Logs to protobuf representation.
func LogsToProto(l LogsWrapper) LogsData {
return LogsData{
ResourceLogs: l.orig.ResourceLogs,
}
}
// LogsFromProto internal helper to convert protobuf representation to Logs.
// This function sets exclusive state, assuming it's called only once per Logs.
func LogsFromProto(orig LogsData) LogsWrapper {
return NewLogsWrapper(&ExportLogsServiceRequest{
ResourceLogs: orig.ResourceLogs,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
type MapWrapper struct {
orig *[]KeyValue
state *State
}
func GetMapOrig(ms MapWrapper) *[]KeyValue {
return ms.orig
}
func GetMapState(ms MapWrapper) *State {
return ms.state
}
func NewMapWrapper(orig *[]KeyValue, state *State) MapWrapper {
return MapWrapper{orig: orig, state: state}
}
func GenTestMapWrapper() MapWrapper {
orig := GenTestKeyValueSlice()
return NewMapWrapper(&orig, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
// MetricsToProto internal helper to convert Metrics to protobuf representation.
func MetricsToProto(l MetricsWrapper) MetricsData {
return MetricsData{
ResourceMetrics: l.orig.ResourceMetrics,
}
}
// MetricsFromProto internal helper to convert protobuf representation to Metrics.
// This function sets exclusive state, assuming it's called only once per Metrics.
func MetricsFromProto(orig MetricsData) MetricsWrapper {
return NewMetricsWrapper(&ExportMetricsServiceRequest{
ResourceMetrics: orig.ResourceMetrics,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
// ProfilesToProto internal helper to convert Profiles to protobuf representation.
func ProfilesToProto(l ProfilesWrapper) ProfilesData {
return ProfilesData{
ResourceProfiles: l.orig.ResourceProfiles,
Dictionary: l.orig.Dictionary,
}
}
// ProfilesFromProto internal helper to convert protobuf representation to Profiles.
// This function sets exclusive state, assuming it's called only once per Profiles.
func ProfilesFromProto(orig ProfilesData) ProfilesWrapper {
return NewProfilesWrapper(&ExportProfilesServiceRequest{
ResourceProfiles: orig.ResourceProfiles,
Dictionary: orig.Dictionary,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
// TracesToProto internal helper to convert Traces to protobuf representation.
func TracesToProto(l TracesWrapper) TracesData {
return TracesData{
ResourceSpans: l.orig.ResourceSpans,
}
}
// TracesFromProto internal helper to convert protobuf representation to Traces.
// This function sets exclusive state, assuming it's called only once per Traces.
func TracesFromProto(orig TracesData) TracesWrapper {
return NewTracesWrapper(&ExportTraceServiceRequest{
ResourceSpans: orig.ResourceSpans,
}, NewState())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
type TraceStateWrapper struct {
orig *string
state *State
}
func GetTraceStateOrig(ms TraceStateWrapper) *string {
return ms.orig
}
func GetTraceStateState(ms TraceStateWrapper) *State {
return ms.state
}
func NewTraceStateWrapper(orig *string, state *State) TraceStateWrapper {
return TraceStateWrapper{orig: orig, state: state}
}
func GenTestTraceStateWrapper() TraceStateWrapper {
return NewTraceStateWrapper(GenTestTraceState(), NewState())
}
func GenTestTraceState() *string {
orig := new(string)
*orig = "rojo=00f067aa0ba902b7"
return orig
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/collector/pdata/internal"
type ValueWrapper struct {
orig *AnyValue
state *State
}
func GetValueOrig(ms ValueWrapper) *AnyValue {
return ms.orig
}
func GetValueState(ms ValueWrapper) *State {
return ms.state
}
func NewValueWrapper(orig *AnyValue, state *State) ValueWrapper {
return ValueWrapper{orig: orig, state: state}
}
func GenTestValueWrapper() ValueWrapper {
orig := GenTestAnyValue()
return NewValueWrapper(orig, NewState())
}
func NewAnyValueStringValue() *AnyValue_StringValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_StringValue{}
}
return ProtoPoolAnyValue_StringValue.Get().(*AnyValue_StringValue)
}
func NewAnyValueIntValue() *AnyValue_IntValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_IntValue{}
}
return ProtoPoolAnyValue_IntValue.Get().(*AnyValue_IntValue)
}
func NewAnyValueBoolValue() *AnyValue_BoolValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_BoolValue{}
}
return ProtoPoolAnyValue_BoolValue.Get().(*AnyValue_BoolValue)
}
func NewAnyValueDoubleValue() *AnyValue_DoubleValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_DoubleValue{}
}
return ProtoPoolAnyValue_DoubleValue.Get().(*AnyValue_DoubleValue)
}
func NewAnyValueBytesValue() *AnyValue_BytesValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_BytesValue{}
}
return ProtoPoolAnyValue_BytesValue.Get().(*AnyValue_BytesValue)
}
func NewAnyValueArrayValue() *AnyValue_ArrayValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_ArrayValue{}
}
return ProtoPoolAnyValue_ArrayValue.Get().(*AnyValue_ArrayValue)
}
func NewAnyValueKvlistValue() *AnyValue_KvlistValue {
if !UseProtoPooling.IsEnabled() {
return &AnyValue_KvlistValue{}
}
return ProtoPoolAnyValue_KvlistValue.Get().(*AnyValue_KvlistValue)
}
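// newAnyValuePooledSketch is an illustrative sketch (not part of the original
// sources): the constructors above hand out fresh values unless the
// pdata.useProtoPooling gate is enabled, in which case they draw from the
// corresponding pool to cut allocations. The StringValue field name is an
// assumption based on the generated AnyValue oneof wrappers.
func newAnyValuePooledSketch() *AnyValue_StringValue {
	v := NewAnyValueStringValue() // pooled or freshly allocated, per the gate
	v.StringValue = "hello"
	return v
}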
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// ByteSlice represents a []byte slice.
// The instance of ByteSlice can be assigned to multiple objects since it's immutable.
//
// Must use NewByteSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ByteSlice internal.ByteSliceWrapper
func (ms ByteSlice) getOrig() *[]byte {
return internal.GetByteSliceOrig(internal.ByteSliceWrapper(ms))
}
func (ms ByteSlice) getState() *internal.State {
return internal.GetByteSliceState(internal.ByteSliceWrapper(ms))
}
// NewByteSlice creates a new empty ByteSlice.
func NewByteSlice() ByteSlice {
orig := []byte(nil)
return ByteSlice(internal.NewByteSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []byte slice.
func (ms ByteSlice) AsRaw() []byte {
return copyByteSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []byte into the slice ByteSlice.
func (ms ByteSlice) FromRaw(val []byte) {
ms.getState().AssertMutable()
*ms.getOrig() = copyByteSlice(*ms.getOrig(), val)
}
// Len returns length of the []byte slice value.
// Equivalent of len(byteSlice).
func (ms ByteSlice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of byteSlice[i].
func (ms ByteSlice) At(i int) byte {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms ByteSlice) All() iter.Seq2[int, byte] {
return func(yield func(int, byte) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets byte item at particular index.
// Equivalent of byteSlice[i] = val
func (ms ByteSlice) SetAt(i int, val byte) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures ByteSlice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
// buf := make([]byte, len(byteSlice), newCap)
// copy(buf, byteSlice)
// byteSlice = buf
func (ms ByteSlice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]byte, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to ByteSlice.
// Equivalent of byteSlice = append(byteSlice, elms...)
func (ms ByteSlice) Append(elms ...byte) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms ByteSlice) MoveTo(dest ByteSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms ByteSlice) MoveAndAppendTo(dest ByteSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms ByteSlice) RemoveIf(f func(byte) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero byte
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms ByteSlice) CopyTo(dest ByteSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another ByteSlice
func (ms ByteSlice) Equal(val ByteSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyByteSlice(dst, src []byte) []byte {
return append(dst[:0], src...)
}
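// byteSliceUsageSketch is an illustrative sketch (not part of the original
// sources) of the typical flow: copy raw bytes in, mutate through the
// accessors, and copy back out; AsRaw returns a copy detached from the
// internal storage.
func byteSliceUsageSketch() []byte {
	bs := NewByteSlice()
	bs.FromRaw([]byte{1, 2, 3})
	bs.SetAt(0, 9) // bs is now {9, 2, 3}
	bs.Append(4)   // bs is now {9, 2, 3, 4}
	bs.RemoveIf(func(b byte) bool { return b == 2 })
	return bs.AsRaw() // {9, 3, 4}
}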
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Float64Slice represents a []float64 slice.
// The instance of Float64Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewFloat64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Float64Slice internal.Float64SliceWrapper
func (ms Float64Slice) getOrig() *[]float64 {
return internal.GetFloat64SliceOrig(internal.Float64SliceWrapper(ms))
}
func (ms Float64Slice) getState() *internal.State {
return internal.GetFloat64SliceState(internal.Float64SliceWrapper(ms))
}
// NewFloat64Slice creates a new empty Float64Slice.
func NewFloat64Slice() Float64Slice {
orig := []float64(nil)
return Float64Slice(internal.NewFloat64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []float64 slice.
func (ms Float64Slice) AsRaw() []float64 {
return copyFloat64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []float64 into the slice Float64Slice.
func (ms Float64Slice) FromRaw(val []float64) {
ms.getState().AssertMutable()
*ms.getOrig() = copyFloat64Slice(*ms.getOrig(), val)
}
// Len returns length of the []float64 slice value.
// Equivalent of len(float64Slice).
func (ms Float64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of float64Slice[i].
func (ms Float64Slice) At(i int) float64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Float64Slice) All() iter.Seq2[int, float64] {
return func(yield func(int, float64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets float64 item at particular index.
// Equivalent of float64Slice[i] = val
func (ms Float64Slice) SetAt(i int, val float64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Float64Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
// buf := make([]float64, len(float64Slice), newCap)
// copy(buf, float64Slice)
// float64Slice = buf
func (ms Float64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]float64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Float64Slice.
// Equivalent of float64Slice = append(float64Slice, elms...)
func (ms Float64Slice) Append(elms ...float64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Float64Slice) MoveTo(dest Float64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Float64Slice) MoveAndAppendTo(dest Float64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Float64Slice) RemoveIf(f func(float64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero float64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Float64Slice) CopyTo(dest Float64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Float64Slice
func (ms Float64Slice) Equal(val Float64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyFloat64Slice(dst, src []float64) []float64 {
return append(dst[:0], src...)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// InstrumentationScope is a message representing the instrumentation scope information.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewInstrumentationScope function to create new instances.
// Important: zero-initialized instance is not valid for use.
type InstrumentationScope internal.InstrumentationScopeWrapper
func newInstrumentationScope(orig *internal.InstrumentationScope, state *internal.State) InstrumentationScope {
return InstrumentationScope(internal.NewInstrumentationScopeWrapper(orig, state))
}
// NewInstrumentationScope creates a new empty InstrumentationScope.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewInstrumentationScope() InstrumentationScope {
return newInstrumentationScope(internal.NewInstrumentationScope(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteInstrumentationScope(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Name returns the name associated with this InstrumentationScope.
func (ms InstrumentationScope) Name() string {
return ms.getOrig().Name
}
// SetName replaces the name associated with this InstrumentationScope.
func (ms InstrumentationScope) SetName(v string) {
ms.getState().AssertMutable()
ms.getOrig().Name = v
}
// Version returns the version associated with this InstrumentationScope.
func (ms InstrumentationScope) Version() string {
return ms.getOrig().Version
}
// SetVersion replaces the version associated with this InstrumentationScope.
func (ms InstrumentationScope) SetVersion(v string) {
ms.getState().AssertMutable()
ms.getOrig().Version = v
}
// Attributes returns the Attributes associated with this InstrumentationScope.
func (ms InstrumentationScope) Attributes() Map {
return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope.
func (ms InstrumentationScope) DroppedAttributesCount() uint32 {
return ms.getOrig().DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this InstrumentationScope.
func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) {
ms.getState().AssertMutable()
ms.getOrig().DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) {
dest.getState().AssertMutable()
internal.CopyInstrumentationScope(dest.getOrig(), ms.getOrig())
}
func (ms InstrumentationScope) getOrig() *internal.InstrumentationScope {
return internal.GetInstrumentationScopeOrig(internal.InstrumentationScopeWrapper(ms))
}
func (ms InstrumentationScope) getState() *internal.State {
return internal.GetInstrumentationScopeState(internal.InstrumentationScopeWrapper(ms))
}
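// instrumentationScopeSketch is an illustrative sketch (not part of the
// original sources): populate a scope and deep-copy it so the copy can
// outlive the original.
func instrumentationScopeSketch() InstrumentationScope {
	is := NewInstrumentationScope()
	is.SetName("my.library")
	is.SetVersion("1.2.3")
	is.Attributes().PutStr("feature", "experimental")
	cp := NewInstrumentationScope()
	is.CopyTo(cp) // deep copy of name, version, and attributes
	return cp
}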
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Int32Slice represents a []int32 slice.
// The instance of Int32Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewInt32Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Int32Slice internal.Int32SliceWrapper
func (ms Int32Slice) getOrig() *[]int32 {
return internal.GetInt32SliceOrig(internal.Int32SliceWrapper(ms))
}
func (ms Int32Slice) getState() *internal.State {
return internal.GetInt32SliceState(internal.Int32SliceWrapper(ms))
}
// NewInt32Slice creates a new empty Int32Slice.
func NewInt32Slice() Int32Slice {
orig := []int32(nil)
return Int32Slice(internal.NewInt32SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int32 slice.
func (ms Int32Slice) AsRaw() []int32 {
return copyInt32Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int32 into the slice Int32Slice.
func (ms Int32Slice) FromRaw(val []int32) {
ms.getState().AssertMutable()
*ms.getOrig() = copyInt32Slice(*ms.getOrig(), val)
}
// Len returns length of the []int32 slice value.
// Equivalent of len(int32Slice).
func (ms Int32Slice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of int32Slice[i].
func (ms Int32Slice) At(i int) int32 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Int32Slice) All() iter.Seq2[int, int32] {
return func(yield func(int, int32) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets int32 item at particular index.
// Equivalent of int32Slice[i] = val
func (ms Int32Slice) SetAt(i int, val int32) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Int32Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
// buf := make([]int32, len(int32Slice), newCap)
// copy(buf, int32Slice)
// int32Slice = buf
func (ms Int32Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]int32, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Int32Slice.
// Equivalent of int32Slice = append(int32Slice, elms...)
func (ms Int32Slice) Append(elms ...int32) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Int32Slice) MoveTo(dest Int32Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Int32Slice) MoveAndAppendTo(dest Int32Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Int32Slice) RemoveIf(f func(int32) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero int32
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int32Slice) CopyTo(dest Int32Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int32Slice
func (ms Int32Slice) Equal(val Int32Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyInt32Slice(dst, src []int32) []int32 {
return append(dst[:0], src...)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// Int64Slice represents a []int64 slice.
// The instance of Int64Slice can be assigned to multiple objects since it's immutable.
//
// Must use NewInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Int64Slice internal.Int64SliceWrapper
func (ms Int64Slice) getOrig() *[]int64 {
return internal.GetInt64SliceOrig(internal.Int64SliceWrapper(ms))
}
func (ms Int64Slice) getState() *internal.State {
return internal.GetInt64SliceState(internal.Int64SliceWrapper(ms))
}
// NewInt64Slice creates a new empty Int64Slice.
func NewInt64Slice() Int64Slice {
orig := []int64(nil)
return Int64Slice(internal.NewInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []int64 slice.
func (ms Int64Slice) AsRaw() []int64 {
return copyInt64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []int64 into the slice Int64Slice.
func (ms Int64Slice) FromRaw(val []int64) {
ms.getState().AssertMutable()
*ms.getOrig() = copyInt64Slice(*ms.getOrig(), val)
}
// Len returns length of the []int64 slice value.
// Equivalent of len(int64Slice).
func (ms Int64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of int64Slice[i].
func (ms Int64Slice) At(i int) int64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms Int64Slice) All() iter.Seq2[int, int64] {
return func(yield func(int, int64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets int64 item at particular index.
// Equivalent of int64Slice[i] = val
func (ms Int64Slice) SetAt(i int, val int64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures Int64Slice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
// buf := make([]int64, len(int64Slice), newCap)
// copy(buf, int64Slice)
// int64Slice = buf
func (ms Int64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]int64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to Int64Slice.
// Equivalent of int64Slice = append(int64Slice, elms...)
func (ms Int64Slice) Append(elms ...int64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms Int64Slice) MoveTo(dest Int64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms Int64Slice) MoveAndAppendTo(dest Int64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms Int64Slice) RemoveIf(f func(int64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero int64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms Int64Slice) CopyTo(dest Int64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another Int64Slice
func (ms Int64Slice) Equal(val Int64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyInt64Slice(dst, src []int64) []int64 {
return append(dst[:0], src...)
}
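// int64SliceMoveSketch is an illustrative sketch (not part of the original
// sources): MoveAndAppendTo transfers the elements to dest and leaves the
// source empty, avoiding a copy entirely when dest has no backing array yet.
func int64SliceMoveSketch() ([]int64, int) {
	src := NewInt64Slice()
	src.FromRaw([]int64{1, 2})
	dst := NewInt64Slice()
	dst.Append(0)
	src.MoveAndAppendTo(dst)      // dst is {0, 1, 2}
	return dst.AsRaw(), src.Len() // src.Len() == 0
}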
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Resource is a message representing the resource information.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewResource function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Resource internal.ResourceWrapper
func newResource(orig *internal.Resource, state *internal.State) Resource {
return Resource(internal.NewResourceWrapper(orig, state))
}
// NewResource creates a new empty Resource.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResource() Resource {
return newResource(internal.NewResource(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Resource) MoveTo(dest Resource) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteResource(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// Attributes returns the Attributes associated with this Resource.
func (ms Resource) Attributes() Map {
return Map(internal.NewMapWrapper(&ms.getOrig().Attributes, ms.getState()))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Resource.
func (ms Resource) DroppedAttributesCount() uint32 {
return ms.getOrig().DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Resource.
func (ms Resource) SetDroppedAttributesCount(v uint32) {
ms.getState().AssertMutable()
ms.getOrig().DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Resource) CopyTo(dest Resource) {
dest.getState().AssertMutable()
internal.CopyResource(dest.getOrig(), ms.getOrig())
}
func (ms Resource) getOrig() *internal.Resource {
return internal.GetResourceOrig(internal.ResourceWrapper(ms))
}
func (ms Resource) getState() *internal.State {
return internal.GetResourceState(internal.ResourceWrapper(ms))
}
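// resourceSketch is an illustrative sketch (not part of the original
// sources): a Resource is described entirely by its attributes plus the
// dropped-attributes counter.
func resourceSketch() Resource {
	res := NewResource()
	res.Attributes().PutStr("service.name", "checkout")
	res.SetDroppedAttributesCount(1)
	return res
}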
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
)
// Slice logically represents a slice of Value.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Slice internal.SliceWrapper
func newSlice(orig *[]internal.AnyValue, state *internal.State) Slice {
return Slice(internal.NewSliceWrapper(orig, state))
}
// NewSlice creates a Slice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSlice() Slice {
orig := []internal.AnyValue(nil)
return newSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSlice()".
func (es Slice) Len() int {
return len(*es.getOrig())
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es Slice) At(i int) Value {
return newValue(&(*es.getOrig())[i], es.getState())
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es Slice) All() iter.Seq2[int, Value] {
return func(yield func(int, Value) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new Slice can be initialized:
//
// es := NewSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es Slice) EnsureCapacity(newCap int) {
es.getState().AssertMutable()
oldCap := cap(*es.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]internal.AnyValue, len(*es.getOrig()), newCap)
copy(newOrig, *es.getOrig())
*es.getOrig() = newOrig
}
// AppendEmpty will append to the end of the slice an empty Value.
// It returns the newly added Value.
func (es Slice) AppendEmpty() Value {
es.getState().AssertMutable()
*es.getOrig() = append(*es.getOrig(), internal.AnyValue{})
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es Slice) MoveAndAppendTo(dest Slice) {
es.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.getOrig() == dest.getOrig() {
return
}
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *es.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...)
}
*es.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es Slice) RemoveIf(f func(Value) bool) {
es.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*es.getOrig()); i++ {
if f(es.At(i)) {
internal.DeleteAnyValue(&(*es.getOrig())[i], false)
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.getOrig())[newLen] = (*es.getOrig())[i]
(*es.getOrig())[i].Reset()
newLen++
}
*es.getOrig() = (*es.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es Slice) CopyTo(dest Slice) {
dest.getState().AssertMutable()
if es.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyAnyValueSlice(*dest.getOrig(), *es.getOrig())
}
func (ms Slice) getOrig() *[]internal.AnyValue {
return internal.GetSliceOrig(internal.SliceWrapper(ms))
}
func (ms Slice) getState() *internal.State {
return internal.GetSliceState(internal.SliceWrapper(ms))
}
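// sliceUsageSketch is an illustrative sketch (not part of the original
// sources): grow the slice via AppendEmpty, set each element through the
// returned Value, and prune with RemoveIf.
func sliceUsageSketch() Slice {
	es := NewSlice()
	es.EnsureCapacity(2)
	es.AppendEmpty().SetStr("first")
	es.AppendEmpty().SetInt(2)
	es.RemoveIf(func(v Value) bool { return v.Type() == ValueTypeInt })
	return es // one element remains: the string "first"
}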
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// StringSlice represents a []string slice.
// The instance of StringSlice can be assigned to multiple objects since it's immutable.
//
// Must use NewStringSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type StringSlice internal.StringSliceWrapper
func (ms StringSlice) getOrig() *[]string {
return internal.GetStringSliceOrig(internal.StringSliceWrapper(ms))
}
func (ms StringSlice) getState() *internal.State {
return internal.GetStringSliceState(internal.StringSliceWrapper(ms))
}
// NewStringSlice creates a new empty StringSlice.
func NewStringSlice() StringSlice {
orig := []string(nil)
return StringSlice(internal.NewStringSliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []string slice.
func (ms StringSlice) AsRaw() []string {
return copyStringSlice(nil, *ms.getOrig())
}
// FromRaw copies raw []string into the slice StringSlice.
func (ms StringSlice) FromRaw(val []string) {
ms.getState().AssertMutable()
*ms.getOrig() = copyStringSlice(*ms.getOrig(), val)
}
// Len returns length of the []string slice value.
// Equivalent of len(stringSlice).
func (ms StringSlice) Len() int {
return len(*ms.getOrig())
}
// At returns an item from particular index.
// Equivalent of stringSlice[i].
func (ms StringSlice) At(i int) string {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms StringSlice) All() iter.Seq2[int, string] {
return func(yield func(int, string) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets string item at particular index.
// Equivalent of stringSlice[i] = val
func (ms StringSlice) SetAt(i int, val string) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures StringSlice has at least the specified capacity.
// 1. If the newCap <= cap, then there is no change in capacity.
// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of:
// buf := make([]string, len(stringSlice), newCap)
// copy(buf, stringSlice)
// stringSlice = buf
func (ms StringSlice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]string, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to StringSlice.
// Equivalent of stringSlice = append(stringSlice, elms...)
func (ms StringSlice) Append(elms ...string) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms StringSlice) MoveTo(dest StringSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms StringSlice) MoveAndAppendTo(dest StringSlice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms StringSlice) RemoveIf(f func(string) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero string
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms StringSlice) CopyTo(dest StringSlice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another StringSlice
func (ms StringSlice) Equal(val StringSlice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyStringSlice(dst, src []string) []string {
return append(dst[:0], src...)
}
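// Example (editor's illustrative sketch, not generated code): a minimal
// walkthrough of the StringSlice API defined above. The function name is
// hypothetical and exists only for illustration.
func exampleStringSliceUsage() {
	ss := NewStringSlice()
	ss.EnsureCapacity(3) // pre-allocate so the Append below does not reallocate
	ss.Append("a", "b", "c")
	ss.SetAt(1, "B") // ss is now ["a", "B", "c"]
	ss.RemoveIf(func(s string) bool { return s == "c" })
	_ = ss.AsRaw() // returns a detached copy: []string{"a", "B"}
}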
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pcommon
import (
"iter"
"slices"
"go.opentelemetry.io/collector/pdata/internal"
)
// UInt64Slice represents a []uint64 slice.
// An instance of UInt64Slice can be assigned to multiple objects since the wrapper
// itself is immutable; all copies reference the same underlying data.
//
// Must use NewUInt64Slice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type UInt64Slice internal.UInt64SliceWrapper
func (ms UInt64Slice) getOrig() *[]uint64 {
return internal.GetUInt64SliceOrig(internal.UInt64SliceWrapper(ms))
}
func (ms UInt64Slice) getState() *internal.State {
return internal.GetUInt64SliceState(internal.UInt64SliceWrapper(ms))
}
// NewUInt64Slice creates a new empty UInt64Slice.
func NewUInt64Slice() UInt64Slice {
orig := []uint64(nil)
return UInt64Slice(internal.NewUInt64SliceWrapper(&orig, internal.NewState()))
}
// AsRaw returns a copy of the []uint64 slice.
func (ms UInt64Slice) AsRaw() []uint64 {
return copyUint64Slice(nil, *ms.getOrig())
}
// FromRaw copies raw []uint64 into the slice UInt64Slice.
func (ms UInt64Slice) FromRaw(val []uint64) {
ms.getState().AssertMutable()
*ms.getOrig() = copyUint64Slice(*ms.getOrig(), val)
}
// Len returns the length of the []uint64 slice value.
// Equivalent of len(uInt64Slice).
func (ms UInt64Slice) Len() int {
return len(*ms.getOrig())
}
// At returns the item at a particular index.
// Equivalent of uInt64Slice[i].
func (ms UInt64Slice) At(i int) uint64 {
return (*ms.getOrig())[i]
}
// All returns an iterator over index-value pairs in the slice.
func (ms UInt64Slice) All() iter.Seq2[int, uint64] {
return func(yield func(int, uint64) bool) {
for i := 0; i < ms.Len(); i++ {
if !yield(i, ms.At(i)) {
return
}
}
}
}
// SetAt sets uint64 item at particular index.
// Equivalent of uInt64Slice[i] = val
func (ms UInt64Slice) SetAt(i int, val uint64) {
ms.getState().AssertMutable()
(*ms.getOrig())[i] = val
}
// EnsureCapacity ensures UInt64Slice has at least the specified capacity.
// 1. If newCap <= cap, there is no change in capacity.
// 2. If newCap > cap, the slice capacity is expanded to the provided value, which is equivalent to:
// buf := make([]uint64, len(uInt64Slice), newCap)
// copy(buf, uInt64Slice)
// uInt64Slice = buf
func (ms UInt64Slice) EnsureCapacity(newCap int) {
ms.getState().AssertMutable()
oldCap := cap(*ms.getOrig())
if newCap <= oldCap {
return
}
newOrig := make([]uint64, len(*ms.getOrig()), newCap)
copy(newOrig, *ms.getOrig())
*ms.getOrig() = newOrig
}
// Append appends extra elements to UInt64Slice.
// Equivalent of uInt64Slice = append(uInt64Slice, elms...)
func (ms UInt64Slice) Append(elms ...uint64) {
ms.getState().AssertMutable()
*ms.getOrig() = append(*ms.getOrig(), elms...)
}
// MoveTo moves all elements from the current slice overriding the destination and
// resetting the current instance to its zero value.
func (ms UInt64Slice) MoveTo(dest UInt64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = nil
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (ms UInt64Slice) MoveAndAppendTo(dest UInt64Slice) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
if *dest.getOrig() == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.getOrig() = *ms.getOrig()
} else {
*dest.getOrig() = append(*dest.getOrig(), *ms.getOrig()...)
}
*ms.getOrig() = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (ms UInt64Slice) RemoveIf(f func(uint64) bool) {
ms.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*ms.getOrig()); i++ {
if f((*ms.getOrig())[i]) {
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*ms.getOrig())[newLen] = (*ms.getOrig())[i]
var zero uint64
(*ms.getOrig())[i] = zero
newLen++
}
*ms.getOrig() = (*ms.getOrig())[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
dest.getState().AssertMutable()
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = copyUint64Slice(*dest.getOrig(), *ms.getOrig())
}
// Equal checks equality with another UInt64Slice
func (ms UInt64Slice) Equal(val UInt64Slice) bool {
return slices.Equal(*ms.getOrig(), *val.getOrig())
}
func copyUint64Slice(dst, src []uint64) []uint64 {
return append(dst[:0], src...)
}
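// Example (editor's illustrative sketch, not generated code): MoveAndAppendTo
// transfers ownership of the backing array when the destination is empty,
// avoiding an element copy; otherwise it appends. The source is cleared either way.
func exampleUInt64SliceMove() {
	src := NewUInt64Slice()
	src.FromRaw([]uint64{1, 2, 3})
	dst := NewUInt64Slice()
	src.MoveAndAppendTo(dst)
	// src.Len() == 0, dst.Len() == 3
}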
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"iter"
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
)
// Map stores a map of string keys to elements of Value type.
//
// Must use NewMap function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Map internal.MapWrapper
// NewMap creates a Map with 0 elements.
func NewMap() Map {
orig := []internal.KeyValue(nil)
return Map(internal.NewMapWrapper(&orig, internal.NewState()))
}
func (m Map) getOrig() *[]internal.KeyValue {
return internal.GetMapOrig(internal.MapWrapper(m))
}
func (m Map) getState() *internal.State {
return internal.GetMapState(internal.MapWrapper(m))
}
func newMap(orig *[]internal.KeyValue, state *internal.State) Map {
return Map(internal.NewMapWrapper(orig, state))
}
// Clear erases any existing entries in this Map instance.
func (m Map) Clear() {
m.getState().AssertMutable()
*m.getOrig() = nil
}
// EnsureCapacity increases the capacity of this Map instance, if necessary,
// to ensure that it can hold at least the number of elements specified by the capacity argument.
func (m Map) EnsureCapacity(capacity int) {
m.getState().AssertMutable()
oldOrig := *m.getOrig()
if capacity <= cap(oldOrig) {
return
}
*m.getOrig() = make([]internal.KeyValue, len(oldOrig), capacity)
copy(*m.getOrig(), oldOrig)
}
// Get returns the Value associated with the key and true. The returned
// Value is not a copy, it is a reference to the value stored in this map.
// It is allowed to modify the returned value using Value.Set* functions.
// Such modification will be applied to the value stored in this map.
// Accessing the returned value after modifying the underlying map
// (removing or adding new values) is undefined behavior.
//
// If the key does not exist, returns a zero-initialized Value and false.
// Calling any functions on the returned invalid instance may cause a panic.
func (m Map) Get(key string) (Value, bool) {
for i := range *m.getOrig() {
akv := &(*m.getOrig())[i]
if akv.Key == key {
return newValue(&akv.Value, m.getState()), true
}
}
return newValue(nil, m.getState()), false
}
// Remove removes the entry associated with the key and returns true if the key
// was present in the map, otherwise returns false.
func (m Map) Remove(key string) bool {
m.getState().AssertMutable()
for i := range *m.getOrig() {
akv := &(*m.getOrig())[i]
if akv.Key == key {
*akv = (*m.getOrig())[len(*m.getOrig())-1]
*m.getOrig() = (*m.getOrig())[:len(*m.getOrig())-1]
return true
}
}
return false
}
// RemoveIf removes the entries for which the function in question returns true
func (m Map) RemoveIf(f func(string, Value) bool) {
m.getState().AssertMutable()
newLen := 0
for i := 0; i < len(*m.getOrig()); i++ {
if f((*m.getOrig())[i].Key, newValue(&(*m.getOrig())[i].Value, m.getState())) {
(*m.getOrig())[i] = internal.KeyValue{}
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*m.getOrig())[newLen] = (*m.getOrig())[i]
(*m.getOrig())[i] = internal.KeyValue{}
newLen++
}
*m.getOrig() = (*m.getOrig())[:newLen]
}
// PutEmpty inserts or updates an empty value in the map under the given key
// and returns the updated/inserted value.
func (m Map) PutEmpty(k string) Value {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.getOrig().Value = nil
return newValue(av.getOrig(), m.getState())
}
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState())
}
// GetOrPutEmpty returns the Value associated with the key and true (loaded) if the key exists in the map,
// otherwise inserts an empty value into the map under the given key and returns the inserted value
// and false (loaded).
func (m Map) GetOrPutEmpty(k string) (Value, bool) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av, true
}
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k})
return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()), false
}
// PutStr performs an insert-or-update action: the string value is inserted
// into the map if the key is not already present, and the existing entry is
// updated if it is.
func (m Map) PutStr(k, v string) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetStr(v)
return
}
ov := internal.NewAnyValueStringValue()
ov.StringValue = v
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutInt performs an insert-or-update action: the int value is inserted
// into the map if the key is not already present, and the existing entry is
// updated if it is.
func (m Map) PutInt(k string, v int64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetInt(v)
return
}
ov := internal.NewAnyValueIntValue()
ov.IntValue = v
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutDouble performs an insert-or-update action: the double value is inserted
// into the map if the key is not already present, and the existing entry is
// updated if it is.
func (m Map) PutDouble(k string, v float64) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetDouble(v)
return
}
ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = v
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutBool performs an insert-or-update action: the bool value is inserted
// into the map if the key is not already present, and the existing entry is
// updated if it is.
func (m Map) PutBool(k string, v bool) {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
av.SetBool(v)
return
}
ov := internal.NewAnyValueBoolValue()
ov.BoolValue = v
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
}
// PutEmptyBytes inserts or updates an empty byte slice under the given key and returns it.
func (m Map) PutEmptyBytes(k string) ByteSlice {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptyBytes()
}
ov := internal.NewAnyValueBytesValue()
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
return ByteSlice(internal.NewByteSliceWrapper(&ov.BytesValue, m.getState()))
}
// PutEmptyMap inserts or updates an empty map under the given key and returns it.
func (m Map) PutEmptyMap(k string) Map {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptyMap()
}
ov := internal.NewAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
return Map(internal.NewMapWrapper(&ov.KvlistValue.Values, m.getState()))
}
// PutEmptySlice inserts or updates an empty slice under the given key and returns it.
func (m Map) PutEmptySlice(k string) Slice {
m.getState().AssertMutable()
if av, existing := m.Get(k); existing {
return av.SetEmptySlice()
}
ov := internal.NewAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
*m.getOrig() = append(*m.getOrig(), internal.KeyValue{Key: k, Value: internal.AnyValue{Value: ov}})
return Slice(internal.NewSliceWrapper(&ov.ArrayValue.Values, m.getState()))
}
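// Example (editor's illustrative sketch, not generated code): the Put* family
// either inserts a new entry or updates an existing one in place, and the
// PutEmpty* variants return a live handle into the nested value. This assumes
// Slice.AppendEmpty from the generated Slice API, which is not shown above.
func exampleMapPut() {
	m := NewMap()
	m.PutStr("service.name", "checkout") // insert; Len() == 1
	m.PutStr("service.name", "cart")     // update in place; Len() stays 1
	labels := m.PutEmptyMap("labels")
	labels.PutBool("canary", true) // writes through into m
	ports := m.PutEmptySlice("ports")
	ports.AppendEmpty().SetInt(8080)
}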
// Len returns the length of this map.
//
// Because the Map is represented internally by a slice of pointers, and the data are coming from the wire,
// iterating with "Range" may yield fewer elements than Len reports, because nil elements are skipped.
func (m Map) Len() int {
return len(*m.getOrig())
}
// Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration.
//
// Example:
//
// sm.Range(func(k string, v Value) bool {
// ...
// })
func (m Map) Range(f func(k string, v Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
if !f(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
break
}
}
}
// All returns an iterator over key-value pairs in the Map.
//
// for k, v := range es.All() {
// ... // Do something with key-value pair
// }
func (m Map) All() iter.Seq2[string, Value] {
return func(yield func(string, Value) bool) {
for i := range *m.getOrig() {
kv := &(*m.getOrig())[i]
if !yield(kv.Key, Value(internal.NewValueWrapper(&kv.Value, m.getState()))) {
return
}
}
}
}
// MoveTo moves all key/values from the current map overriding the destination and
// resetting the current instance to its zero value
func (m Map) MoveTo(dest Map) {
m.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if m.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *m.getOrig()
*m.getOrig() = nil
}
// CopyTo copies all elements from the current map overriding the destination.
func (m Map) CopyTo(dest Map) {
dest.getState().AssertMutable()
if m.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = internal.CopyKeyValueSlice(*dest.getOrig(), *m.getOrig())
}
// AsRaw returns a standard Go map representation of this Map.
func (m Map) AsRaw() map[string]any {
rawMap := make(map[string]any, m.Len())
m.Range(func(k string, v Value) bool {
rawMap[k] = v.AsRaw()
return true
})
return rawMap
}
// FromRaw overrides this Map instance from a standard Go map.
func (m Map) FromRaw(rawMap map[string]any) error {
m.getState().AssertMutable()
if len(rawMap) == 0 {
*m.getOrig() = nil
return nil
}
var errs error
origs := make([]internal.KeyValue, len(rawMap))
ix := 0
for k, iv := range rawMap {
origs[ix].Key = k
errs = multierr.Append(errs, newValue(&origs[ix].Value, m.getState()).FromRaw(iv))
ix++
}
*m.getOrig() = origs
return errs
}
// Equal checks equality with another Map
func (m Map) Equal(val Map) bool {
if m.Len() != val.Len() {
return false
}
fullEqual := true
m.Range(func(k string, v Value) bool {
vv, ok := val.Get(k)
if !ok {
fullEqual = false
return fullEqual
}
if !v.Equal(vv) {
fullEqual = false
}
return fullEqual
})
return fullEqual
}
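// Example (editor's illustrative sketch, not generated code): Range and All
// are equivalent ways to iterate; AsRaw deep-copies into plain Go types and
// FromRaw rebuilds the map from them. Ranging over All requires Go 1.23+.
func exampleMapIterate() {
	m := NewMap()
	_ = m.FromRaw(map[string]any{"k": "v", "n": int64(7)})
	m.Range(func(k string, v Value) bool {
		_ = v.AsString() // coerce every value to a string
		return true      // false would stop the iteration early
	})
	for k, v := range m.All() {
		_, _ = k, v
	}
	_ = m.AsRaw() // map[string]any{"k": "v", "n": int64(7)}
}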
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
)
// AsRaw returns a []any copy of the Slice.
func (es Slice) AsRaw() []any {
rawSlice := make([]any, 0, es.Len())
for i := 0; i < es.Len(); i++ {
rawSlice = append(rawSlice, es.At(i).AsRaw())
}
return rawSlice
}
// FromRaw copies []any into the Slice.
func (es Slice) FromRaw(rawSlice []any) error {
es.getState().AssertMutable()
if len(rawSlice) == 0 {
*es.getOrig() = nil
return nil
}
var errs error
origs := make([]internal.AnyValue, len(rawSlice))
for ix, iv := range rawSlice {
errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv))
}
*es.getOrig() = origs
return errs
}
// Equal checks equality with another Slice
func (es Slice) Equal(val Slice) bool {
if es.Len() != val.Len() {
return false
}
for i := 0; i < es.Len(); i++ {
if !es.At(i).Equal(val.At(i)) {
return false
}
}
return true
}
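// Example (editor's illustrative sketch, not generated code): FromRaw accepts
// a heterogeneous []any, converting each element through Value.FromRaw, and
// AsRaw round-trips it back. NewSlice comes from the generated Slice API,
// which is not shown above.
func exampleSliceRaw() {
	s := NewSlice()
	_ = s.FromRaw([]any{"x", int64(1), true, []any{3.14}})
	_ = s.AsRaw() // []any{"x", int64(1), true, []any{3.14}}
}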
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal"
)
var emptySpanID = SpanID([8]byte{})
// SpanID is a span identifier.
type SpanID [8]byte
// NewSpanIDEmpty returns a new empty (all zero bytes) SpanID.
func NewSpanIDEmpty() SpanID {
return emptySpanID
}
// String returns string representation of the SpanID.
//
// Important: Don't rely on this method to get a string identifier of SpanID.
// Use hex.EncodeToString explicitly instead.
// This method is meant to implement the Stringer interface for display purposes only.
func (ms SpanID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms SpanID) IsEmpty() bool {
return internal.SpanID(ms).IsEmpty()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"time"
)
// Timestamp is a time specified as UNIX Epoch time in nanoseconds since
// 1970-01-01 00:00:00 +0000 UTC.
type Timestamp uint64
// NewTimestampFromTime constructs a new Timestamp from the provided time.Time.
func NewTimestampFromTime(t time.Time) Timestamp {
//nolint:gosec
return Timestamp(uint64(t.UnixNano()))
}
// AsTime converts this to a time.Time.
func (ts Timestamp) AsTime() time.Time {
//nolint:gosec
return time.Unix(0, int64(ts)).UTC()
}
// String returns the string representation of this in UTC.
func (ts Timestamp) String() string {
return ts.AsTime().String()
}
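// Example (editor's illustrative sketch, not generated code): Timestamp
// round-trips through time.Time at nanosecond precision and always renders
// in UTC.
func exampleTimestamp() {
	t := time.Date(2024, 1, 2, 3, 4, 5, 6, time.UTC)
	ts := NewTimestampFromTime(t)
	_ = ts.AsTime().Equal(t) // true
	_ = ts.String()          // "2024-01-02 03:04:05.000000006 +0000 UTC"
}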
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// TraceState represents the trace state from the w3c-trace-context.
//
// Must use NewTraceState function to create new instances.
// Important: zero-initialized instance is not valid for use.
type TraceState internal.TraceStateWrapper
func NewTraceState() TraceState {
return TraceState(internal.NewTraceStateWrapper(new(string), internal.NewState()))
}
func (ms TraceState) getOrig() *string {
return internal.GetTraceStateOrig(internal.TraceStateWrapper(ms))
}
func (ms TraceState) getState() *internal.State {
return internal.GetTraceStateState(internal.TraceStateWrapper(ms))
}
// AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
func (ms TraceState) AsRaw() string {
return *ms.getOrig()
}
// FromRaw copies the string representation in w3c-trace-context format of the tracestate into this TraceState.
func (ms TraceState) FromRaw(v string) {
ms.getState().AssertMutable()
*ms.getOrig() = v
}
// MoveTo moves the TraceState instance overriding the destination
// and resetting the current instance to its zero value.
func (ms TraceState) MoveTo(dest TraceState) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *ms.getOrig()
*ms.getOrig() = ""
}
// CopyTo copies the TraceState instance overriding the destination.
func (ms TraceState) CopyTo(dest TraceState) {
dest.getState().AssertMutable()
*dest.getOrig() = *ms.getOrig()
}
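// Example (editor's illustrative sketch, not generated code): TraceState is a
// thin wrapper over the raw w3c tracestate header string; MoveTo transfers the
// value and resets the source.
func exampleTraceState() {
	ts := NewTraceState()
	ts.FromRaw("vendor=value,other=1")
	dst := NewTraceState()
	ts.MoveTo(dst)
	// ts.AsRaw() == "", dst.AsRaw() == "vendor=value,other=1"
}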
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal"
)
var emptyTraceID = TraceID([16]byte{})
// TraceID is a trace identifier.
type TraceID [16]byte
// NewTraceIDEmpty returns a new empty (all zero bytes) TraceID.
func NewTraceIDEmpty() TraceID {
return emptyTraceID
}
// String returns string representation of the TraceID.
//
// Important: Don't rely on this method to get a string identifier of TraceID.
// Use hex.EncodeToString explicitly instead.
// This method is meant to implement the Stringer interface for display purposes only.
func (ms TraceID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if id doesn't contain at least one non-zero byte.
func (ms TraceID) IsEmpty() bool {
return internal.TraceID(ms).IsEmpty()
}
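// Example (editor's illustrative sketch, not generated code): TraceID and
// SpanID are fixed-size byte arrays; IsEmpty reports an all-zero ID, and
// String is for display only, per the comments above.
func exampleIDs() {
	var tid TraceID
	_ = tid.IsEmpty() // true: the zero value is the empty ID
	tid = TraceID([16]byte{0xAA})
	_ = tid.String() // 32 lowercase hex characters
	sid := SpanID([8]byte{0xBB})
	_ = hex.EncodeToString(sid[:]) // explicit encoding for stable identifiers
}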
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
"encoding/base64"
"encoding/json"
"fmt"
"math"
"strconv"
"go.opentelemetry.io/collector/pdata/internal"
)
// ValueType specifies the type of Value.
type ValueType int32
const (
ValueTypeEmpty ValueType = iota
ValueTypeStr
ValueTypeInt
ValueTypeDouble
ValueTypeBool
ValueTypeMap
ValueTypeSlice
ValueTypeBytes
)
// String returns the string representation of the ValueType.
func (avt ValueType) String() string {
switch avt {
case ValueTypeEmpty:
return "Empty"
case ValueTypeStr:
return "Str"
case ValueTypeBool:
return "Bool"
case ValueTypeInt:
return "Int"
case ValueTypeDouble:
return "Double"
case ValueTypeMap:
return "Map"
case ValueTypeSlice:
return "Slice"
case ValueTypeBytes:
return "Bytes"
}
return ""
}
// Value is a mutable cell containing any value. Typically used as an element of Map or Slice.
// Must use one of NewValue+ functions below to create new instances.
//
// Intended to be passed by value since internally it is just a pointer to actual
// value representation. For the same reason passing by value and calling setters
// will modify the original, e.g.:
//
// func f1(val Value) { val.SetInt(234) }
// func f2() {
// v := NewValueStr("a string")
// f1(v)
// _ = v.Type() // this will return ValueTypeInt
// }
//
// Important: zero-initialized instance is not valid for use. All Value functions below must
// be called only on instances that are created via NewValue+ functions.
type Value internal.ValueWrapper
// NewValueEmpty creates a new Value with an empty value.
func NewValueEmpty() Value {
return newValue(&internal.AnyValue{}, internal.NewState())
}
// NewValueStr creates a new Value with the given string value.
func NewValueStr(v string) Value {
ov := internal.NewAnyValueStringValue()
ov.StringValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueInt creates a new Value with the given int64 value.
func NewValueInt(v int64) Value {
ov := internal.NewAnyValueIntValue()
ov.IntValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueDouble creates a new Value with the given float64 value.
func NewValueDouble(v float64) Value {
ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBool creates a new Value with the given bool value.
func NewValueBool(v bool) Value {
ov := internal.NewAnyValueBoolValue()
ov.BoolValue = v
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueMap creates a new Value of map type.
func NewValueMap() Value {
ov := internal.NewAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueSlice creates a new Value of array type.
func NewValueSlice() Value {
ov := internal.NewAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
// NewValueBytes creates a new empty Value of byte type.
func NewValueBytes() Value {
ov := internal.NewAnyValueBytesValue()
orig := internal.NewAnyValue()
orig.Value = ov
return newValue(orig, internal.NewState())
}
func newValue(orig *internal.AnyValue, state *internal.State) Value {
return Value(internal.NewValueWrapper(orig, state))
}
func (v Value) getOrig() *internal.AnyValue {
return internal.GetValueOrig(internal.ValueWrapper(v))
}
func (v Value) getState() *internal.State {
return internal.GetValueState(internal.ValueWrapper(v))
}
// FromRaw sets the value from the given raw value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) FromRaw(iv any) error {
switch tv := iv.(type) {
case nil:
v.getOrig().Value = nil
case string:
v.SetStr(tv)
case int:
v.SetInt(int64(tv))
case int8:
v.SetInt(int64(tv))
case int16:
v.SetInt(int64(tv))
case int32:
v.SetInt(int64(tv))
case int64:
v.SetInt(tv)
case uint:
//nolint:gosec
v.SetInt(int64(tv))
case uint8:
v.SetInt(int64(tv))
case uint16:
v.SetInt(int64(tv))
case uint32:
v.SetInt(int64(tv))
case uint64:
//nolint:gosec
v.SetInt(int64(tv))
case float32:
v.SetDouble(float64(tv))
case float64:
v.SetDouble(tv)
case bool:
v.SetBool(tv)
case []byte:
v.SetEmptyBytes().FromRaw(tv)
case map[string]any:
return v.SetEmptyMap().FromRaw(tv)
case []any:
return v.SetEmptySlice().FromRaw(tv)
default:
return fmt.Errorf("<Invalid value type %T>", tv)
}
return nil
}
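// Example (editor's illustrative sketch, not generated code): FromRaw
// normalizes Go scalars into the nearest OTLP type, so every integer width
// becomes Int and float32 becomes Double; unsupported types yield an error.
func exampleValueFromRaw() {
	v := NewValueEmpty()
	_ = v.FromRaw(uint16(42))    // v.Type() == ValueTypeInt
	_ = v.FromRaw(float32(1.5))  // v.Type() == ValueTypeDouble
	err := v.FromRaw(struct{}{}) // not in the switch above: returns an error
	_ = err
}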
// Type returns the type of the value for this Value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) Type() ValueType {
switch v.getOrig().Value.(type) {
case *internal.AnyValue_StringValue:
return ValueTypeStr
case *internal.AnyValue_BoolValue:
return ValueTypeBool
case *internal.AnyValue_IntValue:
return ValueTypeInt
case *internal.AnyValue_DoubleValue:
return ValueTypeDouble
case *internal.AnyValue_KvlistValue:
return ValueTypeMap
case *internal.AnyValue_ArrayValue:
return ValueTypeSlice
case *internal.AnyValue_BytesValue:
return ValueTypeBytes
}
return ValueTypeEmpty
}
// Str returns the string value associated with this Value.
// The shorter name is used instead of String to avoid implementing the fmt.Stringer interface.
// If the Type() is not ValueTypeStr then returns empty string.
func (v Value) Str() string {
return v.getOrig().GetStringValue()
}
// Int returns the int64 value associated with this Value.
// If the Type() is not ValueTypeInt then returns int64(0).
func (v Value) Int() int64 {
return v.getOrig().GetIntValue()
}
// Double returns the float64 value associated with this Value.
// If the Type() is not ValueTypeDouble then returns float64(0).
func (v Value) Double() float64 {
return v.getOrig().GetDoubleValue()
}
// Bool returns the bool value associated with this Value.
// If the Type() is not ValueTypeBool then returns false.
func (v Value) Bool() bool {
return v.getOrig().GetBoolValue()
}
// Map returns the map value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeMap
// then it returns an invalid map. Note that using such map can cause panic.
func (v Value) Map() Map {
kvlist := v.getOrig().GetKvlistValue()
if kvlist == nil {
return Map{}
}
return newMap(&kvlist.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Slice returns the slice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeSlice
// then returns an invalid slice. Note that using such slice can cause panic.
func (v Value) Slice() Slice {
arr := v.getOrig().GetArrayValue()
if arr == nil {
return Slice{}
}
return newSlice(&arr.Values, internal.GetValueState(internal.ValueWrapper(v)))
}
// Bytes returns the ByteSlice value associated with this Value.
// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes
// then returns an invalid ByteSlice object. Note that using such slice can cause panic.
func (v Value) Bytes() ByteSlice {
bv, ok := v.getOrig().GetValue().(*internal.AnyValue_BytesValue)
if !ok {
return ByteSlice{}
}
return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, internal.GetValueState(internal.ValueWrapper(v))))
}
// SetStr replaces the string value associated with this Value,
// it also changes the type to be ValueTypeStr.
// The shorter name is used instead of SetString to avoid having the
// corresponding getter method implement the fmt.Stringer interface.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetStr(sv string) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueStringValue()
ov.StringValue = sv
v.getOrig().Value = ov
}
// SetInt replaces the int64 value associated with this Value,
// it also changes the type to be ValueTypeInt.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetInt(iv int64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueIntValue()
ov.IntValue = iv
v.getOrig().Value = ov
}
// SetDouble replaces the float64 value associated with this Value,
// it also changes the type to be ValueTypeDouble.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetDouble(dv float64) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueDoubleValue()
ov.DoubleValue = dv
v.getOrig().Value = ov
}
// SetBool replaces the bool value associated with this Value,
// it also changes the type to be ValueTypeBool.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetBool(bv bool) {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueBoolValue()
ov.BoolValue = bv
v.getOrig().Value = ov
}
// SetEmptyBytes sets value to an empty byte slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyBytes() ByteSlice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
bv := internal.NewAnyValueBytesValue()
v.getOrig().Value = bv
return ByteSlice(internal.NewByteSliceWrapper(&bv.BytesValue, v.getState()))
}
// SetEmptyMap sets value to an empty map and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptyMap() Map {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueKvlistValue()
ov.KvlistValue = internal.NewKeyValueList()
v.getOrig().Value = ov
return newMap(&ov.KvlistValue.Values, v.getState())
}
// SetEmptySlice sets value to an empty slice and returns it.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) SetEmptySlice() Slice {
v.getState().AssertMutable()
// Delete everything but the AnyValue object itself.
internal.DeleteAnyValue(v.getOrig(), false)
ov := internal.NewAnyValueArrayValue()
ov.ArrayValue = internal.NewArrayValue()
v.getOrig().Value = ov
return newSlice(&ov.ArrayValue.Values, v.getState())
}
// MoveTo moves the Value from current overriding the destination and
// resetting the current instance to empty value.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) MoveTo(dest Value) {
v.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if v.getOrig() == dest.getOrig() {
return
}
*dest.getOrig() = *v.getOrig()
v.getOrig().Value = nil
}
// CopyTo copies the Value instance overriding the destination.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) CopyTo(dest Value) {
dest.getState().AssertMutable()
internal.CopyAnyValue(dest.getOrig(), v.getOrig())
}
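// Example (editor's illustrative sketch, not generated code): CopyTo leaves
// the source intact, while MoveTo transfers the payload and resets the source
// to an empty value.
func exampleValueMoveCopy() {
	src := NewValueStr("hello")
	cpy := NewValueEmpty()
	src.CopyTo(cpy) // src still holds "hello"
	dst := NewValueEmpty()
	src.MoveTo(dst) // dst holds "hello"; src.Type() == ValueTypeEmpty
}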
// AsString converts an OTLP Value object of any type to its equivalent string
// representation. This differs from Str which only returns a non-empty value
// if the ValueType is ValueTypeStr.
// Calling this function on zero-initialized Value will cause a panic.
func (v Value) AsString() string {
switch v.Type() {
case ValueTypeEmpty:
return ""
case ValueTypeStr:
return v.Str()
case ValueTypeBool:
return strconv.FormatBool(v.Bool())
case ValueTypeDouble:
return float64AsString(v.Double())
case ValueTypeInt:
return strconv.FormatInt(v.Int(), 10)
case ValueTypeMap:
jsonStr, _ := json.Marshal(v.Map().AsRaw())
return string(jsonStr)
case ValueTypeBytes:
return base64.StdEncoding.EncodeToString(*v.Bytes().getOrig())
case ValueTypeSlice:
jsonStr, _ := json.Marshal(v.Slice().AsRaw())
return string(jsonStr)
default:
return fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", v.Type())
}
}
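// Example (editor's illustrative sketch, not generated code): AsString coerces
// every type, unlike Str, which is non-empty only for ValueTypeStr. Maps and
// slices are JSON-encoded; doubles follow the ES6-style formatting below.
func exampleValueAsString() {
	_ = NewValueInt(42).AsString()      // "42"
	_ = NewValueBool(true).AsString()   // "true"
	_ = NewValueDouble(1e-7).AsString() // "1e-7"
	m := NewValueMap()
	m.Map().PutStr("k", "v")
	_ = m.AsString() // `{"k":"v"}`
}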
// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.7:src/encoding/json/encode.go;l=585.
// This allows us to avoid using reflection.
func float64AsString(f float64) string {
if math.IsInf(f, 0) || math.IsNaN(f) {
return "json: unsupported value: " + strconv.FormatFloat(f, 'g', -1, 64)
}
// Convert as if by ES6 number to string conversion.
// This matches most other JSON generators.
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
scratch := [64]byte{}
b := scratch[:0]
abs := math.Abs(f)
fmt := byte('f')
if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
fmt = 'e'
}
b = strconv.AppendFloat(b, f, fmt, -1, 64)
if fmt == 'e' {
// clean up e-09 to e-9
n := len(b)
if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
b[n-2] = b[n-1]
b = b[:n-1]
}
}
return string(b)
}
func (v Value) AsRaw() any {
switch v.Type() {
case ValueTypeEmpty:
return nil
case ValueTypeStr:
return v.Str()
case ValueTypeBool:
return v.Bool()
case ValueTypeDouble:
return v.Double()
case ValueTypeInt:
return v.Int()
case ValueTypeBytes:
return v.Bytes().AsRaw()
case ValueTypeMap:
return v.Map().AsRaw()
case ValueTypeSlice:
return v.Slice().AsRaw()
}
return fmt.Sprintf("<Unknown OpenTelemetry value type %q>", v.Type())
}
func (v Value) Equal(c Value) bool {
if v.Type() != c.Type() {
return false
}
switch v.Type() {
case ValueTypeEmpty:
return true
case ValueTypeStr:
return v.Str() == c.Str()
case ValueTypeBool:
return v.Bool() == c.Bool()
case ValueTypeDouble:
return v.Double() == c.Double()
case ValueTypeInt:
return v.Int() == c.Int()
case ValueTypeBytes:
return v.Bytes().Equal(c.Bytes())
case ValueTypeMap:
return v.Map().Equal(c.Map())
case ValueTypeSlice:
return v.Slice().Equal(c.Slice())
}
return false
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// LogRecord is an experimental implementation of the OpenTelemetry Log Data Model.
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewLogRecord function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecord struct {
orig *internal.LogRecord
state *internal.State
}
func newLogRecord(orig *internal.LogRecord, state *internal.State) LogRecord {
return LogRecord{orig: orig, state: state}
}
// NewLogRecord creates a new empty LogRecord.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLogRecord() LogRecord {
return newLogRecord(internal.NewLogRecord(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms LogRecord) MoveTo(dest LogRecord) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLogRecord(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Timestamp returns the timestamp associated with this LogRecord.
func (ms LogRecord) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this LogRecord.
func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ObservedTimestamp returns the observedtimestamp associated with this LogRecord.
func (ms LogRecord) ObservedTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.ObservedTimeUnixNano)
}
// SetObservedTimestamp replaces the observedtimestamp associated with this LogRecord.
func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.ObservedTimeUnixNano = uint64(v)
}
// SeverityNumber returns the severitynumber associated with this LogRecord.
func (ms LogRecord) SeverityNumber() SeverityNumber {
return SeverityNumber(ms.orig.SeverityNumber)
}
// SetSeverityNumber replaces the severitynumber associated with this LogRecord.
func (ms LogRecord) SetSeverityNumber(v SeverityNumber) {
ms.state.AssertMutable()
ms.orig.SeverityNumber = internal.SeverityNumber(v)
}
// SeverityText returns the severitytext associated with this LogRecord.
func (ms LogRecord) SeverityText() string {
return ms.orig.SeverityText
}
// SetSeverityText replaces the severitytext associated with this LogRecord.
func (ms LogRecord) SetSeverityText(v string) {
ms.state.AssertMutable()
ms.orig.SeverityText = v
}
// Body returns the body associated with this LogRecord.
func (ms LogRecord) Body() pcommon.Value {
return pcommon.Value(internal.NewValueWrapper(&ms.orig.Body, ms.state))
}
// Attributes returns the Attributes associated with this LogRecord.
func (ms LogRecord) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this LogRecord.
func (ms LogRecord) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord.
func (ms LogRecord) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Flags returns the flags associated with this LogRecord.
func (ms LogRecord) Flags() LogRecordFlags {
return LogRecordFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this LogRecord.
func (ms LogRecord) SetFlags(v LogRecordFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// TraceID returns the traceid associated with this LogRecord.
func (ms LogRecord) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this LogRecord.
func (ms LogRecord) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this LogRecord.
func (ms LogRecord) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this LogRecord.
func (ms LogRecord) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = internal.SpanID(v)
}
// EventName returns the eventname associated with this LogRecord.
func (ms LogRecord) EventName() string {
return ms.orig.EventName
}
// SetEventName replaces the eventname associated with this LogRecord.
func (ms LogRecord) SetEventName(v string) {
ms.state.AssertMutable()
ms.orig.EventName = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms LogRecord) CopyTo(dest LogRecord) {
dest.state.AssertMutable()
internal.CopyLogRecord(dest.orig, ms.orig)
}
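// Example (editor's illustrative sketch, not generated code): populating a
// LogRecord through its setters. Body and Attributes return live views into
// the record, so writes through them mutate the record directly.
func exampleLogRecord() {
	lr := NewLogRecord()
	lr.SetTimestamp(pcommon.Timestamp(1700000000000000000))
	lr.SetSeverityText("INFO")
	lr.SetEventName("request.handled")
	lr.Body().SetStr("something happened")
	lr.Attributes().PutStr("http.method", "GET")
}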
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// LogRecordSlice logically represents a slice of LogRecord.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLogRecordSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LogRecordSlice struct {
orig *[]*internal.LogRecord
state *internal.State
}
func newLogRecordSlice(orig *[]*internal.LogRecord, state *internal.State) LogRecordSlice {
return LogRecordSlice{orig: orig, state: state}
}
// NewLogRecordSlice creates a LogRecordSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLogRecordSlice() LogRecordSlice {
orig := []*internal.LogRecord(nil)
return newLogRecordSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLogRecordSlice()".
func (es LogRecordSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LogRecordSlice) At(i int) LogRecord {
return newLogRecord((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LogRecordSlice) All() iter.Seq2[int, LogRecord] {
return func(yield func(int, LogRecord) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, there is no change in capacity.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new LogRecordSlice can be initialized:
//
// es := NewLogRecordSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LogRecordSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.LogRecord, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty LogRecord.
// It returns the newly added LogRecord.
func (es LogRecordSlice) AppendEmpty() LogRecord {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLogRecord())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LogRecordSlice) MoveAndAppendTo(dest LogRecordSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLogRecord((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LogRecordSlice) CopyTo(dest LogRecordSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLogRecordPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the LogRecord elements within LogRecordSlice given the
// provided less function so that two instances of LogRecordSlice
// can be compared.
func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
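// Example (editor's illustrative sketch, not generated code): grow the slice
// with AppendEmpty, prune it with RemoveIf, and order it with Sort.
func exampleLogRecordSlice() {
	es := NewLogRecordSlice()
	es.EnsureCapacity(2) // avoid reallocation for the two appends below
	es.AppendEmpty().SetSeverityText("INFO")
	es.AppendEmpty().SetSeverityText("DEBUG")
	es.RemoveIf(func(lr LogRecord) bool { return lr.SeverityText() == "DEBUG" })
	es.Sort(func(a, b LogRecord) bool { return a.Timestamp() < b.Timestamp() })
}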
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Logs is the top-level struct that is propagated through the logs pipeline.
// Use NewLogs to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Logs internal.LogsWrapper
func newLogs(orig *internal.ExportLogsServiceRequest, state *internal.State) Logs {
return Logs(internal.NewLogsWrapper(orig, state))
}
// NewLogs creates a new empty Logs.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLogs() Logs {
return newLogs(internal.NewExportLogsServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Logs) MoveTo(dest Logs) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportLogsServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceLogs returns the ResourceLogs associated with this Logs.
func (ms Logs) ResourceLogs() ResourceLogsSlice {
return newResourceLogsSlice(&ms.getOrig().ResourceLogs, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Logs) CopyTo(dest Logs) {
dest.getState().AssertMutable()
internal.CopyExportLogsServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Logs) getOrig() *internal.ExportLogsServiceRequest {
return internal.GetLogsOrig(internal.LogsWrapper(ms))
}
func (ms Logs) getState() *internal.State {
return internal.GetLogsState(internal.LogsWrapper(ms))
}
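// Example (editor's illustrative sketch, not generated code): the containment
// chain Logs -> ResourceLogs -> ScopeLogs -> LogRecord, built with AppendEmpty
// at each level. Resource().Attributes() and Scope().SetName() come from the
// pcommon API, which is not shown in this file.
func exampleBuildLogs() {
	ld := NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().PutStr("service.name", "checkout")
	sl := rl.ScopeLogs().AppendEmpty()
	sl.Scope().SetName("example-scope")
	sl.LogRecords().AppendEmpty().Body().SetStr("request handled")
}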
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceLogs is a collection of logs from a Resource.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewResourceLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogs struct {
orig *internal.ResourceLogs
state *internal.State
}
func newResourceLogs(orig *internal.ResourceLogs, state *internal.State) ResourceLogs {
return ResourceLogs{orig: orig, state: state}
}
// NewResourceLogs creates a new empty ResourceLogs.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceLogs() ResourceLogs {
return newResourceLogs(internal.NewResourceLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ResourceLogs) MoveTo(dest ResourceLogs) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceLogs(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceLogs.
func (ms ResourceLogs) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeLogs returns the ScopeLogs associated with this ResourceLogs.
func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice {
return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceLogs.
func (ms ResourceLogs) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceLogs.
func (ms ResourceLogs) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceLogs) CopyTo(dest ResourceLogs) {
dest.state.AssertMutable()
internal.CopyResourceLogs(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ResourceLogsSlice logically represents a slice of ResourceLogs.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceLogsSlice struct {
orig *[]*internal.ResourceLogs
state *internal.State
}
func newResourceLogsSlice(orig *[]*internal.ResourceLogs, state *internal.State) ResourceLogsSlice {
return ResourceLogsSlice{orig: orig, state: state}
}
// NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceLogsSlice() ResourceLogsSlice {
orig := []*internal.ResourceLogs(nil)
return newResourceLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceLogsSlice()".
func (es ResourceLogsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceLogsSlice) At(i int) ResourceLogs {
return newResourceLogs((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceLogsSlice) All() iter.Seq2[int, ResourceLogs] {
return func(yield func(int, ResourceLogs) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, there is no change in capacity.
// 2. If newCap > cap, the slice capacity is expanded to equal newCap.
//
// Here is how a new ResourceLogsSlice can be initialized:
//
// es := NewResourceLogsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceLogsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ResourceLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ResourceLogs.
// It returns the newly added ResourceLogs.
func (es ResourceLogsSlice) AppendEmpty() ResourceLogs {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceLogs())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceLogs((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceLogsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceLogs elements within ResourceLogsSlice given the
// provided less function so that two instances of ResourceLogsSlice
// can be compared.
func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeLogs is a collection of logs from an InstrumentationScope.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewScopeLogs function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogs struct {
orig *internal.ScopeLogs
state *internal.State
}
func newScopeLogs(orig *internal.ScopeLogs, state *internal.State) ScopeLogs {
return ScopeLogs{orig: orig, state: state}
}
// NewScopeLogs creates a new empty ScopeLogs.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeLogs() ScopeLogs {
return newScopeLogs(internal.NewScopeLogs(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ScopeLogs) MoveTo(dest ScopeLogs) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeLogs(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeLogs.
func (ms ScopeLogs) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// LogRecords returns the LogRecords associated with this ScopeLogs.
func (ms ScopeLogs) LogRecords() LogRecordSlice {
return newLogRecordSlice(&ms.orig.LogRecords, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeLogs.
func (ms ScopeLogs) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeLogs.
func (ms ScopeLogs) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeLogs) CopyTo(dest ScopeLogs) {
dest.state.AssertMutable()
internal.CopyScopeLogs(dest.orig, ms.orig)
}
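// Illustrative sketch (not part of the generated sources): populating a
// ScopeLogs through the accessors above. The InstrumentationScope setters and
// the LogRecord API are assumed from the pcommon and plog packages; the scope
// name, version, and schema URL are hypothetical.
package plog_test

import "go.opentelemetry.io/collector/pdata/plog"

func fillScopeLogs(sl plog.ScopeLogs) {
	sl.Scope().SetName("example-instrumentation")
	sl.Scope().SetVersion("1.2.3")
	sl.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
	lr := sl.LogRecords().AppendEmpty()
	lr.Body().SetStr("hello")
	lr.SetSeverityNumber(plog.SeverityNumberInfo)
	lr.SetSeverityText("INFO")
}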
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plog
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ScopeLogsSlice logically represents a slice of ScopeLogs.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeLogsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeLogsSlice struct {
orig *[]*internal.ScopeLogs
state *internal.State
}
func newScopeLogsSlice(orig *[]*internal.ScopeLogs, state *internal.State) ScopeLogsSlice {
return ScopeLogsSlice{orig: orig, state: state}
}
// NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeLogsSlice() ScopeLogsSlice {
orig := []*internal.ScopeLogs(nil)
return newScopeLogsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeLogsSlice()".
func (es ScopeLogsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeLogsSlice) At(i int) ScopeLogs {
return newScopeLogs((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeLogsSlice) All() iter.Seq2[int, ScopeLogs] {
return func(yield func(int, ScopeLogs) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ScopeLogsSlice can be initialized:
//
// es := NewScopeLogsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeLogsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ScopeLogs, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ScopeLogs.
// It returns the newly added ScopeLogs.
func (es ScopeLogsSlice) AppendEmpty() ScopeLogs {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeLogs())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeLogsSlice) MoveAndAppendTo(dest ScopeLogsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeLogs((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeLogsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeLogs elements within ScopeLogsSlice given the
// provided less function so that two instances of ScopeLogsSlice
// can be compared.
func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals Logs to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalLogs to the OTLP/JSON format.
func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ld.getOrig().MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
var _ Unmarshaler = (*JSONUnmarshaler)(nil)
// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to Logs.
type JSONUnmarshaler struct{}
// UnmarshalLogs from OTLP/JSON format into Logs.
func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
ld := NewLogs()
ld.getOrig().UnmarshalJSON(iter)
if iter.Error() != nil {
return Logs{}, iter.Error()
}
otlp.MigrateLogs(ld.getOrig().ResourceLogs)
return ld, nil
}
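// Illustrative sketch (not part of the generated sources): an OTLP/JSON round
// trip using the JSONMarshaler and JSONUnmarshaler defined above.
package plog_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func ExampleJSONMarshaler() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	var m plog.JSONMarshaler
	buf, err := m.MarshalLogs(ld)
	if err != nil {
		panic(err)
	}

	var u plog.JSONUnmarshaler
	back, err := u.UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ResourceLogs().Len())
	// Output: 1
}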
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
const isSampledMask = uint32(1)
// DefaultLogRecordFlags is the default, zero-valued LogRecordFlags.
var DefaultLogRecordFlags = LogRecordFlags(0)
// LogRecordFlags defines flags for the LogRecord. The 8 least significant bits are the trace flags as
// defined in the W3C Trace Context specification. The 24 most significant bits are reserved and must be set to 0.
type LogRecordFlags uint32
// IsSampled returns true if the LogRecordFlags contains the IsSampled flag.
func (ms LogRecordFlags) IsSampled() bool {
return uint32(ms)&isSampledMask != 0
}
// WithIsSampled returns a new LogRecordFlags, with the IsSampled flag set to the given value.
func (ms LogRecordFlags) WithIsSampled(b bool) LogRecordFlags {
orig := uint32(ms)
if b {
orig |= isSampledMask
} else {
orig &^= isSampledMask
}
return LogRecordFlags(orig)
}
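// Illustrative sketch (not part of the generated sources): LogRecordFlags is a
// plain value type, so each With* call returns a new value that must be stored
// back wherever the flags are kept.
package plog_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func ExampleLogRecordFlags() {
	flags := plog.DefaultLogRecordFlags.WithIsSampled(true)
	fmt.Println(flags.IsSampled())
	fmt.Println(flags.WithIsSampled(false).IsSampled())
	// Output:
	// true
	// false
}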
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
// MarkReadOnly marks the Logs as shared so that no further modifications can be done on it.
func (ms Logs) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Logs instance is read-only.
func (ms Logs) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// LogRecordCount calculates the total number of log records.
func (ms Logs) LogRecordCount() int {
logCount := 0
rss := ms.ResourceLogs()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ill := rs.ScopeLogs()
for j := 0; j < ill.Len(); j++ {
logs := ill.At(j)
logCount += logs.LogRecords().Len()
}
}
return logCount
}
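// Illustrative sketch (not part of the generated sources): counting records
// and then freezing a Logs instance. After MarkReadOnly, any mutating call
// panics via the internal AssertMutable check.
package plog_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func ExampleLogs_MarkReadOnly() {
	ld := plog.NewLogs()
	sl := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
	sl.LogRecords().AppendEmpty()
	sl.LogRecords().AppendEmpty()
	fmt.Println(ld.LogRecordCount())

	ld.MarkReadOnly()
	fmt.Println(ld.IsReadOnly())
	// Output:
	// 2
	// true
}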
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) {
size := ld.getOrig().SizeProto()
buf := make([]byte, size)
_ = ld.getOrig().MarshalProto(buf)
return buf, nil
}
func (e *ProtoMarshaler) LogsSize(ld Logs) int {
return ld.getOrig().SizeProto()
}
func (e *ProtoMarshaler) ResourceLogsSize(ld ResourceLogs) int {
return ld.orig.SizeProto()
}
func (e *ProtoMarshaler) ScopeLogsSize(ld ScopeLogs) int {
return ld.orig.SizeProto()
}
func (e *ProtoMarshaler) LogRecordSize(ld LogRecord) int {
return ld.orig.SizeProto()
}
var _ Unmarshaler = (*ProtoUnmarshaler)(nil)
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) {
ld := NewLogs()
err := ld.getOrig().UnmarshalProto(buf)
if err != nil {
return Logs{}, err
}
return ld, nil
}
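// Illustrative sketch (not part of the generated sources): sizing and
// round-tripping Logs through the ProtoMarshaler and ProtoUnmarshaler above.
package plog_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func ExampleProtoMarshaler() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	var m plog.ProtoMarshaler
	fmt.Println(m.LogsSize(ld) > 0)

	buf, err := m.MarshalLogs(ld)
	if err != nil {
		panic(err)
	}
	var u plog.ProtoUnmarshaler
	back, err := u.UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ResourceLogs().Len())
	// Output:
	// true
	// 1
}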
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plogotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *internal.ExportLogsPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *internal.ExportLogsPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportLogsPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportLogsPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedLogRecords returns the rejectedlogrecords associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedLogRecords() int64 {
return ms.orig.RejectedLogRecords
}
// SetRejectedLogRecords replaces the rejectedlogrecords associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedLogRecords(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedLogRecords = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportLogsPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package plogotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *internal.ExportLogsServiceResponse
state *internal.State
}
func newExportResponse(orig *internal.ExportLogsServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportLogsServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportLogsServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportLogsServiceResponse(dest.orig, ms.orig)
}
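// Illustrative sketch (not part of the generated sources): building a response
// that reports a partial success via the setters above. The helper name is
// hypothetical.
package plogotlp_test

import "go.opentelemetry.io/collector/pdata/plog/plogotlp"

func newPartialSuccessResponse(rejected int64, msg string) plogotlp.ExportResponse {
	resp := plogotlp.NewExportResponse()
	ps := resp.PartialSuccess()
	ps.SetRejectedLogRecords(rejected)
	ps.SetErrorMessage(msg)
	return resp
}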
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/otelgrpc"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for OTLP-GRPC Logs service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export plog.Logs to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementation of the GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otelgrpc.NewLogsServiceClient(cc)}
}
type grpcClient struct {
rawClient otelgrpc.LogsServiceClient
}
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for OTLP gRPC LogsService service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementation of the GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the Server to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otelgrpc.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv})
}
type rawLogsServer struct {
srv GRPCServer
}
func (s rawLogsServer) Export(ctx context.Context, request *internal.ExportLogsServiceRequest) (*internal.ExportLogsServiceResponse, error) {
otlp.MigrateLogs(request.ResourceLogs)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/plog"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for plog.Logs data.
type ExportRequest struct {
orig *internal.ExportLogsServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &internal.ExportLogsServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromLogs returns an ExportRequest from plog.Logs.
// Because ExportRequest is a wrapper for plog.Logs,
// any changes to the provided Logs struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromLogs(ld plog.Logs) ExportRequest {
return ExportRequest{
orig: internal.GetLogsOrig(internal.LogsWrapper(ld)),
state: internal.GetLogsState(internal.LogsWrapper(ld)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := ms.orig.UnmarshalProto(data)
if err != nil {
return err
}
otlp.MigrateLogs(ms.orig.ResourceLogs)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
func (ms ExportRequest) Logs() plog.Logs {
return plog.Logs(internal.NewLogsWrapper(ms.orig, ms.state))
}
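// Illustrative sketch (not part of the generated sources): wrapping plog.Logs
// in an ExportRequest and round-tripping it through proto bytes.
package plogotlp_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
)

func ExampleExportRequest() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty()

	req := plogotlp.NewExportRequestFromLogs(ld)
	buf, err := req.MarshalProto()
	if err != nil {
		panic(err)
	}

	back := plogotlp.NewExportRequest()
	if err := back.UnmarshalProto(buf); err != nil {
		panic(err)
	}
	// The request is a wrapper: its Logs share data with the request itself.
	fmt.Println(back.Logs().ResourceLogs().Len())
	// Output: 1
}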
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return ms.orig.UnmarshalProto(data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package plog // import "go.opentelemetry.io/collector/pdata/plog"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// SeverityNumber represents severity number of a log record.
type SeverityNumber int32
const (
SeverityNumberUnspecified = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED)
SeverityNumberTrace = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE)
SeverityNumberTrace2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE2)
SeverityNumberTrace3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE3)
SeverityNumberTrace4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_TRACE4)
SeverityNumberDebug = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG)
SeverityNumberDebug2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG2)
SeverityNumberDebug3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG3)
SeverityNumberDebug4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_DEBUG4)
SeverityNumberInfo = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO)
SeverityNumberInfo2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO2)
SeverityNumberInfo3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO3)
SeverityNumberInfo4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_INFO4)
SeverityNumberWarn = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN)
SeverityNumberWarn2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN2)
SeverityNumberWarn3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN3)
SeverityNumberWarn4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_WARN4)
SeverityNumberError = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR)
SeverityNumberError2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR2)
SeverityNumberError3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR3)
SeverityNumberError4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_ERROR4)
SeverityNumberFatal = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL)
SeverityNumberFatal2 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL2)
SeverityNumberFatal3 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL3)
SeverityNumberFatal4 = SeverityNumber(internal.SeverityNumber_SEVERITY_NUMBER_FATAL4)
)
// String returns the string representation of the SeverityNumber.
func (sn SeverityNumber) String() string {
switch sn {
case SeverityNumberUnspecified:
return "Unspecified"
case SeverityNumberTrace:
return "Trace"
case SeverityNumberTrace2:
return "Trace2"
case SeverityNumberTrace3:
return "Trace3"
case SeverityNumberTrace4:
return "Trace4"
case SeverityNumberDebug:
return "Debug"
case SeverityNumberDebug2:
return "Debug2"
case SeverityNumberDebug3:
return "Debug3"
case SeverityNumberDebug4:
return "Debug4"
case SeverityNumberInfo:
return "Info"
case SeverityNumberInfo2:
return "Info2"
case SeverityNumberInfo3:
return "Info3"
case SeverityNumberInfo4:
return "Info4"
case SeverityNumberWarn:
return "Warn"
case SeverityNumberWarn2:
return "Warn2"
case SeverityNumberWarn3:
return "Warn3"
case SeverityNumberWarn4:
return "Warn4"
case SeverityNumberError:
return "Error"
case SeverityNumberError2:
return "Error2"
case SeverityNumberError3:
return "Error3"
case SeverityNumberError4:
return "Error4"
case SeverityNumberFatal:
return "Fatal"
case SeverityNumberFatal2:
return "Fatal2"
case SeverityNumberFatal3:
return "Fatal3"
case SeverityNumberFatal4:
return "Fatal4"
}
return ""
}
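// Illustrative sketch (not part of the generated sources): mapping common log
// level strings onto SeverityNumber values; the mapping itself is a
// hypothetical convention, not defined by the package.
package plog_test

import "go.opentelemetry.io/collector/pdata/plog"

func severityFromLevel(level string) plog.SeverityNumber {
	switch level {
	case "debug":
		return plog.SeverityNumberDebug
	case "info":
		return plog.SeverityNumberInfo
	case "warn":
		return plog.SeverityNumberWarn
	case "error":
		return plog.SeverityNumberError
	case "fatal":
		return plog.SeverityNumberFatal
	default:
		return plog.SeverityNumberUnspecified
	}
}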
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// AggregationTemporality defines how a metric aggregator reports aggregated values.
// It describes how those values relate to the time interval over which they are aggregated.
type AggregationTemporality int32
const (
// AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used.
AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
// AggregationTemporalityDelta is an AggregationTemporality for a metric aggregator which reports changes since the last report time.
AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
// AggregationTemporalityCumulative is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
)
// String returns the string representation of the AggregationTemporality.
func (at AggregationTemporality) String() string {
switch at {
case AggregationTemporalityUnspecified:
return "Unspecified"
case AggregationTemporalityDelta:
return "Delta"
case AggregationTemporalityCumulative:
return "Cumulative"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// ExemplarValueType specifies the type of Exemplar measurement value.
type ExemplarValueType int32
const (
// ExemplarValueTypeEmpty means that exemplar value is unset.
ExemplarValueTypeEmpty ExemplarValueType = iota
ExemplarValueTypeInt
ExemplarValueTypeDouble
)
// String returns the string representation of the ExemplarValueType.
func (nt ExemplarValueType) String() string {
switch nt {
case ExemplarValueTypeEmpty:
return "Empty"
case ExemplarValueTypeInt:
return "Int"
case ExemplarValueTypeDouble:
return "Double"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Exemplar is a sample input measurement.
//
// Exemplars also hold information about the environment when the measurement was recorded,
// for example the span and trace ID of the active span when the exemplar was recorded.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExemplar function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Exemplar struct {
orig *internal.Exemplar
state *internal.State
}
func newExemplar(orig *internal.Exemplar, state *internal.State) Exemplar {
return Exemplar{orig: orig, state: state}
}
// NewExemplar creates a new empty Exemplar.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExemplar() Exemplar {
return newExemplar(internal.NewExemplar(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Exemplar) MoveTo(dest Exemplar) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExemplar(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// FilteredAttributes returns the FilteredAttributes associated with this Exemplar.
func (ms Exemplar) FilteredAttributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.FilteredAttributes, ms.state))
}
// Timestamp returns the timestamp associated with this Exemplar.
func (ms Exemplar) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this Exemplar.
func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ValueType returns the type of the value for this Exemplar.
// Calling this function on a zero-initialized Exemplar will cause a panic.
func (ms Exemplar) ValueType() ExemplarValueType {
switch ms.orig.Value.(type) {
case *internal.Exemplar_AsDouble:
return ExemplarValueTypeDouble
case *internal.Exemplar_AsInt:
return ExemplarValueTypeInt
}
return ExemplarValueTypeEmpty
}
// DoubleValue returns the double associated with this Exemplar.
func (ms Exemplar) DoubleValue() float64 {
return ms.orig.GetAsDouble()
}
// SetDoubleValue replaces the double associated with this Exemplar.
func (ms Exemplar) SetDoubleValue(v float64) {
ms.state.AssertMutable()
var ov *internal.Exemplar_AsDouble
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Exemplar_AsDouble{}
} else {
ov = internal.ProtoPoolExemplar_AsDouble.Get().(*internal.Exemplar_AsDouble)
}
ov.AsDouble = v
ms.orig.Value = ov
}
// IntValue returns the int associated with this Exemplar.
func (ms Exemplar) IntValue() int64 {
return ms.orig.GetAsInt()
}
// SetIntValue replaces the int associated with this Exemplar.
func (ms Exemplar) SetIntValue(v int64) {
ms.state.AssertMutable()
var ov *internal.Exemplar_AsInt
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Exemplar_AsInt{}
} else {
ov = internal.ProtoPoolExemplar_AsInt.Get().(*internal.Exemplar_AsInt)
}
ov.AsInt = v
ms.orig.Value = ov
}
// TraceID returns the traceid associated with this Exemplar.
func (ms Exemplar) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Exemplar.
func (ms Exemplar) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Exemplar.
func (ms Exemplar) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this Exemplar.
func (ms Exemplar) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = internal.SpanID(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Exemplar) CopyTo(dest Exemplar) {
dest.state.AssertMutable()
internal.CopyExemplar(dest.orig, ms.orig)
}
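// Illustrative sketch (not part of the generated sources): an Exemplar holds
// exactly one value variant, so the last Set*Value call wins; ValueType
// reports which variant is currently set.
package pmetric_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func ExampleExemplar_ValueType() {
	ex := pmetric.NewExemplar() // testing-only constructor, as documented above
	fmt.Println(ex.ValueType()) // Empty until a value is set
	ex.SetIntValue(42)
	fmt.Println(ex.ValueType())
	ex.SetDoubleValue(0.5)
	fmt.Println(ex.ValueType())
	// Output:
	// Empty
	// Int
	// Double
}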
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"go.opentelemetry.io/collector/pdata/internal"
)
// ExemplarSlice logically represents a slice of Exemplar.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExemplarSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExemplarSlice struct {
orig *[]internal.Exemplar
state *internal.State
}
func newExemplarSlice(orig *[]internal.Exemplar, state *internal.State) ExemplarSlice {
return ExemplarSlice{orig: orig, state: state}
}
// NewExemplarSlice creates an ExemplarSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExemplarSlice() ExemplarSlice {
orig := []internal.Exemplar(nil)
return newExemplarSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewExemplarSlice()".
func (es ExemplarSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ExemplarSlice) At(i int) Exemplar {
return newExemplar(&(*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ExemplarSlice) All() iter.Seq2[int, Exemplar] {
return func(yield func(int, Exemplar) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ExemplarSlice can be initialized:
//
// es := NewExemplarSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ExemplarSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]internal.Exemplar, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Exemplar.
// It returns the newly added Exemplar.
func (es ExemplarSlice) AppendEmpty() Exemplar {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.Exemplar{})
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ExemplarSlice) MoveAndAppendTo(dest ExemplarSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteExemplar(&(*es.orig)[i], false)
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
(*es.orig)[i].Reset()
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExemplarSlice) CopyTo(dest ExemplarSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyExemplarSlice(*dest.orig, *es.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// all reported double measurements over a time interval into an ExponentialHistogram.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogram struct {
orig *internal.ExponentialHistogram
state *internal.State
}
func newExponentialHistogram(orig *internal.ExponentialHistogram, state *internal.State) ExponentialHistogram {
return ExponentialHistogram{orig: orig, state: state}
}
// NewExponentialHistogram creates a new empty ExponentialHistogram.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogram() ExponentialHistogram {
return newExponentialHistogram(internal.NewExponentialHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogram(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this ExponentialHistogram.
func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice {
return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this ExponentialHistogram.
func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram.
func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) {
dest.state.AssertMutable()
internal.CopyExponentialHistogram(dest.orig, ms.orig)
}
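// Illustrative sketch (not part of the generated sources): attaching an
// ExponentialHistogram to a metric and setting its temporality. The
// ResourceMetrics/ScopeMetrics/Metric accessors, SetName, and
// SetEmptyExponentialHistogram are assumed from the pmetric package; the
// metric name is hypothetical.
package pmetric_test

import "go.opentelemetry.io/collector/pdata/pmetric"

func newDeltaExpHistogram() pmetric.Metrics {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("request.latency")
	eh := m.SetEmptyExponentialHistogram()
	eh.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
	eh.DataPoints().AppendEmpty()
	return md
}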
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
// summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPoint struct {
orig *internal.ExponentialHistogramDataPoint
state *internal.State
}
func newExponentialHistogramDataPoint(orig *internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint {
return ExponentialHistogramDataPoint{orig: orig, state: state}
}
// NewExponentialHistogramDataPoint creates a new empty ExponentialHistogramDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint {
return newExponentialHistogramDataPoint(internal.NewExponentialHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogramDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Sum() float64 {
return ms.orig.GetSum()
}
// HasSum returns true if the ExponentialHistogramDataPoint contains a
// Sum value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasSum() bool {
return ms.orig.Sum_ != nil
}
// SetSum replaces the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum_ = &internal.ExponentialHistogramDataPoint_Sum{Sum: v}
}
// RemoveSum removes the sum associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveSum() {
ms.state.AssertMutable()
ms.orig.Sum_ = nil
}
// Scale returns the scale associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Scale() int32 {
return ms.orig.Scale
}
// SetScale replaces the scale associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetScale(v int32) {
ms.state.AssertMutable()
ms.orig.Scale = v
}
// ZeroCount returns the zerocount associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) ZeroCount() uint64 {
return ms.orig.ZeroCount
}
// SetZeroCount replaces the zerocount associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetZeroCount(v uint64) {
ms.state.AssertMutable()
ms.orig.ZeroCount = v
}
// Positive returns the positive associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Positive() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(&ms.orig.Positive, ms.state)
}
// Negative returns the negative associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Negative() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(&ms.orig.Negative, ms.state)
}
// Flags returns the flags associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Min returns the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Min() float64 {
return ms.orig.GetMin()
}
// HasMin returns true if the ExponentialHistogramDataPoint contains a
// Min value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
// SetMin replaces the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
ms.orig.Min_ = &internal.ExponentialHistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveMin() {
ms.state.AssertMutable()
ms.orig.Min_ = nil
}
// Max returns the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Max() float64 {
return ms.orig.GetMax()
}
// HasMax returns true if the ExponentialHistogramDataPoint contains a
// Max value, false otherwise.
func (ms ExponentialHistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
// SetMax replaces the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
ms.orig.Max_ = &internal.ExponentialHistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) RemoveMax() {
ms.state.AssertMutable()
ms.orig.Max_ = nil
}
// ZeroThreshold returns the zerothreshold associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) ZeroThreshold() float64 {
return ms.orig.ZeroThreshold
}
// SetZeroThreshold replaces the zerothreshold associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) {
ms.state.AssertMutable()
ms.orig.ZeroThreshold = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) {
dest.state.AssertMutable()
internal.CopyExponentialHistogramDataPoint(dest.orig, ms.orig)
}
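// Illustrative sketch (not part of the generated sources): populating an
// exponential histogram data point, including the optional Sum/Min/Max fields
// guarded by the Has* methods above. pcommon.NewTimestampFromTime and
// UInt64Slice.FromRaw are assumed from the pcommon package; the values are
// hypothetical.
package pmetric_test

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func fillExpHistDataPoint(dp pmetric.ExponentialHistogramDataPoint) {
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetScale(2)
	dp.SetCount(6)
	dp.SetZeroCount(1)
	dp.SetSum(12.5) // optional: HasSum() now reports true
	dp.SetMin(0.0)
	dp.SetMax(8.0)
	dp.Positive().SetOffset(0)
	dp.Positive().BucketCounts().FromRaw([]uint64{2, 1, 2})
}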
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ExponentialHistogramDataPointBuckets is a set of bucket counts, encoded in a contiguous array of counts.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPointBuckets function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointBuckets struct {
orig *internal.ExponentialHistogramDataPointBuckets
state *internal.State
}
func newExponentialHistogramDataPointBuckets(orig *internal.ExponentialHistogramDataPointBuckets, state *internal.State) ExponentialHistogramDataPointBuckets {
return ExponentialHistogramDataPointBuckets{orig: orig, state: state}
}
// NewExponentialHistogramDataPointBuckets creates a new empty ExponentialHistogramDataPointBuckets.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets {
return newExponentialHistogramDataPointBuckets(internal.NewExponentialHistogramDataPointBuckets(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramDataPointBuckets) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExponentialHistogramDataPointBuckets(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Offset returns the offset associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) Offset() int32 {
return ms.orig.Offset
}
// SetOffset replaces the offset associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) {
ms.state.AssertMutable()
ms.orig.Offset = v
}
// BucketCounts returns the BucketCounts associated with this ExponentialHistogramDataPointBuckets.
func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) {
dest.state.AssertMutable()
internal.CopyExponentialHistogramDataPointBuckets(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewExponentialHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExponentialHistogramDataPointSlice struct {
orig *[]*internal.ExponentialHistogramDataPoint
state *internal.State
}
func newExponentialHistogramDataPointSlice(orig *[]*internal.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice {
return ExponentialHistogramDataPointSlice{orig: orig, state: state}
}
// NewExponentialHistogramDataPointSlice creates an ExponentialHistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice {
orig := []*internal.ExponentialHistogramDataPoint(nil)
return newExponentialHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewExponentialHistogramDataPointSlice()".
func (es ExponentialHistogramDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ExponentialHistogramDataPointSlice) At(i int) ExponentialHistogramDataPoint {
return newExponentialHistogramDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ExponentialHistogramDataPointSlice) All() iter.Seq2[int, ExponentialHistogramDataPoint] {
return func(yield func(int, ExponentialHistogramDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ExponentialHistogramDataPointSlice can be initialized:
//
// es := NewExponentialHistogramDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ExponentialHistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ExponentialHistogramDataPoint.
// It returns the newly added ExponentialHistogramDataPoint.
func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewExponentialHistogramDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ExponentialHistogramDataPointSlice) MoveAndAppendTo(dest ExponentialHistogramDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogramDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteExponentialHistogramDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or the pointer to it) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyExponentialHistogramDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the
// provided less function so that two instances of ExponentialHistogramDataPointSlice
// can be compared.
func (es ExponentialHistogramDataPointSlice) Sort(less func(a, b ExponentialHistogramDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Gauge represents the type of a numeric metric that always exports the "current value" for every data point.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewGauge function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Gauge struct {
orig *internal.Gauge
state *internal.State
}
func newGauge(orig *internal.Gauge, state *internal.State) Gauge {
return Gauge{orig: orig, state: state}
}
// NewGauge creates a new empty Gauge.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewGauge() Gauge {
return newGauge(internal.NewGauge(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Gauge) MoveTo(dest Gauge) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteGauge(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Gauge.
func (ms Gauge) DataPoints() NumberDataPointSlice {
return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Gauge) CopyTo(dest Gauge) {
dest.state.AssertMutable()
internal.CopyGauge(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogram function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Histogram struct {
orig *internal.Histogram
state *internal.State
}
func newHistogram(orig *internal.Histogram, state *internal.State) Histogram {
return Histogram{orig: orig, state: state}
}
// NewHistogram creates a new empty Histogram.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogram() Histogram {
return newHistogram(internal.NewHistogram(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Histogram) MoveTo(dest Histogram) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteHistogram(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Histogram.
func (ms Histogram) DataPoints() HistogramDataPointSlice {
return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this Histogram.
func (ms Histogram) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram.
func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Histogram) CopyTo(dest Histogram) {
dest.state.AssertMutable()
internal.CopyHistogram(dest.orig, ms.orig)
}
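// Example (editorial sketch, not part of the generated code): configuring a
// Histogram's temporality before adding data points.
// AggregationTemporalityDelta is one of the enum values defined elsewhere in
// this package.
//
//     h := NewMetric().SetEmptyHistogram()
//     h.SetAggregationTemporality(AggregationTemporalityDelta)
//     dp := h.DataPoints().AppendEmpty()
//     _ = dp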
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogramDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPoint struct {
orig *internal.HistogramDataPoint
state *internal.State
}
func newHistogramDataPoint(orig *internal.HistogramDataPoint, state *internal.State) HistogramDataPoint {
return HistogramDataPoint{orig: orig, state: state}
}
// NewHistogramDataPoint creates a new empty HistogramDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewHistogramDataPoint() HistogramDataPoint {
return newHistogramDataPoint(internal.NewHistogramDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteHistogramDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Sum() float64 {
return ms.orig.GetSum()
}
// HasSum returns true if the HistogramDataPoint contains a
// Sum value, false otherwise.
func (ms HistogramDataPoint) HasSum() bool {
return ms.orig.Sum_ != nil
}
// SetSum replaces the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum_ = &internal.HistogramDataPoint_Sum{Sum: v}
}
// RemoveSum removes the sum associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveSum() {
ms.state.AssertMutable()
ms.orig.Sum_ = nil
}
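// Example (editorial sketch, not part of the generated code): Sum is an
// optional field, so reads should be guarded with HasSum, and the value can
// be cleared again with RemoveSum.
//
//     dp := NewHistogramDataPoint()
//     dp.SetSum(12.5)
//     if dp.HasSum() {
//         _ = dp.Sum() // 12.5
//     }
//     dp.RemoveSum() // dp.HasSum() is now false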
// BucketCounts returns the BucketCounts associated with this HistogramDataPoint.
func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.BucketCounts, ms.state))
}
// ExplicitBounds returns the ExplicitBounds associated with this HistogramDataPoint.
func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice {
return pcommon.Float64Slice(internal.NewFloat64SliceWrapper(&ms.orig.ExplicitBounds, ms.state))
}
// Exemplars returns the Exemplars associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Flags returns the flags associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// Min returns the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Min() float64 {
return ms.orig.GetMin()
}
// HasMin returns true if the HistogramDataPoint contains a
// Min value, false otherwise.
func (ms HistogramDataPoint) HasMin() bool {
return ms.orig.Min_ != nil
}
// SetMin replaces the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMin(v float64) {
ms.state.AssertMutable()
ms.orig.Min_ = &internal.HistogramDataPoint_Min{Min: v}
}
// RemoveMin removes the min associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveMin() {
ms.state.AssertMutable()
ms.orig.Min_ = nil
}
// Max returns the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) Max() float64 {
return ms.orig.GetMax()
}
// HasMax returns true if the HistogramDataPoint contains a
// Max value, false otherwise.
func (ms HistogramDataPoint) HasMax() bool {
return ms.orig.Max_ != nil
}
// SetMax replaces the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) SetMax(v float64) {
ms.state.AssertMutable()
ms.orig.Max_ = &internal.HistogramDataPoint_Max{Max: v}
}
// RemoveMax removes the max associated with this HistogramDataPoint.
func (ms HistogramDataPoint) RemoveMax() {
ms.state.AssertMutable()
ms.orig.Max_ = nil
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) {
dest.state.AssertMutable()
internal.CopyHistogramDataPoint(dest.orig, ms.orig)
}
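// Example (editorial sketch, not part of the generated code): populating a
// complete HistogramDataPoint. The bucket layout below (two explicit bounds,
// three buckets) is an assumption for illustration; ExplicitBounds must hold
// exactly one fewer element than BucketCounts.
//
//     dp := NewHistogramDataPoint()
//     dp.SetCount(5)
//     dp.SetSum(12.5)
//     dp.SetMin(0.5)
//     dp.SetMax(7.0)
//     dp.ExplicitBounds().FromRaw([]float64{1, 5})
//     dp.BucketCounts().FromRaw([]uint64{2, 2, 1})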
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// HistogramDataPointSlice logically represents a slice of HistogramDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewHistogramDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type HistogramDataPointSlice struct {
orig *[]*internal.HistogramDataPoint
state *internal.State
}
func newHistogramDataPointSlice(orig *[]*internal.HistogramDataPoint, state *internal.State) HistogramDataPointSlice {
return HistogramDataPointSlice{orig: orig, state: state}
}
// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewHistogramDataPointSlice() HistogramDataPointSlice {
orig := []*internal.HistogramDataPoint(nil)
return newHistogramDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewHistogramDataPointSlice()".
func (es HistogramDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es HistogramDataPointSlice) At(i int) HistogramDataPoint {
return newHistogramDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es HistogramDataPointSlice) All() iter.Seq2[int, HistogramDataPoint] {
return func(yield func(int, HistogramDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new HistogramDataPointSlice can be initialized:
//
// es := NewHistogramDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es HistogramDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.HistogramDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty HistogramDataPoint.
// It returns the newly added HistogramDataPoint.
func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewHistogramDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es HistogramDataPointSlice) MoveAndAppendTo(dest HistogramDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteHistogramDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
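// Example (editorial sketch, not part of the generated code): dropping data
// points whose NoRecordedValue flag is set. DataPointFlags.NoRecordedValue is
// defined elsewhere in this package.
//
//     dps.RemoveIf(func(dp HistogramDataPoint) bool {
//         return dp.Flags().NoRecordedValue()
//     })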
// CopyTo copies all elements from the current slice overriding the destination.
func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyHistogramDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the
// provided less function so that two instances of HistogramDataPointSlice
// can be compared.
func (es HistogramDataPointSlice) Sort(less func(a, b HistogramDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
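// Example (editorial sketch, not part of the generated code): sorting data
// points chronologically. Timestamp values are plain nanosecond counts, so
// they compare directly with "<".
//
//     dps.Sort(func(a, b HistogramDataPoint) bool {
//         return a.Timestamp() < b.Timestamp()
//     })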
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Metric represents one metric as a collection of datapoints.
// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetric function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Metric struct {
orig *internal.Metric
state *internal.State
}
func newMetric(orig *internal.Metric, state *internal.State) Metric {
return Metric{orig: orig, state: state}
}
// NewMetric creates a new empty Metric.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMetric() Metric {
return newMetric(internal.NewMetric(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Metric) MoveTo(dest Metric) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteMetric(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Name returns the name associated with this Metric.
func (ms Metric) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this Metric.
func (ms Metric) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Description returns the description associated with this Metric.
func (ms Metric) Description() string {
return ms.orig.Description
}
// SetDescription replaces the description associated with this Metric.
func (ms Metric) SetDescription(v string) {
ms.state.AssertMutable()
ms.orig.Description = v
}
// Unit returns the unit associated with this Metric.
func (ms Metric) Unit() string {
return ms.orig.Unit
}
// SetUnit replaces the unit associated with this Metric.
func (ms Metric) SetUnit(v string) {
ms.state.AssertMutable()
ms.orig.Unit = v
}
// Type returns the type of the data for this Metric.
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Type() MetricType {
switch ms.orig.Data.(type) {
case *internal.Metric_Gauge:
return MetricTypeGauge
case *internal.Metric_Sum:
return MetricTypeSum
case *internal.Metric_Histogram:
return MetricTypeHistogram
case *internal.Metric_ExponentialHistogram:
return MetricTypeExponentialHistogram
case *internal.Metric_Summary:
return MetricTypeSummary
}
return MetricTypeEmpty
}
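// Example (editorial sketch, not part of the generated code): dispatching on
// a Metric's type before accessing the corresponding data. Only the accessor
// matching Type() returns a usable value.
//
//     switch m.Type() {
//     case MetricTypeGauge:
//         _ = m.Gauge().DataPoints().Len()
//     case MetricTypeSum:
//         _ = m.Sum().DataPoints().Len()
//     default:
//         // MetricTypeEmpty: no data set yet
//     }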
// Gauge returns the gauge associated with this Metric.
//
// Calling this function when Type() != MetricTypeGauge returns an invalid
// zero-initialized instance of Gauge. Note that using such a Gauge instance can cause a panic.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Gauge() Gauge {
v, ok := ms.orig.GetData().(*internal.Metric_Gauge)
if !ok {
return Gauge{}
}
return newGauge(v.Gauge, ms.state)
}
// SetEmptyGauge sets an empty gauge to this Metric.
//
// After this, the Type() function will return MetricTypeGauge.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyGauge() Gauge {
ms.state.AssertMutable()
var ov *internal.Metric_Gauge
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Metric_Gauge{}
} else {
ov = internal.ProtoPoolMetric_Gauge.Get().(*internal.Metric_Gauge)
}
ov.Gauge = internal.NewGauge()
ms.orig.Data = ov
return newGauge(ov.Gauge, ms.state)
}
// Sum returns the sum associated with this Metric.
//
// Calling this function when Type() != MetricTypeSum returns an invalid
// zero-initialized instance of Sum. Note that using such a Sum instance can cause a panic.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Sum() Sum {
v, ok := ms.orig.GetData().(*internal.Metric_Sum)
if !ok {
return Sum{}
}
return newSum(v.Sum, ms.state)
}
// SetEmptySum sets an empty sum to this Metric.
//
// After this, the Type() function will return MetricTypeSum.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySum() Sum {
ms.state.AssertMutable()
var ov *internal.Metric_Sum
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Metric_Sum{}
} else {
ov = internal.ProtoPoolMetric_Sum.Get().(*internal.Metric_Sum)
}
ov.Sum = internal.NewSum()
ms.orig.Data = ov
return newSum(ov.Sum, ms.state)
}
// Histogram returns the histogram associated with this Metric.
//
// Calling this function when Type() != MetricTypeHistogram returns an invalid
// zero-initialized instance of Histogram. Note that using such a Histogram instance can cause a panic.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Histogram() Histogram {
v, ok := ms.orig.GetData().(*internal.Metric_Histogram)
if !ok {
return Histogram{}
}
return newHistogram(v.Histogram, ms.state)
}
// SetEmptyHistogram sets an empty histogram to this Metric.
//
// After this, the Type() function will return MetricTypeHistogram.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyHistogram() Histogram {
ms.state.AssertMutable()
var ov *internal.Metric_Histogram
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Metric_Histogram{}
} else {
ov = internal.ProtoPoolMetric_Histogram.Get().(*internal.Metric_Histogram)
}
ov.Histogram = internal.NewHistogram()
ms.orig.Data = ov
return newHistogram(ov.Histogram, ms.state)
}
// ExponentialHistogram returns the exponentialhistogram associated with this Metric.
//
// Calling this function when Type() != MetricTypeExponentialHistogram returns an invalid
// zero-initialized instance of ExponentialHistogram. Note that using such an ExponentialHistogram instance can cause a panic.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) ExponentialHistogram() ExponentialHistogram {
v, ok := ms.orig.GetData().(*internal.Metric_ExponentialHistogram)
if !ok {
return ExponentialHistogram{}
}
return newExponentialHistogram(v.ExponentialHistogram, ms.state)
}
// SetEmptyExponentialHistogram sets an empty exponentialhistogram to this Metric.
//
// After this, the Type() function will return MetricTypeExponentialHistogram.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram {
ms.state.AssertMutable()
var ov *internal.Metric_ExponentialHistogram
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Metric_ExponentialHistogram{}
} else {
ov = internal.ProtoPoolMetric_ExponentialHistogram.Get().(*internal.Metric_ExponentialHistogram)
}
ov.ExponentialHistogram = internal.NewExponentialHistogram()
ms.orig.Data = ov
return newExponentialHistogram(ov.ExponentialHistogram, ms.state)
}
// Summary returns the summary associated with this Metric.
//
// Calling this function when Type() != MetricTypeSummary returns an invalid
// zero-initialized instance of Summary. Note that using such a Summary instance can cause a panic.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Summary() Summary {
v, ok := ms.orig.GetData().(*internal.Metric_Summary)
if !ok {
return Summary{}
}
return newSummary(v.Summary, ms.state)
}
// SetEmptySummary sets an empty summary to this Metric.
//
// After this, the Type() function will return MetricTypeSummary.
//
// Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) SetEmptySummary() Summary {
ms.state.AssertMutable()
var ov *internal.Metric_Summary
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.Metric_Summary{}
} else {
ov = internal.ProtoPoolMetric_Summary.Get().(*internal.Metric_Summary)
}
ov.Summary = internal.NewSummary()
ms.orig.Data = ov
return newSummary(ov.Summary, ms.state)
}
// Metadata returns the Metadata associated with this Metric.
func (ms Metric) Metadata() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Metadata, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Metric) CopyTo(dest Metric) {
dest.state.AssertMutable()
internal.CopyMetric(dest.orig, ms.orig)
}
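// Example (editorial sketch, not part of the generated code): filling in the
// descriptive fields of a Metric. The name, description, and unit below are
// assumptions for illustration; units should follow UCUM conventions per the
// OTLP specification.
//
//     m := NewMetric()
//     m.SetName("http.server.request.duration")
//     m.SetDescription("Duration of HTTP server requests.")
//     m.SetUnit("s")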
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Metrics is the top-level struct that is propagated through the metrics pipeline.
// Use NewMetrics to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Metrics internal.MetricsWrapper
func newMetrics(orig *internal.ExportMetricsServiceRequest, state *internal.State) Metrics {
return Metrics(internal.NewMetricsWrapper(orig, state))
}
// NewMetrics creates a new empty Metrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMetrics() Metrics {
return newMetrics(internal.NewExportMetricsServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Metrics) MoveTo(dest Metrics) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportMetricsServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceMetrics returns the ResourceMetrics associated with this Metrics.
func (ms Metrics) ResourceMetrics() ResourceMetricsSlice {
return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Metrics) CopyTo(dest Metrics) {
dest.getState().AssertMutable()
internal.CopyExportMetricsServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Metrics) getOrig() *internal.ExportMetricsServiceRequest {
return internal.GetMetricsOrig(internal.MetricsWrapper(ms))
}
func (ms Metrics) getState() *internal.State {
return internal.GetMetricsState(internal.MetricsWrapper(ms))
}
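// Example (editorial sketch, not part of the generated code): assembling a
// Metrics payload top-down. The attribute, scope, and metric values below are
// assumptions for illustration.
//
//     md := NewMetrics()
//     rm := md.ResourceMetrics().AppendEmpty()
//     rm.Resource().Attributes().PutStr("service.name", "checkout")
//     sm := rm.ScopeMetrics().AppendEmpty()
//     sm.Scope().SetName("example.instrumentation")
//     m := sm.Metrics().AppendEmpty()
//     m.SetName("queue.length")
//     m.SetEmptyGauge().DataPoints().AppendEmpty().SetIntValue(7)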
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// MetricSlice logically represents a slice of Metric.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMetricSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type MetricSlice struct {
orig *[]*internal.Metric
state *internal.State
}
func newMetricSlice(orig *[]*internal.Metric, state *internal.State) MetricSlice {
return MetricSlice{orig: orig, state: state}
}
// NewMetricSlice creates a MetricSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewMetricSlice() MetricSlice {
orig := []*internal.Metric(nil)
return newMetricSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewMetricSlice()".
func (es MetricSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es MetricSlice) At(i int) Metric {
return newMetric((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es MetricSlice) All() iter.Seq2[int, Metric] {
return func(yield func(int, Metric) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new MetricSlice can be initialized:
//
// es := NewMetricSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es MetricSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Metric, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Metric.
// It returns the newly added Metric.
func (es MetricSlice) AppendEmpty() Metric {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewMetric())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
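// Example (editorial sketch, not part of the generated code): draining one
// MetricSlice into another without copying the underlying data; src is empty
// afterwards.
//
//     src.MoveAndAppendTo(dest)
//     // src.Len() == 0; dest now holds the moved elements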
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es MetricSlice) RemoveIf(f func(Metric) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteMetric((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es MetricSlice) CopyTo(dest MetricSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyMetricPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Metric elements within MetricSlice given the
// provided less function so that two instances of MetricSlice
// can be compared.
func (es MetricSlice) Sort(less func(a, b Metric) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewNumberDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPoint struct {
orig *internal.NumberDataPoint
state *internal.State
}
func newNumberDataPoint(orig *internal.NumberDataPoint, state *internal.State) NumberDataPoint {
return NumberDataPoint{orig: orig, state: state}
}
// NewNumberDataPoint creates a new empty NumberDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewNumberDataPoint() NumberDataPoint {
return newNumberDataPoint(internal.NewNumberDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteNumberDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this NumberDataPoint.
func (ms NumberDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this NumberDataPoint.
func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// ValueType returns the type of the value for this NumberDataPoint.
// Calling this function on zero-initialized NumberDataPoint will cause a panic.
func (ms NumberDataPoint) ValueType() NumberDataPointValueType {
switch ms.orig.Value.(type) {
case *internal.NumberDataPoint_AsDouble:
return NumberDataPointValueTypeDouble
case *internal.NumberDataPoint_AsInt:
return NumberDataPointValueTypeInt
}
return NumberDataPointValueTypeEmpty
}
// DoubleValue returns the double associated with this NumberDataPoint.
func (ms NumberDataPoint) DoubleValue() float64 {
return ms.orig.GetAsDouble()
}
// SetDoubleValue replaces the double associated with this NumberDataPoint.
func (ms NumberDataPoint) SetDoubleValue(v float64) {
ms.state.AssertMutable()
var ov *internal.NumberDataPoint_AsDouble
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.NumberDataPoint_AsDouble{}
} else {
ov = internal.ProtoPoolNumberDataPoint_AsDouble.Get().(*internal.NumberDataPoint_AsDouble)
}
ov.AsDouble = v
ms.orig.Value = ov
}
// IntValue returns the int associated with this NumberDataPoint.
func (ms NumberDataPoint) IntValue() int64 {
return ms.orig.GetAsInt()
}
// SetIntValue replaces the int associated with this NumberDataPoint.
func (ms NumberDataPoint) SetIntValue(v int64) {
ms.state.AssertMutable()
var ov *internal.NumberDataPoint_AsInt
if !internal.UseProtoPooling.IsEnabled() {
ov = &internal.NumberDataPoint_AsInt{}
} else {
ov = internal.ProtoPoolNumberDataPoint_AsInt.Get().(*internal.NumberDataPoint_AsInt)
}
ov.AsInt = v
ms.orig.Value = ov
}
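// Example (editorial sketch, not part of the generated code): a
// NumberDataPoint holds either a double or an int value; setting one replaces
// the other, and ValueType reports which is active.
//
//     dp := NewNumberDataPoint()
//     dp.SetDoubleValue(0.5)
//     _ = dp.ValueType() // NumberDataPointValueTypeDouble
//     dp.SetIntValue(3)
//     _ = dp.ValueType() // NumberDataPointValueTypeInt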
// Exemplars returns the Exemplars associated with this NumberDataPoint.
func (ms NumberDataPoint) Exemplars() ExemplarSlice {
return newExemplarSlice(&ms.orig.Exemplars, ms.state)
}
// Flags returns the flags associated with this NumberDataPoint.
func (ms NumberDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this NumberDataPoint.
func (ms NumberDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) {
dest.state.AssertMutable()
internal.CopyNumberDataPoint(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// NumberDataPointSlice logically represents a slice of NumberDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewNumberDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type NumberDataPointSlice struct {
orig *[]*internal.NumberDataPoint
state *internal.State
}
func newNumberDataPointSlice(orig *[]*internal.NumberDataPoint, state *internal.State) NumberDataPointSlice {
return NumberDataPointSlice{orig: orig, state: state}
}
// NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewNumberDataPointSlice() NumberDataPointSlice {
orig := []*internal.NumberDataPoint(nil)
return newNumberDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewNumberDataPointSlice()".
func (es NumberDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es NumberDataPointSlice) At(i int) NumberDataPoint {
return newNumberDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es NumberDataPointSlice) All() iter.Seq2[int, NumberDataPoint] {
return func(yield func(int, NumberDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new NumberDataPointSlice can be initialized:
//
// es := NewNumberDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es NumberDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.NumberDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty NumberDataPoint.
// It returns the newly added NumberDataPoint.
func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewNumberDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es NumberDataPointSlice) MoveAndAppendTo(dest NumberDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteNumberDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyNumberDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the
// provided less function so that two instances of NumberDataPointSlice
// can be compared.
func (es NumberDataPointSlice) Sort(less func(a, b NumberDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceMetrics is a collection of metrics from a Resource.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetrics struct {
orig *internal.ResourceMetrics
state *internal.State
}
func newResourceMetrics(orig *internal.ResourceMetrics, state *internal.State) ResourceMetrics {
return ResourceMetrics{orig: orig, state: state}
}
// NewResourceMetrics creates a new empty ResourceMetrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceMetrics() ResourceMetrics {
return newResourceMetrics(internal.NewResourceMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceMetrics(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceMetrics.
func (ms ResourceMetrics) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics.
func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice {
return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceMetrics.
func (ms ResourceMetrics) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceMetrics.
func (ms ResourceMetrics) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) {
dest.state.AssertMutable()
internal.CopyResourceMetrics(dest.orig, ms.orig)
}
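// Example (editorial sketch, not part of the generated code): describing the
// Resource for a batch of metrics. The attribute and schema URL values below
// are assumptions for illustration.
//
//     rm := NewResourceMetrics()
//     rm.Resource().Attributes().PutStr("service.name", "checkout")
//     rm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")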
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ResourceMetricsSlice logically represents a slice of ResourceMetrics.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceMetricsSlice struct {
orig *[]*internal.ResourceMetrics
state *internal.State
}
func newResourceMetricsSlice(orig *[]*internal.ResourceMetrics, state *internal.State) ResourceMetricsSlice {
return ResourceMetricsSlice{orig: orig, state: state}
}
// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceMetricsSlice() ResourceMetricsSlice {
orig := []*internal.ResourceMetrics(nil)
return newResourceMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceMetricsSlice()".
func (es ResourceMetricsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceMetricsSlice) At(i int) ResourceMetrics {
return newResourceMetrics((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceMetricsSlice) All() iter.Seq2[int, ResourceMetrics] {
return func(yield func(int, ResourceMetrics) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ResourceMetricsSlice can be initialized:
//
// es := NewResourceMetricsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceMetricsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ResourceMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ResourceMetrics.
// It returns the newly added ResourceMetrics.
func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceMetrics())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceMetrics((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceMetricsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the
// provided less function so that two instances of ResourceMetricsSlice
// can be compared.
func (es ResourceMetricsSlice) Sort(less func(a, b ResourceMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeMetrics is a collection of metrics from an InstrumentationScope.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeMetrics function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetrics struct {
orig *internal.ScopeMetrics
state *internal.State
}
func newScopeMetrics(orig *internal.ScopeMetrics, state *internal.State) ScopeMetrics {
return ScopeMetrics{orig: orig, state: state}
}
// NewScopeMetrics creates a new empty ScopeMetrics.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeMetrics() ScopeMetrics {
return newScopeMetrics(internal.NewScopeMetrics(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeMetrics(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeMetrics.
func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Metrics returns the Metrics associated with this ScopeMetrics.
func (ms ScopeMetrics) Metrics() MetricSlice {
return newMetricSlice(&ms.orig.Metrics, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeMetrics.
func (ms ScopeMetrics) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeMetrics.
func (ms ScopeMetrics) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) {
dest.state.AssertMutable()
internal.CopyScopeMetrics(dest.orig, ms.orig)
}
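// Example (editorial sketch, not part of the generated code): identifying the
// instrumentation scope that produced a group of metrics. The name and
// version below are assumptions for illustration.
//
//     sm := NewScopeMetrics()
//     sm.Scope().SetName("example.com/instrumentation/http")
//     sm.Scope().SetVersion("1.2.3")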
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ScopeMetricsSlice logically represents a slice of ScopeMetrics.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeMetricsSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeMetricsSlice struct {
orig *[]*internal.ScopeMetrics
state *internal.State
}
func newScopeMetricsSlice(orig *[]*internal.ScopeMetrics, state *internal.State) ScopeMetricsSlice {
return ScopeMetricsSlice{orig: orig, state: state}
}
// NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeMetricsSlice() ScopeMetricsSlice {
orig := []*internal.ScopeMetrics(nil)
return newScopeMetricsSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeMetricsSlice()".
func (es ScopeMetricsSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeMetricsSlice) At(i int) ScopeMetrics {
return newScopeMetrics((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeMetricsSlice) All() iter.Seq2[int, ScopeMetrics] {
return func(yield func(int, ScopeMetrics) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ScopeMetricsSlice can be initialized:
//
// es := NewScopeMetricsSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeMetricsSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ScopeMetrics, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ScopeMetrics.
// It returns the newly added ScopeMetrics.
func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeMetrics())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeMetricsSlice) MoveAndAppendTo(dest ScopeMetricsSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeMetrics((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeMetricsPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the
// provided less function so that two instances of ScopeMetricsSlice
// can be compared.
func (es ScopeMetricsSlice) Sort(less func(a, b ScopeMetrics) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSum function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Sum struct {
orig *internal.Sum
state *internal.State
}
func newSum(orig *internal.Sum, state *internal.State) Sum {
return Sum{orig: orig, state: state}
}
// NewSum creates a new empty Sum.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSum() Sum {
return newSum(internal.NewSum(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Sum) MoveTo(dest Sum) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSum(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Sum.
func (ms Sum) DataPoints() NumberDataPointSlice {
return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// AggregationTemporality returns the aggregationtemporality associated with this Sum.
func (ms Sum) AggregationTemporality() AggregationTemporality {
return AggregationTemporality(ms.orig.AggregationTemporality)
}
// SetAggregationTemporality replaces the aggregationtemporality associated with this Sum.
func (ms Sum) SetAggregationTemporality(v AggregationTemporality) {
ms.state.AssertMutable()
ms.orig.AggregationTemporality = internal.AggregationTemporality(v)
}
// IsMonotonic returns the ismonotonic associated with this Sum.
func (ms Sum) IsMonotonic() bool {
return ms.orig.IsMonotonic
}
// SetIsMonotonic replaces the ismonotonic associated with this Sum.
func (ms Sum) SetIsMonotonic(v bool) {
ms.state.AssertMutable()
ms.orig.IsMonotonic = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Sum) CopyTo(dest Sum) {
dest.state.AssertMutable()
internal.CopySum(dest.orig, ms.orig)
}
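// Example (editorial sketch, not part of the generated code): a monotonic
// cumulative Sum models a classic counter. AggregationTemporalityCumulative
// is defined elsewhere in this package; the value is an assumption.
//
//     s := NewMetric().SetEmptySum()
//     s.SetIsMonotonic(true)
//     s.SetAggregationTemporality(AggregationTemporalityCumulative)
//     s.DataPoints().AppendEmpty().SetIntValue(128)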
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSummary function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Summary struct {
orig *internal.Summary
state *internal.State
}
func newSummary(orig *internal.Summary, state *internal.State) Summary {
return Summary{orig: orig, state: state}
}
// NewSummary creates a new empty Summary.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummary() Summary {
return newSummary(internal.NewSummary(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Summary) MoveTo(dest Summary) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummary(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// DataPoints returns the DataPoints associated with this Summary.
func (ms Summary) DataPoints() SummaryDataPointSlice {
return newSummaryDataPointSlice(&ms.orig.DataPoints, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Summary) CopyTo(dest Summary) {
dest.state.AssertMutable()
internal.CopySummary(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSummaryDataPoint function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPoint struct {
orig *internal.SummaryDataPoint
state *internal.State
}
func newSummaryDataPoint(orig *internal.SummaryDataPoint, state *internal.State) SummaryDataPoint {
return SummaryDataPoint{orig: orig, state: state}
}
// NewSummaryDataPoint creates a new empty SummaryDataPoint.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPoint() SummaryDataPoint {
return newSummaryDataPoint(internal.NewSummaryDataPoint(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummaryDataPoint(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Attributes returns the Attributes associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// StartTimestamp returns the starttimestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// Timestamp returns the timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Count returns the count associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Count() uint64 {
return ms.orig.Count
}
// SetCount replaces the count associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetCount(v uint64) {
ms.state.AssertMutable()
ms.orig.Count = v
}
// Sum returns the sum associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Sum() float64 {
return ms.orig.Sum
}
// SetSum replaces the sum associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetSum(v float64) {
ms.state.AssertMutable()
ms.orig.Sum = v
}
// QuantileValues returns the QuantileValues associated with this SummaryDataPoint.
func (ms SummaryDataPoint) QuantileValues() SummaryDataPointValueAtQuantileSlice {
return newSummaryDataPointValueAtQuantileSlice(&ms.orig.QuantileValues, ms.state)
}
// Flags returns the flags associated with this SummaryDataPoint.
func (ms SummaryDataPoint) Flags() DataPointFlags {
return DataPointFlags(ms.orig.Flags)
}
// SetFlags replaces the flags associated with this SummaryDataPoint.
func (ms SummaryDataPoint) SetFlags(v DataPointFlags) {
ms.state.AssertMutable()
ms.orig.Flags = uint32(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) {
dest.state.AssertMutable()
internal.CopySummaryDataPoint(dest.orig, ms.orig)
}
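// Example (illustrative sketch): populating a SummaryDataPoint with a count,
// a sum, and a p99 quantile. Here m is a pmetric.Metric as in the Sum example
// above; Metric.SetEmptySummary is the public way to obtain a valid Summary,
// and the numbers are made up.
//
//	sdp := m.SetEmptySummary().DataPoints().AppendEmpty()
//	sdp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
//	sdp.SetCount(120)
//	sdp.SetSum(34.5)
//	q := sdp.QuantileValues().AppendEmpty()
//	q.SetQuantile(0.99)
//	q.SetValue(1.2)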
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SummaryDataPointSlice logically represents a slice of SummaryDataPoint.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPointSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointSlice struct {
orig *[]*internal.SummaryDataPoint
state *internal.State
}
func newSummaryDataPointSlice(orig *[]*internal.SummaryDataPoint, state *internal.State) SummaryDataPointSlice {
return SummaryDataPointSlice{orig: orig, state: state}
}
// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSummaryDataPointSlice() SummaryDataPointSlice {
orig := []*internal.SummaryDataPoint(nil)
return newSummaryDataPointSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSummaryDataPointSlice()".
func (es SummaryDataPointSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SummaryDataPointSlice) At(i int) SummaryDataPoint {
return newSummaryDataPoint((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SummaryDataPointSlice) All() iter.Seq2[int, SummaryDataPoint] {
return func(yield func(int, SummaryDataPoint) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SummaryDataPointSlice can be initialized:
//
// es := NewSummaryDataPointSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SummaryDataPointSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.SummaryDataPoint, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty SummaryDataPoint.
// It returns the newly added SummaryDataPoint.
func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSummaryDataPoint())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SummaryDataPointSlice) MoveAndAppendTo(dest SummaryDataPointSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSummaryDataPoint((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySummaryDataPointPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the
// provided less function so that two instances of SummaryDataPointSlice
// can be compared.
func (es SummaryDataPointSlice) Sort(less func(a, b SummaryDataPoint) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
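// Example (hedged sketch): RemoveIf deletes in place without reallocating,
// which makes it the idiomatic way to filter a slice. Here sdps is a mutable
// pmetric.SummaryDataPointSlice and the predicate drops points that carry the
// NoRecordedValue flag.
//
//	sdps.RemoveIf(func(dp pmetric.SummaryDataPoint) bool {
//		return dp.Flags().NoRecordedValue()
//	})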
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSummaryDataPointValueAtQuantile function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantile struct {
orig *internal.SummaryDataPointValueAtQuantile
state *internal.State
}
func newSummaryDataPointValueAtQuantile(orig *internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile {
return SummaryDataPointValueAtQuantile{orig: orig, state: state}
}
// NewSummaryDataPointValueAtQuantile creates a new empty SummaryDataPointValueAtQuantile.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile {
return newSummaryDataPointValueAtQuantile(internal.NewSummaryDataPointValueAtQuantile(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQuantile) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSummaryDataPointValueAtQuantile(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Quantile returns the quantile associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) Quantile() float64 {
return ms.orig.Quantile
}
// SetQuantile replaces the quantile associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) SetQuantile(v float64) {
ms.state.AssertMutable()
ms.orig.Quantile = v
}
// Value returns the value associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) Value() float64 {
return ms.orig.Value
}
// SetValue replaces the value associated with this SummaryDataPointValueAtQuantile.
func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) {
ms.state.AssertMutable()
ms.orig.Value = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) {
dest.state.AssertMutable()
internal.CopySummaryDataPointValueAtQuantile(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetric
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SummaryDataPointValueAtQuantileSlice struct {
orig *[]*internal.SummaryDataPointValueAtQuantile
state *internal.State
}
func newSummaryDataPointValueAtQuantileSlice(orig *[]*internal.SummaryDataPointValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice {
return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state}
}
// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice {
orig := []*internal.SummaryDataPointValueAtQuantile(nil)
return newSummaryDataPointValueAtQuantileSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSummaryDataPointValueAtQuantileSlice()".
func (es SummaryDataPointValueAtQuantileSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SummaryDataPointValueAtQuantileSlice) At(i int) SummaryDataPointValueAtQuantile {
return newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SummaryDataPointValueAtQuantileSlice) All() iter.Seq2[int, SummaryDataPointValueAtQuantile] {
return func(yield func(int, SummaryDataPointValueAtQuantile) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SummaryDataPointValueAtQuantileSlice can be initialized:
//
// es := NewSummaryDataPointValueAtQuantileSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.SummaryDataPointValueAtQuantile, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty SummaryDataPointValueAtQuantile.
// It returns the newly added SummaryDataPointValueAtQuantile.
func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSummaryDataPointValueAtQuantile())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SummaryDataPointValueAtQuantileSlice) MoveAndAppendTo(dest SummaryDataPointValueAtQuantileSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointValueAtQuantile) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSummaryDataPointValueAtQuantile((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValueAtQuantileSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySummaryDataPointValueAtQuantilePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the
// provided less function so that two instances of SummaryDataPointValueAtQuantileSlice
// can be compared.
func (es SummaryDataPointValueAtQuantileSlice) Sort(less func(a, b SummaryDataPointValueAtQuantile) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
var _ Marshaler = (*JSONMarshaler)(nil)
// JSONMarshaler marshals Metrics to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalMetrics to the OTLP/JSON format.
func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
md.getOrig().MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to Metrics.
type JSONUnmarshaler struct{}
// UnmarshalMetrics from OTLP/JSON format into Metrics.
func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
md := NewMetrics()
md.getOrig().UnmarshalJSON(iter)
if iter.Error() != nil {
return Metrics{}, iter.Error()
}
otlp.MigrateMetrics(md.getOrig().ResourceMetrics)
return md, nil
}
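// Example (illustrative sketch): OTLP/JSON round trip from a consumer
// package; md is a populated pmetric.Metrics and error handling is
// abbreviated.
//
//	var marshaler pmetric.JSONMarshaler
//	buf, err := marshaler.MarshalMetrics(md)
//	// handle err ...
//	var unmarshaler pmetric.JSONUnmarshaler
//	md2, err := unmarshaler.UnmarshalMetrics(buf)
//	// handle err ...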
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
const noRecordValueMask = uint32(1)
var DefaultDataPointFlags = DataPointFlags(0)
// DataPointFlags defines how a metric aggregator reports aggregated values.
// It describes how those values relate to the time interval over which they are aggregated.
type DataPointFlags uint32
// NoRecordedValue returns true if the DataPointFlags contains the NoRecordedValue flag.
func (ms DataPointFlags) NoRecordedValue() bool {
return uint32(ms)&noRecordValueMask != 0
}
// WithNoRecordedValue returns a new DataPointFlags, with the NoRecordedValue flag set to the given value.
func (ms DataPointFlags) WithNoRecordedValue(b bool) DataPointFlags {
orig := uint32(ms)
if b {
orig |= noRecordValueMask
} else {
orig &^= noRecordValueMask
}
return DataPointFlags(orig)
}
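// Example (illustrative): DataPointFlags is a value type, so WithNoRecordedValue
// returns a new flags value instead of mutating the receiver; the result must
// be stored back onto a data point with SetFlags. Here sdp is any data point,
// e.g. a SummaryDataPoint.
//
//	flags := pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)
//	// flags.NoRecordedValue() == true
//	sdp.SetFlags(flags)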
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// MetricType specifies the type of data in a Metric.
type MetricType int32
const (
// MetricTypeEmpty means that metric type is unset.
MetricTypeEmpty MetricType = iota
MetricTypeGauge
MetricTypeSum
MetricTypeHistogram
MetricTypeExponentialHistogram
MetricTypeSummary
)
// String returns the string representation of the MetricType.
func (mdt MetricType) String() string {
switch mdt {
case MetricTypeEmpty:
return "Empty"
case MetricTypeGauge:
return "Gauge"
case MetricTypeSum:
return "Sum"
case MetricTypeHistogram:
return "Histogram"
case MetricTypeExponentialHistogram:
return "ExponentialHistogram"
case MetricTypeSummary:
return "Summary"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// MarkReadOnly marks the Metrics as shared so that no further modifications can be done on it.
func (ms Metrics) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Metrics instance is read-only.
func (ms Metrics) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// MetricCount calculates the total number of metrics.
func (ms Metrics) MetricCount() int {
metricCount := 0
rms := ms.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
metricCount += ilm.Metrics().Len()
}
}
return metricCount
}
// DataPointCount calculates the total number of data points.
func (ms Metrics) DataPointCount() (dataPointCount int) {
rms := ms.ResourceMetrics()
for i := 0; i < rms.Len(); i++ {
rm := rms.At(i)
ilms := rm.ScopeMetrics()
for j := 0; j < ilms.Len(); j++ {
ilm := ilms.At(j)
ms := ilm.Metrics()
for k := 0; k < ms.Len(); k++ {
m := ms.At(k)
switch m.Type() {
case MetricTypeGauge:
dataPointCount += m.Gauge().DataPoints().Len()
case MetricTypeSum:
dataPointCount += m.Sum().DataPoints().Len()
case MetricTypeHistogram:
dataPointCount += m.Histogram().DataPoints().Len()
case MetricTypeExponentialHistogram:
dataPointCount += m.ExponentialHistogram().DataPoints().Len()
case MetricTypeSummary:
dataPointCount += m.Summary().DataPoints().Len()
}
}
}
}
return dataPointCount
}
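// Example (illustrative): both counters traverse the full
// resource -> scope -> metric hierarchy on every call, so cache the results
// when they are needed repeatedly for large payloads. Assumes "fmt" is
// imported in the consumer package.
//
//	fmt.Printf("metrics=%d datapoints=%d\n", md.MetricCount(), md.DataPointCount())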
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
// NumberDataPointValueType specifies the type of NumberDataPoint value.
type NumberDataPointValueType int32
const (
// NumberDataPointValueTypeEmpty means that data point value is unset.
NumberDataPointValueTypeEmpty NumberDataPointValueType = iota
NumberDataPointValueTypeInt
NumberDataPointValueTypeDouble
)
// String returns the string representation of the NumberDataPointValueType.
func (nt NumberDataPointValueType) String() string {
switch nt {
case NumberDataPointValueTypeEmpty:
return "Empty"
case NumberDataPointValueTypeInt:
return "Int"
case NumberDataPointValueTypeDouble:
return "Double"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) {
size := md.getOrig().SizeProto()
buf := make([]byte, size)
_ = md.getOrig().MarshalProto(buf)
return buf, nil
}
func (e *ProtoMarshaler) MetricsSize(md Metrics) int {
return md.getOrig().SizeProto()
}
func (e *ProtoMarshaler) ResourceMetricsSize(md ResourceMetrics) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) ScopeMetricsSize(md ScopeMetrics) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) MetricSize(md Metric) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) NumberDataPointSize(md NumberDataPoint) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) SummaryDataPointSize(md SummaryDataPoint) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) HistogramDataPointSize(md HistogramDataPoint) int {
return md.orig.SizeProto()
}
func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(md ExponentialHistogramDataPoint) int {
return md.orig.SizeProto()
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
md := NewMetrics()
err := md.getOrig().UnmarshalProto(buf)
if err != nil {
return Metrics{}, err
}
return md, nil
}
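// Example (hedged sketch): OTLP protobuf round trip. MarshalMetrics sizes the
// buffer with SizeProto before encoding, so the returned slice is exactly as
// long as the payload. md is a populated pmetric.Metrics; error handling
// abbreviated.
//
//	var pm pmetric.ProtoMarshaler
//	buf, _ := pm.MarshalMetrics(md)
//	var pu pmetric.ProtoUnmarshaler
//	md2, err := pu.UnmarshalMetrics(buf)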
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetricotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *internal.ExportMetricsPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *internal.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportMetricsPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportMetricsPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedDataPoints returns the rejecteddatapoints associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedDataPoints() int64 {
return ms.orig.RejectedDataPoints
}
// SetRejectedDataPoints replaces the rejecteddatapoints associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedDataPoints(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedDataPoints = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportMetricsPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pmetricotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *internal.ExportMetricsServiceResponse
state *internal.State
}
func newExportResponse(orig *internal.ExportMetricsServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportMetricsServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportMetricsServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportMetricsServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/otelgrpc"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for OTLP-GRPC Metrics service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export pmetric.Metrics to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementation of the GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otelgrpc.NewMetricsServiceClient(cc)}
}
type grpcClient struct {
rawClient otelgrpc.MetricsServiceClient
}
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for OTLP gRPC MetricsService service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementation of the GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otelgrpc.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv})
}
type rawMetricsServer struct {
srv GRPCServer
}
func (s rawMetricsServer) Export(ctx context.Context, request *internal.ExportMetricsServiceRequest) (*internal.ExportMetricsServiceResponse, error) {
otlp.MigrateMetrics(request.ResourceMetrics)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
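// Example (hedged sketch): a minimal GRPCServer implementation. Embedding
// UnimplementedGRPCServer satisfies the unexported method and keeps the type
// forward compatible; the receiver name and response handling are
// illustrative.
//
//	type metricsServer struct {
//		pmetricotlp.UnimplementedGRPCServer
//	}
//
//	func (s *metricsServer) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
//		md := req.Metrics() // consume the payload
//		_ = md
//		return pmetricotlp.NewExportResponse(), nil
//	}
//
//	// Registration on an existing *grpc.Server:
//	// pmetricotlp.RegisterGRPCServer(grpcSrv, &metricsServer{})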
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/pmetric"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for pmetric.Metrics data.
type ExportRequest struct {
orig *internal.ExportMetricsServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &internal.ExportMetricsServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromMetrics returns an ExportRequest from pmetric.Metrics.
// Because ExportRequest is a wrapper for pmetric.Metrics,
// any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest {
return ExportRequest{
orig: internal.GetMetricsOrig(internal.MetricsWrapper(md)),
state: internal.GetMetricsState(internal.MetricsWrapper(md)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := ms.orig.UnmarshalProto(data)
if err != nil {
return err
}
otlp.MigrateMetrics(ms.orig.ResourceMetrics)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
func (ms ExportRequest) Metrics() pmetric.Metrics {
return pmetric.Metrics(internal.NewMetricsWrapper(ms.orig, ms.state))
}
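// Example (illustrative sketch): wrapping existing metrics in an
// ExportRequest for transport. The request shares the underlying data, so
// later changes to md are visible through req as well.
//
//	req := pmetricotlp.NewExportRequestFromMetrics(md)
//	body, err := req.MarshalProto()
//	// handle err, send body ...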
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return ms.orig.UnmarshalProto(data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// AggregationTemporality specifies the method of aggregating metric values,
// either DELTA (change since last report) or CUMULATIVE (total since a fixed
// start time).
type AggregationTemporality int32
const (
// AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used.
AggregationTemporalityUnspecified = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
// AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time.
AggregationTemporalityDelta = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
// AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
AggregationTemporalityCumulative = AggregationTemporality(internal.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
)
// String returns the string representation of the AggregationTemporality.
func (at AggregationTemporality) String() string {
switch at {
case AggregationTemporalityUnspecified:
return "Unspecified"
case AggregationTemporalityDelta:
return "Delta"
case AggregationTemporalityCumulative:
return "Cumulative"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
"go.opentelemetry.io/collector/pdata/pcommon"
)
type attributable interface {
AttributeIndices() pcommon.Int32Slice
}
// FromAttributeIndices builds a [pcommon.Map] containing the attributes of a
// record.
// The record can be any struct that implements an `AttributeIndices` method.
// Updates made to the returned map will not be applied back to the record.
func FromAttributeIndices(table KeyValueAndUnitSlice, record attributable, dic ProfilesDictionary) pcommon.Map {
m := pcommon.NewMap()
m.EnsureCapacity(record.AttributeIndices().Len())
for i := 0; i < record.AttributeIndices().Len(); i++ {
kv := table.At(int(record.AttributeIndices().At(i)))
key := dic.StringTable().At(int(kv.KeyStrindex()))
kv.Value().CopyTo(m.PutEmpty(key))
}
return m
}
var errTooManyAttributeTableEntries = errors.New("too many entries in AttributeTable")
// SetAttribute updates an AttributeTable, appending the attribute if it is
// not already present, and returns its index.
func SetAttribute(table KeyValueAndUnitSlice, attr KeyValueAndUnit) (int32, error) {
for j, a := range table.All() {
if a.Equal(attr) {
if j > math.MaxInt32 {
return 0, errTooManyAttributeTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyAttributeTableEntries
}
attr.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
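// Example (hedged sketch): interning an attribute with SetAttribute from a
// consumer package. String indices refer to the ProfilesDictionary string
// table, so the index used below is illustrative and must correspond to an
// existing entry there.
//
//	table := pprofile.NewKeyValueAndUnitSlice()
//	attr := pprofile.NewKeyValueAndUnit()
//	attr.SetKeyStrindex(1) // index of the key in dic.StringTable()
//	attr.Value().SetStr("go")
//	idx, err := pprofile.SetAttribute(table, attr) // 0 on first insert; reused on repeats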
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Function
func (fn Function) Equal(val Function) bool {
return fn.NameStrindex() == val.NameStrindex() &&
fn.SystemNameStrindex() == val.SystemNameStrindex() &&
fn.FilenameStrindex() == val.FilenameStrindex() &&
fn.StartLine() == val.StartLine()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyFunctionTableEntries = errors.New("too many entries in FunctionTable")
// SetFunction updates a FunctionTable, appending the function if it is not
// already present, and returns its index.
func SetFunction(table FunctionSlice, fn Function) (int32, error) {
for j, m := range table.All() {
if m.Equal(fn) {
if j > math.MaxInt32 {
return 0, errTooManyFunctionTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyFunctionTableEntries
}
fn.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
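// Example (illustrative sketch): interning a Function with SetFunction,
// mirroring SetAttribute above. The name index points into the dictionary
// string table and is illustrative.
//
//	fns := pprofile.NewFunctionSlice()
//	fn := pprofile.NewFunction()
//	fn.SetNameStrindex(2)
//	fn.SetStartLine(10)
//	idx, err := pprofile.SetFunction(fns, fn)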
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewFunction function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Function struct {
orig *internal.Function
state *internal.State
}
func newFunction(orig *internal.Function, state *internal.State) Function {
return Function{orig: orig, state: state}
}
// NewFunction creates a new empty Function.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewFunction() Function {
return newFunction(internal.NewFunction(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Function) MoveTo(dest Function) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteFunction(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// NameStrindex returns the namestrindex associated with this Function.
func (ms Function) NameStrindex() int32 {
return ms.orig.NameStrindex
}
// SetNameStrindex replaces the namestrindex associated with this Function.
func (ms Function) SetNameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.NameStrindex = v
}
// SystemNameStrindex returns the systemnamestrindex associated with this Function.
func (ms Function) SystemNameStrindex() int32 {
return ms.orig.SystemNameStrindex
}
// SetSystemNameStrindex replaces the systemnamestrindex associated with this Function.
func (ms Function) SetSystemNameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.SystemNameStrindex = v
}
// FilenameStrindex returns the filenamestrindex associated with this Function.
func (ms Function) FilenameStrindex() int32 {
return ms.orig.FilenameStrindex
}
// SetFilenameStrindex replaces the filenamestrindex associated with this Function.
func (ms Function) SetFilenameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.FilenameStrindex = v
}
// StartLine returns the startline associated with this Function.
func (ms Function) StartLine() int64 {
return ms.orig.StartLine
}
// SetStartLine replaces the startline associated with this Function.
func (ms Function) SetStartLine(v int64) {
ms.state.AssertMutable()
ms.orig.StartLine = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Function) CopyTo(dest Function) {
dest.state.AssertMutable()
internal.CopyFunction(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// FunctionSlice logically represents a slice of Function.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewFunctionSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type FunctionSlice struct {
orig *[]*internal.Function
state *internal.State
}
func newFunctionSlice(orig *[]*internal.Function, state *internal.State) FunctionSlice {
return FunctionSlice{orig: orig, state: state}
}
// NewFunctionSlice creates a FunctionSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewFunctionSlice() FunctionSlice {
orig := []*internal.Function(nil)
return newFunctionSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewFunctionSlice()".
func (es FunctionSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es FunctionSlice) At(i int) Function {
return newFunction((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es FunctionSlice) All() iter.Seq2[int, Function] {
return func(yield func(int, Function) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new FunctionSlice can be initialized:
//
// es := NewFunctionSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es FunctionSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Function, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Function.
// It returns the newly added Function.
func (es FunctionSlice) AppendEmpty() Function {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewFunction())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es FunctionSlice) MoveAndAppendTo(dest FunctionSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es FunctionSlice) RemoveIf(f func(Function) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteFunction((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es FunctionSlice) CopyTo(dest FunctionSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyFunctionPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Function elements within FunctionSlice given the
// provided less function so that two instances of FunctionSlice
// can be compared.
func (es FunctionSlice) Sort(less func(a, b Function) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// KeyValueAndUnit represents a custom 'dictionary native'
// style of encoding attributes which is more convenient
// for profiles than opentelemetry.proto.common.v1.KeyValue.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewKeyValueAndUnit function to create new instances.
// Important: zero-initialized instance is not valid for use.
type KeyValueAndUnit struct {
orig *internal.KeyValueAndUnit
state *internal.State
}
func newKeyValueAndUnit(orig *internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnit {
return KeyValueAndUnit{orig: orig, state: state}
}
// NewKeyValueAndUnit creates a new empty KeyValueAndUnit.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewKeyValueAndUnit() KeyValueAndUnit {
return newKeyValueAndUnit(internal.NewKeyValueAndUnit(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms KeyValueAndUnit) MoveTo(dest KeyValueAndUnit) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteKeyValueAndUnit(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// KeyStrindex returns the keystrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) KeyStrindex() int32 {
return ms.orig.KeyStrindex
}
// SetKeyStrindex replaces the keystrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) SetKeyStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.KeyStrindex = v
}
// Value returns the value associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) Value() pcommon.Value {
return pcommon.Value(internal.NewValueWrapper(&ms.orig.Value, ms.state))
}
// UnitStrindex returns the unitstrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) UnitStrindex() int32 {
return ms.orig.UnitStrindex
}
// SetUnitStrindex replaces the unitstrindex associated with this KeyValueAndUnit.
func (ms KeyValueAndUnit) SetUnitStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.UnitStrindex = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms KeyValueAndUnit) CopyTo(dest KeyValueAndUnit) {
dest.state.AssertMutable()
internal.CopyKeyValueAndUnit(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// KeyValueAndUnitSlice logically represents a slice of KeyValueAndUnit.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewKeyValueAndUnitSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type KeyValueAndUnitSlice struct {
orig *[]*internal.KeyValueAndUnit
state *internal.State
}
func newKeyValueAndUnitSlice(orig *[]*internal.KeyValueAndUnit, state *internal.State) KeyValueAndUnitSlice {
return KeyValueAndUnitSlice{orig: orig, state: state}
}
// NewKeyValueAndUnitSlice creates a KeyValueAndUnitSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewKeyValueAndUnitSlice() KeyValueAndUnitSlice {
orig := []*internal.KeyValueAndUnit(nil)
return newKeyValueAndUnitSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewKeyValueAndUnitSlice()".
func (es KeyValueAndUnitSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es KeyValueAndUnitSlice) At(i int) KeyValueAndUnit {
return newKeyValueAndUnit((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es KeyValueAndUnitSlice) All() iter.Seq2[int, KeyValueAndUnit] {
return func(yield func(int, KeyValueAndUnit) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new KeyValueAndUnitSlice can be initialized:
//
// es := NewKeyValueAndUnitSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es KeyValueAndUnitSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.KeyValueAndUnit, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty KeyValueAndUnit.
// It returns the newly added KeyValueAndUnit.
func (es KeyValueAndUnitSlice) AppendEmpty() KeyValueAndUnit {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewKeyValueAndUnit())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es KeyValueAndUnitSlice) MoveAndAppendTo(dest KeyValueAndUnitSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es KeyValueAndUnitSlice) RemoveIf(f func(KeyValueAndUnit) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteKeyValueAndUnit((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data(or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es KeyValueAndUnitSlice) CopyTo(dest KeyValueAndUnitSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyKeyValueAndUnitPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the KeyValueAndUnit elements within KeyValueAndUnitSlice given the
// provided less function so that two instances of KeyValueAndUnitSlice
// can be compared.
func (es KeyValueAndUnitSlice) Sort(less func(a, b KeyValueAndUnit) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
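// Illustrative usage sketch, not generated API: it walks the generic slice
// lifecycle (EnsureCapacity, AppendEmpty, All, MoveAndAppendTo) that every
// pdata slice type in this package shares. The function name is hypothetical.
func exampleKeyValueAndUnitSliceLifecycle() {
src := NewKeyValueAndUnitSlice()
// Pre-size the backing array so the appends below do not reallocate.
src.EnsureCapacity(4)
for i := 0; i < 4; i++ {
_ = src.AppendEmpty() // each element starts zero-valued
}
// Range over index-value pairs without manual Len/At bookkeeping.
for i, kvu := range src.All() {
_, _ = i, kvu
}
// Transfer ownership of all elements; src is left empty afterwards.
dest := NewKeyValueAndUnitSlice()
src.MoveAndAppendTo(dest)
// At this point src.Len() == 0 and dest.Len() == 4.
}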
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Line details a specific line in source code, linked to a function.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewLine function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Line struct {
orig *internal.Line
state *internal.State
}
func newLine(orig *internal.Line, state *internal.State) Line {
return Line{orig: orig, state: state}
}
// NewLine creates a new empty Line.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLine() Line {
return newLine(internal.NewLine(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Line) MoveTo(dest Line) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLine(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// FunctionIndex returns the functionindex associated with this Line.
func (ms Line) FunctionIndex() int32 {
return ms.orig.FunctionIndex
}
// SetFunctionIndex replaces the functionindex associated with this Line.
func (ms Line) SetFunctionIndex(v int32) {
ms.state.AssertMutable()
ms.orig.FunctionIndex = v
}
// Line returns the line associated with this Line.
func (ms Line) Line() int64 {
return ms.orig.Line
}
// SetLine replaces the line associated with this Line.
func (ms Line) SetLine(v int64) {
ms.state.AssertMutable()
ms.orig.Line = v
}
// Column returns the column associated with this Line.
func (ms Line) Column() int64 {
return ms.orig.Column
}
// SetColumn replaces the column associated with this Line.
func (ms Line) SetColumn(v int64) {
ms.state.AssertMutable()
ms.orig.Column = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Line) CopyTo(dest Line) {
dest.state.AssertMutable()
internal.CopyLine(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// LineSlice logically represents a slice of Line.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLineSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LineSlice struct {
orig *[]*internal.Line
state *internal.State
}
func newLineSlice(orig *[]*internal.Line, state *internal.State) LineSlice {
return LineSlice{orig: orig, state: state}
}
// NewLineSlice creates a LineSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLineSlice() LineSlice {
orig := []*internal.Line(nil)
return newLineSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLineSlice()".
func (es LineSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LineSlice) At(i int) Line {
return newLine((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LineSlice) All() iter.Seq2[int, Line] {
return func(yield func(int, Line) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new LineSlice can be initialized:
//
// es := NewLineSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LineSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Line, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Line.
// It returns the newly added Line.
func (es LineSlice) AppendEmpty() Line {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLine())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LineSlice) MoveAndAppendTo(dest LineSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LineSlice) RemoveIf(f func(Line) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLine((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LineSlice) CopyTo(dest LineSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLinePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Line elements within LineSlice given the
// provided less function so that two instances of LineSlice
// can be compared.
func (es LineSlice) Sort(less func(a, b Line) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
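// Illustrative sketch, not generated API: it populates a LineSlice with the
// setters defined on Line and then orders it with Sort. The function name and
// the literal values are hypothetical.
func exampleSortLinesByLineNumber() {
lines := NewLineSlice()
for _, n := range []int64{42, 7, 19} {
l := lines.AppendEmpty()
l.SetFunctionIndex(0) // index into the dictionary's FunctionTable
l.SetLine(n)
l.SetColumn(1)
}
// Sort ascending by source line number using the Line() getter.
lines.Sort(func(a, b Line) bool { return a.Line() < b.Line() })
}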
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Link represents a pointer from a profile Sample to a trace Span.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Link struct {
orig *internal.Link
state *internal.State
}
func newLink(orig *internal.Link, state *internal.State) Link {
return Link{orig: orig, state: state}
}
// NewLink creates a new empty Link.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLink() Link {
return newLink(internal.NewLink(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Link) MoveTo(dest Link) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLink(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this Link.
func (ms Link) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Link.
func (ms Link) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Link.
func (ms Link) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this Link.
func (ms Link) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = internal.SpanID(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Link) CopyTo(dest Link) {
dest.state.AssertMutable()
internal.CopyLink(dest.orig, ms.orig)
}
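// Illustrative sketch, not generated API: it shows how a Link ties a profile
// Sample back to a span via pcommon IDs. NewLink is test-only per its doc, so
// production code would obtain the Link from a LinkSlice instead; the ID
// bytes here are placeholders.
func exampleLinkToSpan() {
link := NewLink()
link.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4}))
link.SetSpanID(pcommon.SpanID([8]byte{5, 6, 7, 8}))
}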
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// LinkSlice logically represents a slice of Link.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LinkSlice struct {
orig *[]*internal.Link
state *internal.State
}
func newLinkSlice(orig *[]*internal.Link, state *internal.State) LinkSlice {
return LinkSlice{orig: orig, state: state}
}
// NewLinkSlice creates a LinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLinkSlice() LinkSlice {
orig := []*internal.Link(nil)
return newLinkSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLinkSlice()".
func (es LinkSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LinkSlice) At(i int) Link {
return newLink((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LinkSlice) All() iter.Seq2[int, Link] {
return func(yield func(int, Link) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new LinkSlice can be initialized:
//
// es := NewLinkSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LinkSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Link, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Link.
// It returns the newly added Link.
func (es LinkSlice) AppendEmpty() Link {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLink())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LinkSlice) MoveAndAppendTo(dest LinkSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LinkSlice) RemoveIf(f func(Link) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLink((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LinkSlice) CopyTo(dest LinkSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLinkPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Link elements within LinkSlice given the
// provided less function so that two instances of LinkSlice
// can be compared.
func (es LinkSlice) Sort(less func(a, b Link) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Location describes function and line table debug information.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewLocation function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Location struct {
orig *internal.Location
state *internal.State
}
func newLocation(orig *internal.Location, state *internal.State) Location {
return Location{orig: orig, state: state}
}
// NewLocation creates a new empty Location.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewLocation() Location {
return newLocation(internal.NewLocation(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Location) MoveTo(dest Location) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteLocation(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MappingIndex returns the mappingindex associated with this Location.
func (ms Location) MappingIndex() int32 {
return ms.orig.MappingIndex
}
// SetMappingIndex replaces the mappingindex associated with this Location.
func (ms Location) SetMappingIndex(v int32) {
ms.state.AssertMutable()
ms.orig.MappingIndex = v
}
// Address returns the address associated with this Location.
func (ms Location) Address() uint64 {
return ms.orig.Address
}
// SetAddress replaces the address associated with this Location.
func (ms Location) SetAddress(v uint64) {
ms.state.AssertMutable()
ms.orig.Address = v
}
// Lines returns the Lines associated with this Location.
func (ms Location) Lines() LineSlice {
return newLineSlice(&ms.orig.Lines, ms.state)
}
// AttributeIndices returns the AttributeIndices associated with this Location.
func (ms Location) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Location) CopyTo(dest Location) {
dest.state.AssertMutable()
internal.CopyLocation(dest.orig, ms.orig)
}
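// Illustrative sketch, not generated API: it fills in a Location the way a
// profile producer might, pointing at a Mapping and attaching line and
// attribute indices. All index and address values are placeholders.
func exampleBuildLocation() {
loc := NewLocation() // test-only constructor per its doc
loc.SetMappingIndex(0) // index into ProfilesDictionary.MappingTable
loc.SetAddress(0x7f0000001000)
line := loc.Lines().AppendEmpty()
line.SetFunctionIndex(0) // index into ProfilesDictionary.FunctionTable
line.SetLine(12)
loc.AttributeIndices().Append(0) // index into the AttributeTable
}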
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// LocationSlice logically represents a slice of Location.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewLocationSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type LocationSlice struct {
orig *[]*internal.Location
state *internal.State
}
func newLocationSlice(orig *[]*internal.Location, state *internal.State) LocationSlice {
return LocationSlice{orig: orig, state: state}
}
// NewLocationSlice creates a LocationSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewLocationSlice() LocationSlice {
orig := []*internal.Location(nil)
return newLocationSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewLocationSlice()".
func (es LocationSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es LocationSlice) At(i int) Location {
return newLocation((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es LocationSlice) All() iter.Seq2[int, Location] {
return func(yield func(int, Location) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new LocationSlice can be initialized:
//
// es := NewLocationSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es LocationSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Location, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Location.
// It returns the newly added Location.
func (es LocationSlice) AppendEmpty() Location {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewLocation())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es LocationSlice) MoveAndAppendTo(dest LocationSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es LocationSlice) RemoveIf(f func(Location) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteLocation((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es LocationSlice) CopyTo(dest LocationSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyLocationPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Location elements within LocationSlice given the
// provided less function so that two instances of LocationSlice
// can be compared.
func (es LocationSlice) Sort(less func(a, b Location) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewMapping function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Mapping struct {
orig *internal.Mapping
state *internal.State
}
func newMapping(orig *internal.Mapping, state *internal.State) Mapping {
return Mapping{orig: orig, state: state}
}
// NewMapping creates a new empty Mapping.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewMapping() Mapping {
return newMapping(internal.NewMapping(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Mapping) MoveTo(dest Mapping) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteMapping(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MemoryStart returns the memorystart associated with this Mapping.
func (ms Mapping) MemoryStart() uint64 {
return ms.orig.MemoryStart
}
// SetMemoryStart replaces the memorystart associated with this Mapping.
func (ms Mapping) SetMemoryStart(v uint64) {
ms.state.AssertMutable()
ms.orig.MemoryStart = v
}
// MemoryLimit returns the memorylimit associated with this Mapping.
func (ms Mapping) MemoryLimit() uint64 {
return ms.orig.MemoryLimit
}
// SetMemoryLimit replaces the memorylimit associated with this Mapping.
func (ms Mapping) SetMemoryLimit(v uint64) {
ms.state.AssertMutable()
ms.orig.MemoryLimit = v
}
// FileOffset returns the fileoffset associated with this Mapping.
func (ms Mapping) FileOffset() uint64 {
return ms.orig.FileOffset
}
// SetFileOffset replaces the fileoffset associated with this Mapping.
func (ms Mapping) SetFileOffset(v uint64) {
ms.state.AssertMutable()
ms.orig.FileOffset = v
}
// FilenameStrindex returns the filenamestrindex associated with this Mapping.
func (ms Mapping) FilenameStrindex() int32 {
return ms.orig.FilenameStrindex
}
// SetFilenameStrindex replaces the filenamestrindex associated with this Mapping.
func (ms Mapping) SetFilenameStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.FilenameStrindex = v
}
// AttributeIndices returns the AttributeIndices associated with this Mapping.
func (ms Mapping) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Mapping) CopyTo(dest Mapping) {
dest.state.AssertMutable()
internal.CopyMapping(dest.orig, ms.orig)
}
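// Illustrative sketch, not generated API: it describes one loaded binary as a
// Mapping, the way a pprof converter would. The addresses, offset, and
// string-table index are placeholders.
func exampleBuildMapping() {
m := NewMapping() // test-only constructor per its doc
m.SetMemoryStart(0x400000)
m.SetMemoryLimit(0x500000)
m.SetFileOffset(0)
m.SetFilenameStrindex(1) // index into ProfilesDictionary.StringTable
}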
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// MappingSlice logically represents a slice of Mapping.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewMappingSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type MappingSlice struct {
orig *[]*internal.Mapping
state *internal.State
}
func newMappingSlice(orig *[]*internal.Mapping, state *internal.State) MappingSlice {
return MappingSlice{orig: orig, state: state}
}
// NewMappingSlice creates a MappingSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewMappingSlice() MappingSlice {
orig := []*internal.Mapping(nil)
return newMappingSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewMappingSlice()".
func (es MappingSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es MappingSlice) At(i int) Mapping {
return newMapping((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es MappingSlice) All() iter.Seq2[int, Mapping] {
return func(yield func(int, Mapping) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new MappingSlice can be initialized:
//
// es := NewMappingSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es MappingSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Mapping, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Mapping.
// It returns the newly added Mapping.
func (es MappingSlice) AppendEmpty() Mapping {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewMapping())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es MappingSlice) MoveAndAppendTo(dest MappingSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es MappingSlice) RemoveIf(f func(Mapping) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteMapping((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es MappingSlice) CopyTo(dest MappingSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyMappingPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Mapping elements within MappingSlice given the
// provided less function so that two instances of MappingSlice
// can be compared.
func (es MappingSlice) Sort(less func(a, b Mapping) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Profile is an implementation of the pprofextended data model.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfile function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Profile struct {
orig *internal.Profile
state *internal.State
}
func newProfile(orig *internal.Profile, state *internal.State) Profile {
return Profile{orig: orig, state: state}
}
// NewProfile creates a new empty Profile.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfile() Profile {
return newProfile(internal.NewProfile(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Profile) MoveTo(dest Profile) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteProfile(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// SampleType returns the sampletype associated with this Profile.
func (ms Profile) SampleType() ValueType {
return newValueType(&ms.orig.SampleType, ms.state)
}
// Samples returns the Samples associated with this Profile.
func (ms Profile) Samples() SampleSlice {
return newSampleSlice(&ms.orig.Samples, ms.state)
}
// Time returns the time associated with this Profile.
func (ms Profile) Time() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTime replaces the time associated with this Profile.
func (ms Profile) SetTime(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Duration returns the duration associated with this Profile.
func (ms Profile) Duration() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.DurationNano)
}
// SetDuration replaces the duration associated with this Profile.
func (ms Profile) SetDuration(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.DurationNano = uint64(v)
}
// PeriodType returns the periodtype associated with this Profile.
func (ms Profile) PeriodType() ValueType {
return newValueType(&ms.orig.PeriodType, ms.state)
}
// Period returns the period associated with this Profile.
func (ms Profile) Period() int64 {
return ms.orig.Period
}
// SetPeriod replaces the period associated with this Profile.
func (ms Profile) SetPeriod(v int64) {
ms.state.AssertMutable()
ms.orig.Period = v
}
// ProfileID returns the profileid associated with this Profile.
func (ms Profile) ProfileID() ProfileID {
return ProfileID(ms.orig.ProfileId)
}
// SetProfileID replaces the profileid associated with this Profile.
func (ms Profile) SetProfileID(v ProfileID) {
ms.state.AssertMutable()
ms.orig.ProfileId = internal.ProfileID(v)
}
// DroppedAttributesCount returns the droppedattributescount associated with this Profile.
func (ms Profile) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Profile.
func (ms Profile) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// OriginalPayloadFormat returns the originalpayloadformat associated with this Profile.
func (ms Profile) OriginalPayloadFormat() string {
return ms.orig.OriginalPayloadFormat
}
// SetOriginalPayloadFormat replaces the originalpayloadformat associated with this Profile.
func (ms Profile) SetOriginalPayloadFormat(v string) {
ms.state.AssertMutable()
ms.orig.OriginalPayloadFormat = v
}
// OriginalPayload returns the OriginalPayload associated with this Profile.
func (ms Profile) OriginalPayload() pcommon.ByteSlice {
return pcommon.ByteSlice(internal.NewByteSliceWrapper(&ms.orig.OriginalPayload, ms.state))
}
// AttributeIndices returns the AttributeIndices associated with this Profile.
func (ms Profile) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Profile) CopyTo(dest Profile) {
dest.state.AssertMutable()
internal.CopyProfile(dest.orig, ms.orig)
}
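// Illustrative sketch, not generated API: it sets the scalar timing fields of
// a Profile. Note that Duration reuses pcommon.Timestamp as a nanosecond
// count rather than a point in time. All values are placeholders.
func exampleProfileTiming() {
p := NewProfile() // test-only constructor per its doc
p.SetTime(pcommon.Timestamp(1700000000 * 1e9)) // start time, Unix nanos
p.SetDuration(pcommon.Timestamp(10 * 1e9)) // 10s, expressed in nanos
p.SetPeriod(10_000_000) // e.g. one sample every 10ms for a CPU profile
}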
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Profiles is the top-level struct that is propagated through the profiles pipeline.
// Use NewProfiles to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Profiles internal.ProfilesWrapper
func newProfiles(orig *internal.ExportProfilesServiceRequest, state *internal.State) Profiles {
return Profiles(internal.NewProfilesWrapper(orig, state))
}
// NewProfiles creates a new empty Profiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfiles() Profiles {
return newProfiles(internal.NewExportProfilesServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Profiles) MoveTo(dest Profiles) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportProfilesServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceProfiles returns the ResourceProfiles associated with this Profiles.
func (ms Profiles) ResourceProfiles() ResourceProfilesSlice {
return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState())
}
// Dictionary returns the dictionary associated with this Profiles.
func (ms Profiles) Dictionary() ProfilesDictionary {
return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Profiles) CopyTo(dest Profiles) {
dest.getState().AssertMutable()
internal.CopyExportProfilesServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Profiles) getOrig() *internal.ExportProfilesServiceRequest {
return internal.GetProfilesOrig(internal.ProfilesWrapper(ms))
}
func (ms Profiles) getState() *internal.State {
return internal.GetProfilesState(internal.ProfilesWrapper(ms))
}
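// Illustrative sketch, not generated API: it assembles the minimal skeleton a
// profiles pipeline payload needs, one ResourceProfiles entry plus the shared
// dictionary. The attribute value is a placeholder, and seeding index 0 with
// the empty string follows the pprof string-table convention.
func exampleProfilesSkeleton() Profiles {
pd := NewProfiles()
rp := pd.ResourceProfiles().AppendEmpty()
rp.Resource().Attributes().PutStr("service.name", "checkout")
pd.Dictionary().StringTable().Append("")
return pd
}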
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ProfilesData represents the profiles data that can be stored in persistent storage,
// OR can be embedded by other protocols that transfer OTLP profiles data but do not
// implement the OTLP protocol.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfilesData function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ProfilesData internal.ProfilesDataWrapper
func newProfilesData(orig *internal.ProfilesData, state *internal.State) ProfilesData {
return ProfilesData(internal.NewProfilesDataWrapper(orig, state))
}
// NewProfilesData creates a new empty ProfilesData.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfilesData() ProfilesData {
return newProfilesData(internal.NewProfilesData(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ProfilesData) MoveTo(dest ProfilesData) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteProfilesData(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceProfiles returns the ResourceProfiles associated with this ProfilesData.
func (ms ProfilesData) ResourceProfiles() ResourceProfilesSlice {
return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, ms.getState())
}
// Dictionary returns the dictionary associated with this ProfilesData.
func (ms ProfilesData) Dictionary() ProfilesDictionary {
return newProfilesDictionary(&ms.getOrig().Dictionary, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ProfilesData) CopyTo(dest ProfilesData) {
dest.getState().AssertMutable()
internal.CopyProfilesData(dest.getOrig(), ms.getOrig())
}
func (ms ProfilesData) getOrig() *internal.ProfilesData {
return internal.GetProfilesDataOrig(internal.ProfilesDataWrapper(ms))
}
func (ms ProfilesData) getState() *internal.State {
return internal.GetProfilesDataState(internal.ProfilesDataWrapper(ms))
}
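// Illustrative sketch, not generated API: it demonstrates the MoveTo
// semantics shared by all pdata types, using ProfilesData.
func exampleProfilesDataMove() {
src := NewProfilesData()
src.ResourceProfiles().AppendEmpty()
dest := NewProfilesData()
// MoveTo swaps the payload into dest and resets src to its zero value.
src.MoveTo(dest)
// At this point src.ResourceProfiles().Len() == 0 and dest's is 1.
}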
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ProfilesDictionary is the reference table containing all data shared by profiles across the message being sent.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewProfilesDictionary function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ProfilesDictionary struct {
orig *internal.ProfilesDictionary
state *internal.State
}
func newProfilesDictionary(orig *internal.ProfilesDictionary, state *internal.State) ProfilesDictionary {
return ProfilesDictionary{orig: orig, state: state}
}
// NewProfilesDictionary creates a new empty ProfilesDictionary.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewProfilesDictionary() ProfilesDictionary {
return newProfilesDictionary(internal.NewProfilesDictionary(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ProfilesDictionary) MoveTo(dest ProfilesDictionary) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteProfilesDictionary(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// MappingTable returns the MappingTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) MappingTable() MappingSlice {
return newMappingSlice(&ms.orig.MappingTable, ms.state)
}
// LocationTable returns the LocationTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) LocationTable() LocationSlice {
return newLocationSlice(&ms.orig.LocationTable, ms.state)
}
// FunctionTable returns the FunctionTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) FunctionTable() FunctionSlice {
return newFunctionSlice(&ms.orig.FunctionTable, ms.state)
}
// LinkTable returns the LinkTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) LinkTable() LinkSlice {
return newLinkSlice(&ms.orig.LinkTable, ms.state)
}
// StringTable returns the StringTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) StringTable() pcommon.StringSlice {
return pcommon.StringSlice(internal.NewStringSliceWrapper(&ms.orig.StringTable, ms.state))
}
// AttributeTable returns the AttributeTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) AttributeTable() KeyValueAndUnitSlice {
return newKeyValueAndUnitSlice(&ms.orig.AttributeTable, ms.state)
}
// StackTable returns the StackTable associated with this ProfilesDictionary.
func (ms ProfilesDictionary) StackTable() StackSlice {
return newStackSlice(&ms.orig.StackTable, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ProfilesDictionary) CopyTo(dest ProfilesDictionary) {
dest.state.AssertMutable()
internal.CopyProfilesDictionary(dest.orig, ms.orig)
}
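// Illustrative sketch, not generated API: it seeds a ProfilesDictionary with
// entries that other messages reference by index. All values are
// placeholders, and reserving string index 0 for the empty string follows the
// pprof convention.
func exampleSeedDictionary(dict ProfilesDictionary) {
dict.StringTable().Append("", "main", "/usr/bin/app")
m := dict.MappingTable().AppendEmpty()
m.SetFilenameStrindex(2) // "/usr/bin/app"
loc := dict.LocationTable().AppendEmpty()
loc.SetMappingIndex(0) // first entry of MappingTable
}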
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ProfilesSlice logically represents a slice of Profile.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ProfilesSlice struct {
orig *[]*internal.Profile
state *internal.State
}
func newProfilesSlice(orig *[]*internal.Profile, state *internal.State) ProfilesSlice {
return ProfilesSlice{orig: orig, state: state}
}
// NewProfilesSlice creates a ProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewProfilesSlice() ProfilesSlice {
orig := []*internal.Profile(nil)
return newProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewProfilesSlice()".
func (es ProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ProfilesSlice) At(i int) Profile {
return newProfile((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ProfilesSlice) All() iter.Seq2[int, Profile] {
return func(yield func(int, Profile) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ProfilesSlice can be initialized:
//
// es := NewProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Profile, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Profile.
// It returns the newly added Profile.
func (es ProfilesSlice) AppendEmpty() Profile {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewProfile())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ProfilesSlice) MoveAndAppendTo(dest ProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ProfilesSlice) RemoveIf(f func(Profile) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteProfile((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ProfilesSlice) CopyTo(dest ProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyProfilePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Profile elements within ProfilesSlice given the
// provided less function so that two instances of ProfilesSlice
// can be compared.
func (es ProfilesSlice) Sort(less func(a, b Profile) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
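// Illustrative sketch, not generated API: RemoveIf is the idiomatic way to
// filter a pdata slice in place, here dropping profiles that carry no
// samples.
func exampleDropEmptyProfiles(ps ProfilesSlice) {
ps.RemoveIf(func(p Profile) bool {
return p.Samples().Len() == 0
})
}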
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceProfiles is a collection of profiles from a Resource.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewResourceProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceProfiles struct {
orig *internal.ResourceProfiles
state *internal.State
}
func newResourceProfiles(orig *internal.ResourceProfiles, state *internal.State) ResourceProfiles {
return ResourceProfiles{orig: orig, state: state}
}
// NewResourceProfiles creates a new empty ResourceProfiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceProfiles() ResourceProfiles {
return newResourceProfiles(internal.NewResourceProfiles(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceProfiles(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceProfiles.
func (ms ResourceProfiles) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles.
func (ms ResourceProfiles) ScopeProfiles() ScopeProfilesSlice {
return newScopeProfilesSlice(&ms.orig.ScopeProfiles, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceProfiles.
func (ms ResourceProfiles) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceProfiles.
func (ms ResourceProfiles) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) {
dest.state.AssertMutable()
internal.CopyResourceProfiles(dest.orig, ms.orig)
}
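// Illustrative sketch, not generated API: it fills the resource-level fields
// of a ResourceProfiles entry. The attribute value and schema URL are
// placeholders.
func exampleDescribeResource(rp ResourceProfiles) {
rp.Resource().Attributes().PutStr("host.name", "worker-1")
rp.SetSchemaUrl("https://opentelemetry.io/schemas/1.26.0")
}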
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ResourceProfilesSlice logically represents a slice of ResourceProfiles.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceProfilesSlice struct {
orig *[]*internal.ResourceProfiles
state *internal.State
}
func newResourceProfilesSlice(orig *[]*internal.ResourceProfiles, state *internal.State) ResourceProfilesSlice {
return ResourceProfilesSlice{orig: orig, state: state}
}
// NewResourceProfilesSlice creates a ResourceProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceProfilesSlice() ResourceProfilesSlice {
orig := []*internal.ResourceProfiles(nil)
return newResourceProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceProfilesSlice()".
func (es ResourceProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceProfilesSlice) At(i int) ResourceProfiles {
return newResourceProfiles((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceProfilesSlice) All() iter.Seq2[int, ResourceProfiles] {
return func(yield func(int, ResourceProfiles) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity.
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ResourceProfilesSlice can be initialized:
//
// es := NewResourceProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ResourceProfiles, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ResourceProfiles.
// It returns the newly added ResourceProfiles.
func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceProfiles())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceProfilesSlice) MoveAndAppendTo(dest ResourceProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceProfiles((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceProfilesPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the
// provided less function so that two instances of ResourceProfilesSlice
// can be compared.
func (es ResourceProfilesSlice) Sort(less func(a, b ResourceProfiles) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Sample represents each record value encountered within a profiled program.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSample function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Sample struct {
orig *internal.Sample
state *internal.State
}
func newSample(orig *internal.Sample, state *internal.State) Sample {
return Sample{orig: orig, state: state}
}
// NewSample creates a new empty Sample.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSample() Sample {
return newSample(internal.NewSample(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Sample) MoveTo(dest Sample) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSample(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// StackIndex returns the stackindex associated with this Sample.
func (ms Sample) StackIndex() int32 {
return ms.orig.StackIndex
}
// SetStackIndex replaces the stackindex associated with this Sample.
func (ms Sample) SetStackIndex(v int32) {
ms.state.AssertMutable()
ms.orig.StackIndex = v
}
// Values returns the Values associated with this Sample.
func (ms Sample) Values() pcommon.Int64Slice {
return pcommon.Int64Slice(internal.NewInt64SliceWrapper(&ms.orig.Values, ms.state))
}
// AttributeIndices returns the AttributeIndices associated with this Sample.
func (ms Sample) AttributeIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.AttributeIndices, ms.state))
}
// LinkIndex returns the linkindex associated with this Sample.
func (ms Sample) LinkIndex() int32 {
return ms.orig.LinkIndex
}
// SetLinkIndex replaces the linkindex associated with this Sample.
func (ms Sample) SetLinkIndex(v int32) {
ms.state.AssertMutable()
ms.orig.LinkIndex = v
}
// TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample.
func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice {
return pcommon.UInt64Slice(internal.NewUInt64SliceWrapper(&ms.orig.TimestampsUnixNano, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Sample) CopyTo(dest Sample) {
dest.state.AssertMutable()
internal.CopySample(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SampleSlice logically represents a slice of Sample.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSampleSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SampleSlice struct {
orig *[]*internal.Sample
state *internal.State
}
func newSampleSlice(orig *[]*internal.Sample, state *internal.State) SampleSlice {
return SampleSlice{orig: orig, state: state}
}
// NewSampleSlice creates a SampleSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSampleSlice() SampleSlice {
orig := []*internal.Sample(nil)
return newSampleSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSampleSlice()".
func (es SampleSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SampleSlice) At(i int) Sample {
return newSample((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SampleSlice) All() iter.Seq2[int, Sample] {
return func(yield func(int, Sample) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SampleSlice can be initialized:
//
// es := NewSampleSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SampleSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Sample, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Sample.
// It returns the newly added Sample.
func (es SampleSlice) AppendEmpty() Sample {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSample())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SampleSlice) MoveAndAppendTo(dest SampleSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SampleSlice) RemoveIf(f func(Sample) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSample((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
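// dropEmptyValueSamples is an illustrative sketch (not generated code) of the
// RemoveIf contract above: every Sample whose Values slice is empty is removed,
// and the kept elements stay in their original order.
func dropEmptyValueSamples(es SampleSlice) {
es.RemoveIf(func(s Sample) bool {
return s.Values().Len() == 0
})
}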
// CopyTo copies all elements from the current slice overriding the destination.
func (es SampleSlice) CopyTo(dest SampleSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySamplePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Sample elements within SampleSlice given the
// provided less function so that two instances of SampleSlice
// can be compared.
func (es SampleSlice) Sort(less func(a, b Sample) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
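// sortByStackIndex is an illustrative sketch (not generated code) of the Sort
// API above: a stable sort of the slice by each Sample's stack index.
func sortByStackIndex(es SampleSlice) {
es.Sort(func(a, b Sample) bool { return a.StackIndex() < b.StackIndex() })
}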
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeProfiles is a collection of profiles from a LibraryInstrumentation.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewScopeProfiles function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeProfiles struct {
orig *internal.ScopeProfiles
state *internal.State
}
func newScopeProfiles(orig *internal.ScopeProfiles, state *internal.State) ScopeProfiles {
return ScopeProfiles{orig: orig, state: state}
}
// NewScopeProfiles creates a new empty ScopeProfiles.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeProfiles() ScopeProfiles {
return newScopeProfiles(internal.NewScopeProfiles(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeProfiles(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeProfiles.
func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Profiles returns the Profiles associated with this ScopeProfiles.
func (ms ScopeProfiles) Profiles() ProfilesSlice {
return newProfilesSlice(&ms.orig.Profiles, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeProfiles.
func (ms ScopeProfiles) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeProfiles.
func (ms ScopeProfiles) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) {
dest.state.AssertMutable()
internal.CopyScopeProfiles(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ScopeProfilesSlice logically represents a slice of ScopeProfiles.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeProfilesSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeProfilesSlice struct {
orig *[]*internal.ScopeProfiles
state *internal.State
}
func newScopeProfilesSlice(orig *[]*internal.ScopeProfiles, state *internal.State) ScopeProfilesSlice {
return ScopeProfilesSlice{orig: orig, state: state}
}
// NewScopeProfilesSlice creates a ScopeProfilesSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeProfilesSlice() ScopeProfilesSlice {
orig := []*internal.ScopeProfiles(nil)
return newScopeProfilesSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeProfilesSlice()".
func (es ScopeProfilesSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeProfilesSlice) At(i int) ScopeProfiles {
return newScopeProfiles((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeProfilesSlice) All() iter.Seq2[int, ScopeProfiles] {
return func(yield func(int, ScopeProfiles) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ScopeProfilesSlice can be initialized:
//
// es := NewScopeProfilesSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeProfilesSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ScopeProfiles, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ScopeProfiles.
// It returns the newly added ScopeProfiles.
func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeProfiles())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeProfilesSlice) MoveAndAppendTo(dest ScopeProfilesSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeProfiles((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeProfilesPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the
// provided less function so that two instances of ScopeProfilesSlice
// can be compared.
func (es ScopeProfilesSlice) Sort(less func(a, b ScopeProfiles) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Stack represents a stack trace as a list of locations.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewStack function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Stack struct {
orig *internal.Stack
state *internal.State
}
func newStack(orig *internal.Stack, state *internal.State) Stack {
return Stack{orig: orig, state: state}
}
// NewStack creates a new empty Stack.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStack() Stack {
return newStack(internal.NewStack(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms Stack) MoveTo(dest Stack) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteStack(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// LocationIndices returns the LocationIndices associated with this Stack.
func (ms Stack) LocationIndices() pcommon.Int32Slice {
return pcommon.Int32Slice(internal.NewInt32SliceWrapper(&ms.orig.LocationIndices, ms.state))
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Stack) CopyTo(dest Stack) {
dest.state.AssertMutable()
internal.CopyStack(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// StackSlice logically represents a slice of Stack.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewStackSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type StackSlice struct {
orig *[]*internal.Stack
state *internal.State
}
func newStackSlice(orig *[]*internal.Stack, state *internal.State) StackSlice {
return StackSlice{orig: orig, state: state}
}
// NewStackSlice creates a StackSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewStackSlice() StackSlice {
orig := []*internal.Stack(nil)
return newStackSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewStackSlice()".
func (es StackSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es StackSlice) At(i int) Stack {
return newStack((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es StackSlice) All() iter.Seq2[int, Stack] {
return func(yield func(int, Stack) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new StackSlice can be initialized:
//
// es := NewStackSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es StackSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Stack, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty Stack.
// It returns the newly added Stack.
func (es StackSlice) AppendEmpty() Stack {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewStack())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es StackSlice) MoveAndAppendTo(dest StackSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es StackSlice) RemoveIf(f func(Stack) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteStack((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es StackSlice) CopyTo(dest StackSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyStackPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Stack elements within StackSlice given the
// provided less function so that two instances of StackSlice
// can be compared.
func (es StackSlice) Sort(less func(a, b Stack) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ValueType describes the type and units of a value.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewValueType function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ValueType struct {
orig *internal.ValueType
state *internal.State
}
func newValueType(orig *internal.ValueType, state *internal.State) ValueType {
return ValueType{orig: orig, state: state}
}
// NewValueType creates a new empty ValueType.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewValueType() ValueType {
return newValueType(internal.NewValueType(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ValueType) MoveTo(dest ValueType) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteValueType(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TypeStrindex returns the typestrindex associated with this ValueType.
func (ms ValueType) TypeStrindex() int32 {
return ms.orig.TypeStrindex
}
// SetTypeStrindex replaces the typestrindex associated with this ValueType.
func (ms ValueType) SetTypeStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.TypeStrindex = v
}
// UnitStrindex returns the unitstrindex associated with this ValueType.
func (ms ValueType) UnitStrindex() int32 {
return ms.orig.UnitStrindex
}
// SetUnitStrindex replaces the unitstrindex associated with this ValueType.
func (ms ValueType) SetUnitStrindex(v int32) {
ms.state.AssertMutable()
ms.orig.UnitStrindex = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ValueType) CopyTo(dest ValueType) {
dest.state.AssertMutable()
internal.CopyValueType(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofile
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ValueTypeSlice logically represents a slice of ValueType.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewValueTypeSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ValueTypeSlice struct {
orig *[]*internal.ValueType
state *internal.State
}
func newValueTypeSlice(orig *[]*internal.ValueType, state *internal.State) ValueTypeSlice {
return ValueTypeSlice{orig: orig, state: state}
}
// NewValueTypeSlice creates a ValueTypeSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewValueTypeSlice() ValueTypeSlice {
orig := []*internal.ValueType(nil)
return newValueTypeSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewValueTypeSlice()".
func (es ValueTypeSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ValueTypeSlice) At(i int) ValueType {
return newValueType((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ValueTypeSlice) All() iter.Seq2[int, ValueType] {
return func(yield func(int, ValueType) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ValueTypeSlice can be initialized:
//
// es := NewValueTypeSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ValueTypeSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ValueType, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ValueType.
// It returns the newly added ValueType.
func (es ValueTypeSlice) AppendEmpty() ValueType {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewValueType())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ValueTypeSlice) MoveAndAppendTo(dest ValueTypeSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteValueType((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyValueTypePtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ValueType elements within ValueTypeSlice given the
// provided less function so that two instances of ValueTypeSlice
// can be compared.
func (es ValueTypeSlice) Sort(less func(a, b ValueType) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals pprofile.Profiles to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalProfiles to the OTLP/JSON format.
func (*JSONMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
pd.getOrig().MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON formatted bytes to pprofile.Profiles.
type JSONUnmarshaler struct{}
// UnmarshalProfiles from OTLP/JSON format into pprofile.Profiles.
func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
pd := NewProfiles()
pd.getOrig().UnmarshalJSON(iter)
if iter.Error() != nil {
return Profiles{}, iter.Error()
}
otlp.MigrateProfiles(pd.getOrig().ResourceProfiles)
return pd, nil
}
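// roundTripJSON is an illustrative sketch (not part of this package's API)
// pairing JSONMarshaler with JSONUnmarshaler for an OTLP/JSON round trip.
func roundTripJSON(pd Profiles) (Profiles, error) {
var m JSONMarshaler
buf, err := m.MarshalProfiles(pd)
if err != nil {
return Profiles{}, err
}
var u JSONUnmarshaler
return u.UnmarshalProfiles(buf)
}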
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another KeyValueAndUnit.
// It assumes both structs refer to the same dictionary.
func (ms KeyValueAndUnit) Equal(val KeyValueAndUnit) bool {
return ms.KeyStrindex() == val.KeyStrindex() &&
ms.UnitStrindex() == val.UnitStrindex() &&
ms.Value().Equal(val.Value())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another LineSlice
func (l LineSlice) Equal(val LineSlice) bool {
if l.Len() != val.Len() {
return false
}
for i := range l.Len() {
if !l.At(i).Equal(val.At(i)) {
return false
}
}
return true
}
// Equal checks equality with another Line
func (l Line) Equal(val Line) bool {
return l.Column() == val.Column() &&
l.FunctionIndex() == val.FunctionIndex() &&
l.Line() == val.Line()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Link
func (ms Link) Equal(val Link) bool {
return ms.TraceID() == val.TraceID() &&
ms.SpanID() == val.SpanID()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyLinkTableEntries = errors.New("too many entries in LinkTable")
// SetLink updates a LinkTable, adding the value if it is not already present,
// and returns its index.
func SetLink(table LinkSlice, li Link) (int32, error) {
for j, l := range table.All() {
if l.Equal(li) {
if j > math.MaxInt32 {
return 0, errTooManyLinkTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyLinkTableEntries
}
li.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Location
func (ms Location) Equal(val Location) bool {
return ms.MappingIndex() == val.MappingIndex() &&
ms.Address() == val.Address() &&
ms.AttributeIndices().Equal(val.AttributeIndices()) &&
ms.Lines().Equal(val.Lines())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
// FromLocationIndices builds a slice containing all the locations of a Stack.
// Updates made to the returned slice will not be applied back to the Stack.
func FromLocationIndices(table LocationSlice, record Stack) LocationSlice {
m := NewLocationSlice()
m.EnsureCapacity(record.LocationIndices().Len())
for _, idx := range record.LocationIndices().All() {
l := table.At(int(idx))
l.CopyTo(m.AppendEmpty())
}
return m
}
var errTooManyLocationTableEntries = errors.New("too many entries in LocationTable")
// SetLocation updates a LocationTable, adding the value if it is not already
// present, and returns its index.
func SetLocation(table LocationSlice, loc Location) (int32, error) {
for j, a := range table.All() {
if a.Equal(loc) {
if j > math.MaxInt32 {
return 0, errTooManyLocationTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyLocationTableEntries
}
loc.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
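// appendLocation is an illustrative sketch (not part of this package's API)
// pairing SetLocation with a Stack: the location is interned into the table and
// its index is recorded on the stack's LocationIndices.
func appendLocation(table LocationSlice, st Stack, loc Location) error {
idx, err := SetLocation(table, loc)
if err != nil {
return err
}
st.LocationIndices().Append(idx)
return nil
}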
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Mapping
func (ms Mapping) Equal(val Mapping) bool {
return ms.MemoryStart() == val.MemoryStart() &&
ms.MemoryLimit() == val.MemoryLimit() &&
ms.FileOffset() == val.FileOffset() &&
ms.FilenameStrindex() == val.FilenameStrindex() &&
ms.AttributeIndices().Equal(val.AttributeIndices())
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyMappingTableEntries = errors.New("too many entries in MappingTable")
// SetMapping updates a MappingTable, adding the value if it is not already
// present, and returns its index.
func SetMapping(table MappingSlice, ma Mapping) (int32, error) {
for j, m := range table.All() {
if m.Equal(ma) {
if j > math.MaxInt32 {
return 0, errTooManyMappingTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyMappingTableEntries
}
ma.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
var _ MarshalSizer = (*ProtoMarshaler)(nil)
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalProfiles(pd Profiles) ([]byte, error) {
size := pd.getOrig().SizeProto()
buf := make([]byte, size)
_ = pd.getOrig().MarshalProto(buf)
return buf, nil
}
func (e *ProtoMarshaler) ProfilesSize(pd Profiles) int {
return pd.getOrig().SizeProto()
}
func (e *ProtoMarshaler) ResourceProfilesSize(pd ResourceProfiles) int {
return pd.orig.SizeProto()
}
func (e *ProtoMarshaler) ScopeProfilesSize(pd ScopeProfiles) int {
return pd.orig.SizeProto()
}
func (e *ProtoMarshaler) ProfileSize(pd Profile) int {
return pd.orig.SizeProto()
}
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) {
pd := NewProfiles()
err := pd.getOrig().UnmarshalProto(buf)
if err != nil {
return Profiles{}, err
}
return pd, nil
}
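// roundTripProto is an illustrative sketch (not part of this package's API)
// pairing ProtoMarshaler with ProtoUnmarshaler.
func roundTripProto(pd Profiles) (Profiles, error) {
var m ProtoMarshaler
buf, err := m.MarshalProfiles(pd)
if err != nil {
return Profiles{}, err
}
var u ProtoUnmarshaler
return u.UnmarshalProfiles(buf)
}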
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofileotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *internal.ExportProfilesPartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *internal.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportProfilesPartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportProfilesPartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedProfiles returns the rejectedprofiles associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedProfiles() int64 {
return ms.orig.RejectedProfiles
}
// SetRejectedProfiles replaces the rejectedprofiles associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedProfiles(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedProfiles = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportProfilesPartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package pprofileotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *internal.ExportProfilesServiceResponse
state *internal.State
}
func newExportResponse(orig *internal.ExportProfilesServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportProfilesServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportProfilesServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportProfilesServiceResponse(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/otelgrpc"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for the OTLP gRPC Profiles service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export pprofile.Profiles to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows implementation of the GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otelgrpc.NewProfilesServiceClient(cc)}
}
type grpcClient struct {
rawClient otelgrpc.ProfilesServiceClient
}
// Export implements the Client interface.
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for the OTLP gRPC Profiles service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows implementation of the GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otelgrpc.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv})
}
type rawProfilesServer struct {
srv GRPCServer
}
func (s rawProfilesServer) Export(ctx context.Context, request *internal.ExportProfilesServiceRequest) (*internal.ExportProfilesServiceResponse, error) {
otlp.MigrateProfiles(request.ResourceProfiles)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
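// loggingServer is an illustrative sketch (not part of this package's API) of a
// GRPCServer implementation; embedding UnimplementedGRPCServer provides the
// unexported method and keeps the implementation forward compatible.
type loggingServer struct {
UnimplementedGRPCServer
}
// Export handles one request; the SampleCount call simply shows how to reach
// the wrapped pprofile.Profiles.
func (s *loggingServer) Export(_ context.Context, req ExportRequest) (ExportResponse, error) {
_ = req.Profiles().SampleCount() // inspect or process the profiles here
return NewExportResponse(), nil
}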
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/pprofile"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for pprofile.Profiles data.
type ExportRequest struct {
orig *internal.ExportProfilesServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &internal.ExportProfilesServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromProfiles returns an ExportRequest from pprofile.Profiles.
// Because ExportRequest is a wrapper for pprofile.Profiles,
// any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest {
return ExportRequest{
orig: internal.GetProfilesOrig(internal.ProfilesWrapper(td)),
state: internal.GetProfilesState(internal.ProfilesWrapper(td)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := ms.orig.UnmarshalProto(data)
if err != nil {
return err
}
otlp.MigrateProfiles(ms.orig.ResourceProfiles)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
func (ms ExportRequest) Profiles() pprofile.Profiles {
return pprofile.Profiles(internal.NewProfilesWrapper(ms.orig, ms.state))
}
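// requestFromProfiles is an illustrative sketch (not part of this package's
// API) of the wrapper semantics noted on NewExportRequestFromProfiles: the
// request and td share storage, so no copy is made and mutations through either
// handle are visible to both.
func requestFromProfiles(td pprofile.Profiles) ExportRequest {
req := NewExportRequestFromProfiles(td)
// req.Profiles() returns the same underlying data as td.
return req
}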
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return ms.orig.UnmarshalProto(data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"encoding/hex"
"go.opentelemetry.io/collector/pdata/internal"
)
var emptyProfileID = ProfileID([16]byte{})
// ProfileID is a profile identifier.
type ProfileID [16]byte
// NewProfileIDEmpty returns a new empty (all zero bytes) ProfileID.
func NewProfileIDEmpty() ProfileID {
return emptyProfileID
}
// String returns the string representation of the ProfileID.
//
// Important: Don't rely on this method to get a string identifier of ProfileID.
// Use hex.EncodeToString explicitly instead.
// This method is meant to implement the Stringer interface for display purposes only.
func (ms ProfileID) String() string {
if ms.IsEmpty() {
return ""
}
return hex.EncodeToString(ms[:])
}
// IsEmpty returns true if the id contains only zero bytes.
func (ms ProfileID) IsEmpty() bool {
return internal.ProfileID(ms).IsEmpty()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// MarkReadOnly marks the Profiles as shared so that no further modifications can be done on it.
func (ms Profiles) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Profiles instance is read-only.
func (ms Profiles) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
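// freezeProfiles is an illustrative sketch (not part of this package's API):
// once MarkReadOnly is called, the AssertMutable checks used throughout pdata
// panic on any further mutation attempt.
func freezeProfiles(ms Profiles) {
ms.MarkReadOnly()
_ = ms.IsReadOnly() // true from here on; setters now panic
}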
// SampleCount calculates the total number of samples.
func (ms Profiles) SampleCount() int {
sampleCount := 0
rps := ms.ResourceProfiles()
for i := 0; i < rps.Len(); i++ {
rp := rps.At(i)
sps := rp.ScopeProfiles()
for j := 0; j < sps.Len(); j++ {
pcs := sps.At(j).Profiles()
for k := 0; k < pcs.Len(); k++ {
sampleCount += pcs.At(k).Samples().Len()
}
}
}
return sampleCount
}
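// sampleCountViaAll is an illustrative sketch (not part of this package's API):
// the same traversal as SampleCount above, written with the generated All()
// iterators instead of indexed loops.
func sampleCountViaAll(ms Profiles) int {
n := 0
for _, rp := range ms.ResourceProfiles().All() {
for _, sp := range rp.ScopeProfiles().All() {
for _, p := range sp.Profiles().All() {
n += p.Samples().Len()
}
}
}
return n
}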
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
// Equal checks equality with another Stack
func (ms Stack) Equal(val Stack) bool {
if ms.LocationIndices().Len() != val.LocationIndices().Len() {
return false
}
for i := range ms.LocationIndices().Len() {
if ms.LocationIndices().At(i) != val.LocationIndices().At(i) {
return false
}
}
return true
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
)
var errTooManyStackTableEntries = errors.New("too many entries in StackTable")
// SetStack updates a StackSlice, adding the stack if it is not already
// present, and returns its index.
func SetStack(table StackSlice, st Stack) (int32, error) {
for j, l := range table.All() {
if l.Equal(st) {
if j > math.MaxInt32 {
return 0, errTooManyStackTableEntries
}
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyStackTableEntries
}
st.CopyTo(table.AppendEmpty())
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile"
import (
"errors"
"math"
"go.opentelemetry.io/collector/pdata/pcommon"
)
var errTooManyStringTableEntries = errors.New("too many entries in StringTable")
// SetString updates a StringTable, adding the value if it is not already present, and returns its index.
func SetString(table pcommon.StringSlice, val string) (int32, error) {
for j, v := range table.All() {
if v == val {
if j > math.MaxInt32 {
return 0, errTooManyStringTableEntries
}
// Return the index of the existing value.
return int32(j), nil //nolint:gosec // G115 overflow checked
}
}
if table.Len() >= math.MaxInt32 {
return 0, errTooManyStringTableEntries
}
table.Append(val)
return int32(table.Len() - 1), nil //nolint:gosec // G115 overflow checked
}
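// internStrings is an illustrative sketch (not part of this package's API):
// interning several values into a string table via SetString, which reuses the
// existing index for duplicates.
func internStrings(table pcommon.StringSlice, vals ...string) ([]int32, error) {
idxs := make([]int32, 0, len(vals))
for _, v := range vals {
idx, err := SetString(table, v)
if err != nil {
return nil, err
}
idxs = append(idxs, idx)
}
return idxs, nil
}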
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ResourceSpans is a collection of spans from a Resource.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewResourceSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpans struct {
orig *internal.ResourceSpans
state *internal.State
}
func newResourceSpans(orig *internal.ResourceSpans, state *internal.State) ResourceSpans {
return ResourceSpans{orig: orig, state: state}
}
// NewResourceSpans creates a new empty ResourceSpans.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewResourceSpans() ResourceSpans {
return newResourceSpans(internal.NewResourceSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value
func (ms ResourceSpans) MoveTo(dest ResourceSpans) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteResourceSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Resource returns the resource associated with this ResourceSpans.
func (ms ResourceSpans) Resource() pcommon.Resource {
return pcommon.Resource(internal.NewResourceWrapper(&ms.orig.Resource, ms.state))
}
// ScopeSpans returns the ScopeSpans associated with this ResourceSpans.
func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice {
return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ResourceSpans.
func (ms ResourceSpans) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ResourceSpans.
func (ms ResourceSpans) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ResourceSpans) CopyTo(dest ResourceSpans) {
dest.state.AssertMutable()
internal.CopyResourceSpans(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ResourceSpansSlice logically represents a slice of ResourceSpans.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewResourceSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ResourceSpansSlice struct {
orig *[]*internal.ResourceSpans
state *internal.State
}
func newResourceSpansSlice(orig *[]*internal.ResourceSpans, state *internal.State) ResourceSpansSlice {
return ResourceSpansSlice{orig: orig, state: state}
}
// NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewResourceSpansSlice() ResourceSpansSlice {
orig := []*internal.ResourceSpans(nil)
return newResourceSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewResourceSpansSlice()".
func (es ResourceSpansSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ResourceSpansSlice) At(i int) ResourceSpans {
return newResourceSpans((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ResourceSpansSlice) All() iter.Seq2[int, ResourceSpans] {
return func(yield func(int, ResourceSpans) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ResourceSpansSlice can be initialized:
//
// es := NewResourceSpansSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ResourceSpansSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ResourceSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty will append to the end of the slice an empty ResourceSpans.
// It returns the newly added ResourceSpans.
func (es ResourceSpansSlice) AppendEmpty() ResourceSpans {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewResourceSpans())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteResourceSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move, element is at the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyResourceSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ResourceSpans elements within ResourceSpansSlice given the
// provided less function so that two instances of ResourceSpansSlice
// can be compared.
func (es ResourceSpansSlice) Sort(less func(a, b ResourceSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// ScopeSpans is a collection of spans from a LibraryInstrumentation.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewScopeSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpans struct {
orig *internal.ScopeSpans
state *internal.State
}
func newScopeSpans(orig *internal.ScopeSpans, state *internal.State) ScopeSpans {
return ScopeSpans{orig: orig, state: state}
}
// NewScopeSpans creates a new empty ScopeSpans.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeSpans() ScopeSpans {
return newScopeSpans(internal.NewScopeSpans(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms ScopeSpans) MoveTo(dest ScopeSpans) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteScopeSpans(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Scope returns the scope associated with this ScopeSpans.
func (ms ScopeSpans) Scope() pcommon.InstrumentationScope {
return pcommon.InstrumentationScope(internal.NewInstrumentationScopeWrapper(&ms.orig.Scope, ms.state))
}
// Spans returns the Spans associated with this ScopeSpans.
func (ms ScopeSpans) Spans() SpanSlice {
return newSpanSlice(&ms.orig.Spans, ms.state)
}
// SchemaUrl returns the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SchemaUrl() string {
return ms.orig.SchemaUrl
}
// SetSchemaUrl replaces the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SetSchemaUrl(v string) {
ms.state.AssertMutable()
ms.orig.SchemaUrl = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeSpans) CopyTo(dest ScopeSpans) {
dest.state.AssertMutable()
internal.CopyScopeSpans(dest.orig, ms.orig)
}
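// exampleFillScopeSpans is an illustrative sketch, not generated code: it
// populates a ScopeSpans (typically obtained via ScopeSpansSlice.AppendEmpty)
// using the accessors above. The scope name, version, and schema URL are
// arbitrary example values.
func exampleFillScopeSpans(ss ScopeSpans) {
    ss.Scope().SetName("example-instrumentation")
    ss.Scope().SetVersion("1.2.3")
    ss.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
    ss.Spans().AppendEmpty().SetName("example-operation")
}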
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// ScopeSpansSlice logically represents a slice of ScopeSpans.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpansSlice struct {
orig *[]*internal.ScopeSpans
state *internal.State
}
func newScopeSpansSlice(orig *[]*internal.ScopeSpans, state *internal.State) ScopeSpansSlice {
return ScopeSpansSlice{orig: orig, state: state}
}
// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeSpansSlice() ScopeSpansSlice {
orig := []*internal.ScopeSpans(nil)
return newScopeSpansSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewScopeSpansSlice()".
func (es ScopeSpansSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es ScopeSpansSlice) At(i int) ScopeSpans {
return newScopeSpans((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es ScopeSpansSlice) All() iter.Seq2[int, ScopeSpans] {
return func(yield func(int, ScopeSpans) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new ScopeSpansSlice can be initialized:
//
// es := NewScopeSpansSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.ScopeSpans, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty ScopeSpans to the end of the slice.
// It returns the newly added ScopeSpans.
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewScopeSpans())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeSpansSlice) MoveAndAppendTo(dest ScopeSpansSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteScopeSpans((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move; the element is already in the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopyScopeSpansPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the
// provided less function so that two instances of ScopeSpansSlice
// can be compared.
func (es ScopeSpansSlice) Sort(less func(a, b ScopeSpans) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
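// exampleCountSpans is an illustrative sketch, not generated code: it uses
// the All iterator defined above (range-over-func, Go 1.23+) to count the
// spans across every ScopeSpans in the slice.
func exampleCountSpans(sss ScopeSpansSlice) int {
    total := 0
    for _, ss := range sss.All() {
        total += ss.Spans().Len()
    }
    return total
}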
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// Span represents a single operation within a trace.
// See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSpan function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Span struct {
orig *internal.Span
state *internal.State
}
func newSpan(orig *internal.Span, state *internal.State) Span {
return Span{orig: orig, state: state}
}
// NewSpan creates a new empty Span.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpan() Span {
return newSpan(internal.NewSpan(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms Span) MoveTo(dest Span) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpan(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this Span.
func (ms Span) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this Span.
func (ms Span) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this Span.
func (ms Span) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this Span.
func (ms Span) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this Span.
func (ms Span) TraceState() pcommon.TraceState {
return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// ParentSpanID returns the parentspanid associated with this Span.
func (ms Span) ParentSpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.ParentSpanId)
}
// SetParentSpanID replaces the parentspanid associated with this Span.
func (ms Span) SetParentSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.ParentSpanId = internal.SpanID(v)
}
// Flags returns the flags associated with this Span.
func (ms Span) Flags() uint32 {
return ms.orig.Flags
}
// SetFlags replaces the flags associated with this Span.
func (ms Span) SetFlags(v uint32) {
ms.state.AssertMutable()
ms.orig.Flags = v
}
// Name returns the name associated with this Span.
func (ms Span) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this Span.
func (ms Span) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Kind returns the kind associated with this Span.
func (ms Span) Kind() SpanKind {
return SpanKind(ms.orig.Kind)
}
// SetKind replaces the kind associated with this Span.
func (ms Span) SetKind(v SpanKind) {
ms.state.AssertMutable()
ms.orig.Kind = internal.SpanKind(v)
}
// StartTimestamp returns the starttimestamp associated with this Span.
func (ms Span) StartTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.StartTimeUnixNano)
}
// SetStartTimestamp replaces the starttimestamp associated with this Span.
func (ms Span) SetStartTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.StartTimeUnixNano = uint64(v)
}
// EndTimestamp returns the endtimestamp associated with this Span.
func (ms Span) EndTimestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.EndTimeUnixNano)
}
// SetEndTimestamp replaces the endtimestamp associated with this Span.
func (ms Span) SetEndTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.EndTimeUnixNano = uint64(v)
}
// Attributes returns the Attributes associated with this Span.
func (ms Span) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this Span.
func (ms Span) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this Span.
func (ms Span) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Events returns the Events associated with this Span.
func (ms Span) Events() SpanEventSlice {
return newSpanEventSlice(&ms.orig.Events, ms.state)
}
// DroppedEventsCount returns the droppedeventscount associated with this Span.
func (ms Span) DroppedEventsCount() uint32 {
return ms.orig.DroppedEventsCount
}
// SetDroppedEventsCount replaces the droppedeventscount associated with this Span.
func (ms Span) SetDroppedEventsCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedEventsCount = v
}
// Links returns the Links associated with this Span.
func (ms Span) Links() SpanLinkSlice {
return newSpanLinkSlice(&ms.orig.Links, ms.state)
}
// DroppedLinksCount returns the droppedlinkscount associated with this Span.
func (ms Span) DroppedLinksCount() uint32 {
return ms.orig.DroppedLinksCount
}
// SetDroppedLinksCount replaces the droppedlinkscount associated with this Span.
func (ms Span) SetDroppedLinksCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedLinksCount = v
}
// Status returns the status associated with this Span.
func (ms Span) Status() Status {
return newStatus(&ms.orig.Status, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Span) CopyTo(dest Span) {
dest.state.AssertMutable()
internal.CopySpan(dest.orig, ms.orig)
}
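// exampleFillSpan is an illustrative sketch, not generated code: it shows the
// setters above combined to populate a Span obtained via
// SpanSlice.AppendEmpty. The IDs, timestamps, name, and attribute key are
// arbitrary example values.
func exampleFillSpan(span Span) {
    span.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
    span.SetSpanID(pcommon.SpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
    span.SetName("GET /items")
    span.SetKind(SpanKindServer)
    span.SetStartTimestamp(pcommon.Timestamp(1700000000000000000))
    span.SetEndTimestamp(pcommon.Timestamp(1700000000500000000))
    span.Attributes().PutStr("http.request.method", "GET")
    span.Status().SetCode(StatusCodeOk)
}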
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SpanEvent is a time-stamped annotation of the span, consisting of a user-supplied
// text description and key-value pairs. See OTLP for the event definition.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSpanEvent function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEvent struct {
orig *internal.SpanEvent
state *internal.State
}
func newSpanEvent(orig *internal.SpanEvent, state *internal.State) SpanEvent {
return SpanEvent{orig: orig, state: state}
}
// NewSpanEvent creates a new empty SpanEvent.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanEvent() SpanEvent {
return newSpanEvent(internal.NewSpanEvent(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms SpanEvent) MoveTo(dest SpanEvent) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpanEvent(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Timestamp returns the timestamp associated with this SpanEvent.
func (ms SpanEvent) Timestamp() pcommon.Timestamp {
return pcommon.Timestamp(ms.orig.TimeUnixNano)
}
// SetTimestamp replaces the timestamp associated with this SpanEvent.
func (ms SpanEvent) SetTimestamp(v pcommon.Timestamp) {
ms.state.AssertMutable()
ms.orig.TimeUnixNano = uint64(v)
}
// Name returns the name associated with this SpanEvent.
func (ms SpanEvent) Name() string {
return ms.orig.Name
}
// SetName replaces the name associated with this SpanEvent.
func (ms SpanEvent) SetName(v string) {
ms.state.AssertMutable()
ms.orig.Name = v
}
// Attributes returns the Attributes associated with this SpanEvent.
func (ms SpanEvent) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent.
func (ms SpanEvent) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent.
func (ms SpanEvent) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanEvent) CopyTo(dest SpanEvent) {
dest.state.AssertMutable()
internal.CopySpanEvent(dest.orig, ms.orig)
}
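// exampleAddEvent is an illustrative sketch, not generated code: it attaches
// a time-stamped event with one attribute to a Span via
// SpanEventSlice.AppendEmpty and the setters above. The event name, timestamp,
// and attribute are arbitrary example values.
func exampleAddEvent(span Span) {
    ev := span.Events().AppendEmpty()
    ev.SetName("exception")
    ev.SetTimestamp(pcommon.Timestamp(1700000000250000000))
    ev.Attributes().PutStr("exception.message", "connection reset")
}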
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SpanEventSlice logically represents a slice of SpanEvent.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanEventSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEventSlice struct {
orig *[]*internal.SpanEvent
state *internal.State
}
func newSpanEventSlice(orig *[]*internal.SpanEvent, state *internal.State) SpanEventSlice {
return SpanEventSlice{orig: orig, state: state}
}
// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanEventSlice() SpanEventSlice {
orig := []*internal.SpanEvent(nil)
return newSpanEventSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanEventSlice()".
func (es SpanEventSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanEventSlice) At(i int) SpanEvent {
return newSpanEvent((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanEventSlice) All() iter.Seq2[int, SpanEvent] {
return func(yield func(int, SpanEvent) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SpanEventSlice can be initialized:
//
// es := NewSpanEventSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanEventSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.SpanEvent, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty SpanEvent to the end of the slice.
// It returns the newly added SpanEvent.
func (es SpanEventSlice) AppendEmpty() SpanEvent {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpanEvent())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpanEvent((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move; the element is already in the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanEventSlice) CopyTo(dest SpanEventSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpanEventPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanEvent elements within SpanEventSlice given the
// provided less function so that two instances of SpanEventSlice
// can be compared.
func (es SpanEventSlice) Sort(less func(a, b SpanEvent) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/pcommon"
)
// SpanLink is a pointer from the current span to another span in the same trace or in a
// different trace.
// See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewSpanLink function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLink struct {
orig *internal.SpanLink
state *internal.State
}
func newSpanLink(orig *internal.SpanLink, state *internal.State) SpanLink {
return SpanLink{orig: orig, state: state}
}
// NewSpanLink creates a new empty SpanLink.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewSpanLink() SpanLink {
return newSpanLink(internal.NewSpanLink(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms SpanLink) MoveTo(dest SpanLink) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteSpanLink(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// TraceID returns the traceid associated with this SpanLink.
func (ms SpanLink) TraceID() pcommon.TraceID {
return pcommon.TraceID(ms.orig.TraceId)
}
// SetTraceID replaces the traceid associated with this SpanLink.
func (ms SpanLink) SetTraceID(v pcommon.TraceID) {
ms.state.AssertMutable()
ms.orig.TraceId = internal.TraceID(v)
}
// SpanID returns the spanid associated with this SpanLink.
func (ms SpanLink) SpanID() pcommon.SpanID {
return pcommon.SpanID(ms.orig.SpanId)
}
// SetSpanID replaces the spanid associated with this SpanLink.
func (ms SpanLink) SetSpanID(v pcommon.SpanID) {
ms.state.AssertMutable()
ms.orig.SpanId = internal.SpanID(v)
}
// TraceState returns the tracestate associated with this SpanLink.
func (ms SpanLink) TraceState() pcommon.TraceState {
return pcommon.TraceState(internal.NewTraceStateWrapper(&ms.orig.TraceState, ms.state))
}
// Attributes returns the Attributes associated with this SpanLink.
func (ms SpanLink) Attributes() pcommon.Map {
return pcommon.Map(internal.NewMapWrapper(&ms.orig.Attributes, ms.state))
}
// DroppedAttributesCount returns the droppedattributescount associated with this SpanLink.
func (ms SpanLink) DroppedAttributesCount() uint32 {
return ms.orig.DroppedAttributesCount
}
// SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanLink.
func (ms SpanLink) SetDroppedAttributesCount(v uint32) {
ms.state.AssertMutable()
ms.orig.DroppedAttributesCount = v
}
// Flags returns the flags associated with this SpanLink.
func (ms SpanLink) Flags() uint32 {
return ms.orig.Flags
}
// SetFlags replaces the flags associated with this SpanLink.
func (ms SpanLink) SetFlags(v uint32) {
ms.state.AssertMutable()
ms.orig.Flags = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms SpanLink) CopyTo(dest SpanLink) {
dest.state.AssertMutable()
internal.CopySpanLink(dest.orig, ms.orig)
}
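// exampleAddLink is an illustrative sketch, not generated code: it links the
// given span to another span identified by traceID and spanID via
// SpanLinkSlice.AppendEmpty and the setters above. The attribute key and
// value are arbitrary examples.
func exampleAddLink(span Span, traceID pcommon.TraceID, spanID pcommon.SpanID) {
    link := span.Links().AppendEmpty()
    link.SetTraceID(traceID)
    link.SetSpanID(spanID)
    link.Attributes().PutStr("link.kind", "follows-from")
}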
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SpanLinkSlice logically represents a slice of SpanLink.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLinkSlice struct {
orig *[]*internal.SpanLink
state *internal.State
}
func newSpanLinkSlice(orig *[]*internal.SpanLink, state *internal.State) SpanLinkSlice {
return SpanLinkSlice{orig: orig, state: state}
}
// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanLinkSlice() SpanLinkSlice {
orig := []*internal.SpanLink(nil)
return newSpanLinkSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanLinkSlice()".
func (es SpanLinkSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanLinkSlice) At(i int) SpanLink {
return newSpanLink((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanLinkSlice) All() iter.Seq2[int, SpanLink] {
return func(yield func(int, SpanLink) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SpanLinkSlice can be initialized:
//
// es := NewSpanLinkSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanLinkSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.SpanLink, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty SpanLink to the end of the slice.
// It returns the newly added SpanLink.
func (es SpanLinkSlice) AppendEmpty() SpanLink {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpanLink())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpanLink((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move; the element is already in the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpanLinkPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the SpanLink elements within SpanLinkSlice given the
// provided less function so that two instances of SpanLinkSlice
// can be compared.
func (es SpanLinkSlice) Sort(less func(a, b SpanLink) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
)
// SpanSlice logically represents a slice of Span.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanSlice struct {
orig *[]*internal.Span
state *internal.State
}
func newSpanSlice(orig *[]*internal.Span, state *internal.State) SpanSlice {
return SpanSlice{orig: orig, state: state}
}
// NewSpanSlice creates a SpanSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanSlice() SpanSlice {
orig := []*internal.Span(nil)
return newSpanSlice(&orig, internal.NewState())
}
// Len returns the number of elements in the slice.
//
// Returns "0" for a newly instance created with "NewSpanSlice()".
func (es SpanSlice) Len() int {
return len(*es.orig)
}
// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
// for i := 0; i < es.Len(); i++ {
// e := es.At(i)
// ... // Do something with the element
// }
func (es SpanSlice) At(i int) Span {
return newSpan((*es.orig)[i], es.state)
}
// All returns an iterator over index-value pairs in the slice.
//
// for i, v := range es.All() {
// ... // Do something with index-value pair
// }
func (es SpanSlice) All() iter.Seq2[int, Span] {
return func(yield func(int, Span) bool) {
for i := 0; i < es.Len(); i++ {
if !yield(i, es.At(i)) {
return
}
}
}
}
// EnsureCapacity ensures the slice has at least the specified capacity:
// 1. If newCap <= cap, the capacity is unchanged.
// 2. If newCap > cap, the capacity is expanded to equal newCap.
//
// Here is how a new SpanSlice can be initialized:
//
// es := NewSpanSlice()
// es.EnsureCapacity(4)
// for i := 0; i < 4; i++ {
// e := es.AppendEmpty()
// // Here should set all the values for e.
// }
func (es SpanSlice) EnsureCapacity(newCap int) {
es.state.AssertMutable()
oldCap := cap(*es.orig)
if newCap <= oldCap {
return
}
newOrig := make([]*internal.Span, len(*es.orig), newCap)
copy(newOrig, *es.orig)
*es.orig = newOrig
}
// AppendEmpty appends an empty Span to the end of the slice.
// It returns the newly added Span.
func (es SpanSlice) AppendEmpty() Span {
es.state.AssertMutable()
*es.orig = append(*es.orig, internal.NewSpan())
return es.At(es.Len() - 1)
}
// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) {
es.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if es.orig == dest.orig {
return
}
if *dest.orig == nil {
// We can simply move the entire slice and avoid any allocations.
*dest.orig = *es.orig
} else {
*dest.orig = append(*dest.orig, *es.orig...)
}
*es.orig = nil
}
// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanSlice) RemoveIf(f func(Span) bool) {
es.state.AssertMutable()
newLen := 0
for i := 0; i < len(*es.orig); i++ {
if f(es.At(i)) {
internal.DeleteSpan((*es.orig)[i], true)
(*es.orig)[i] = nil
continue
}
if newLen == i {
// Nothing to move; the element is already in the right place.
newLen++
continue
}
(*es.orig)[newLen] = (*es.orig)[i]
// Cannot delete here since we just move the data (or pointer to data) to a different position in the slice.
(*es.orig)[i] = nil
newLen++
}
*es.orig = (*es.orig)[:newLen]
}
// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanSlice) CopyTo(dest SpanSlice) {
dest.state.AssertMutable()
if es.orig == dest.orig {
return
}
*dest.orig = internal.CopySpanPtrSlice(*dest.orig, *es.orig)
}
// Sort sorts the Span elements within SpanSlice given the
// provided less function so that two instances of SpanSlice
// can be compared.
func (es SpanSlice) Sort(less func(a, b Span) bool) {
es.state.AssertMutable()
sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}
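// exampleTransferSpans is an illustrative sketch, not generated code: it
// contrasts the two transfer operations above. CopyTo deep-copies into dst
// and leaves src intact, while MoveAndAppendTo appends src's elements to dst
// and clears src.
func exampleTransferSpans(src, dst SpanSlice) {
    src.CopyTo(dst)          // dst now holds a deep copy; src is unchanged
    src.MoveAndAppendTo(dst) // dst gains src's original elements; src is now empty
}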
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Status is an optional final status for this span. Semantically, when Status is not
// set, the span ended without errors and the status code is assumed to be Unset (code = 0).
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewStatus function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Status struct {
orig *internal.Status
state *internal.State
}
func newStatus(orig *internal.Status, state *internal.State) Status {
return Status{orig: orig, state: state}
}
// NewStatus creates a new empty Status.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewStatus() Status {
return newStatus(internal.NewStatus(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms Status) MoveTo(dest Status) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteStatus(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// Message returns the message associated with this Status.
func (ms Status) Message() string {
return ms.orig.Message
}
// SetMessage replaces the message associated with this Status.
func (ms Status) SetMessage(v string) {
ms.state.AssertMutable()
ms.orig.Message = v
}
// Code returns the code associated with this Status.
func (ms Status) Code() StatusCode {
return StatusCode(ms.orig.Code)
}
// SetCode replaces the code associated with this Status.
func (ms Status) SetCode(v StatusCode) {
ms.state.AssertMutable()
ms.orig.Code = internal.StatusCode(v)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Status) CopyTo(dest Status) {
dest.state.AssertMutable()
internal.CopyStatus(dest.orig, ms.orig)
}
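// exampleRecordError is an illustrative sketch, not generated code: it marks
// a span's Status as an error with a message using the setters above. The
// message text is an arbitrary example.
func exampleRecordError(span Span) {
    st := span.Status()
    st.SetCode(StatusCodeError)
    st.SetMessage("upstream timeout")
}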
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// Traces is the top-level struct that is propagated through the traces pipeline.
// Use NewTraces to create a new instance; a zero-initialized instance is not valid for use.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewTraces function to create new instances.
// Important: zero-initialized instance is not valid for use.
type Traces internal.TracesWrapper
func newTraces(orig *internal.ExportTraceServiceRequest, state *internal.State) Traces {
return Traces(internal.NewTracesWrapper(orig, state))
}
// NewTraces creates a new empty Traces.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewTraces() Traces {
return newTraces(internal.NewExportTraceServiceRequest(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms Traces) MoveTo(dest Traces) {
ms.getState().AssertMutable()
dest.getState().AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.getOrig() == dest.getOrig() {
return
}
internal.DeleteExportTraceServiceRequest(dest.getOrig(), false)
*dest.getOrig(), *ms.getOrig() = *ms.getOrig(), *dest.getOrig()
}
// ResourceSpans returns the ResourceSpans associated with this Traces.
func (ms Traces) ResourceSpans() ResourceSpansSlice {
return newResourceSpansSlice(&ms.getOrig().ResourceSpans, ms.getState())
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms Traces) CopyTo(dest Traces) {
dest.getState().AssertMutable()
internal.CopyExportTraceServiceRequest(dest.getOrig(), ms.getOrig())
}
func (ms Traces) getOrig() *internal.ExportTraceServiceRequest {
return internal.GetTracesOrig(internal.TracesWrapper(ms))
}
func (ms Traces) getState() *internal.State {
return internal.GetTracesState(internal.TracesWrapper(ms))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// JSONMarshaler marshals Traces to JSON bytes using the OTLP/JSON format.
type JSONMarshaler struct{}
// MarshalTraces to the OTLP/JSON format.
func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
td.getOrig().MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// JSONUnmarshaler unmarshals OTLP/JSON-formatted bytes to Traces.
type JSONUnmarshaler struct{}
// UnmarshalTraces from OTLP/JSON format into Traces.
func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
iter := json.BorrowIterator(buf)
defer json.ReturnIterator(iter)
td := NewTraces()
td.getOrig().UnmarshalJSON(iter)
if iter.Error() != nil {
return Traces{}, iter.Error()
}
otlp.MigrateTraces(td.getOrig().ResourceSpans)
return td, nil
}
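// exampleJSONRoundTrip is an illustrative sketch, not part of the API: it
// marshals Traces to OTLP/JSON bytes and parses them back using the
// marshaler types above.
func exampleJSONRoundTrip(td Traces) (Traces, error) {
    var m JSONMarshaler
    buf, err := m.MarshalTraces(td)
    if err != nil {
        return Traces{}, err
    }
    var u JSONUnmarshaler
    return u.UnmarshalTraces(buf)
}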
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
var _ MarshalSizer = (*ProtoMarshaler)(nil)
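// ProtoMarshaler marshals Traces to OTLP protobuf bytes and reports their encoded sizes.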
type ProtoMarshaler struct{}
func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) {
size := td.getOrig().SizeProto()
buf := make([]byte, size)
_ = td.getOrig().MarshalProto(buf)
return buf, nil
}
func (e *ProtoMarshaler) TracesSize(td Traces) int {
return td.getOrig().SizeProto()
}
func (e *ProtoMarshaler) ResourceSpansSize(td ResourceSpans) int {
return td.orig.SizeProto()
}
func (e *ProtoMarshaler) ScopeSpansSize(td ScopeSpans) int {
return td.orig.SizeProto()
}
func (e *ProtoMarshaler) SpanSize(td Span) int {
return td.orig.SizeProto()
}
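// ProtoUnmarshaler unmarshals OTLP protobuf bytes to Traces.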
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
td := NewTraces()
err := td.getOrig().UnmarshalProto(buf)
if err != nil {
return Traces{}, err
}
return td, nil
}
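// exampleProtoRoundTrip is an illustrative sketch, not part of the API: it
// serializes Traces to OTLP protobuf bytes (the buffer is pre-sized via
// SizeProto, so the marshaler above always returns a nil error) and parses
// them back with the types above.
func exampleProtoRoundTrip(td Traces) (Traces, error) {
    var m ProtoMarshaler
    buf, err := m.MarshalTraces(td)
    if err != nil {
        return Traces{}, err
    }
    var u ProtoUnmarshaler
    return u.UnmarshalTraces(buf)
}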
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptraceotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportPartialSuccess represents the details of a partially successful export request.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportPartialSuccess function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportPartialSuccess struct {
orig *internal.ExportTracePartialSuccess
state *internal.State
}
func newExportPartialSuccess(orig *internal.ExportTracePartialSuccess, state *internal.State) ExportPartialSuccess {
return ExportPartialSuccess{orig: orig, state: state}
}
// NewExportPartialSuccess creates a new empty ExportPartialSuccess.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportPartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(internal.NewExportTracePartialSuccess(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportTracePartialSuccess(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// RejectedSpans returns the rejectedspans associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) RejectedSpans() int64 {
return ms.orig.RejectedSpans
}
// SetRejectedSpans replaces the rejectedspans associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetRejectedSpans(v int64) {
ms.state.AssertMutable()
ms.orig.RejectedSpans = v
}
// ErrorMessage returns the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) ErrorMessage() string {
return ms.orig.ErrorMessage
}
// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess.
func (ms ExportPartialSuccess) SetErrorMessage(v string) {
ms.state.AssertMutable()
ms.orig.ErrorMessage = v
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) {
dest.state.AssertMutable()
internal.CopyExportTracePartialSuccess(dest.orig, ms.orig)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Code generated by "internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptraceotlp
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// ExportResponse represents the response for gRPC/HTTP client/server.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewExportResponse function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ExportResponse struct {
orig *internal.ExportTraceServiceResponse
state *internal.State
}
func newExportResponse(orig *internal.ExportTraceServiceResponse, state *internal.State) ExportResponse {
return ExportResponse{orig: orig, state: state}
}
// NewExportResponse creates a new empty ExportResponse.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewExportResponse() ExportResponse {
return newExportResponse(internal.NewExportTraceServiceResponse(), internal.NewState())
}
// MoveTo moves all properties from the current struct to the destination, overriding it and
// resetting the current instance to its zero value.
func (ms ExportResponse) MoveTo(dest ExportResponse) {
ms.state.AssertMutable()
dest.state.AssertMutable()
// If they point to the same data, they are the same; nothing to do.
if ms.orig == dest.orig {
return
}
internal.DeleteExportTraceServiceResponse(dest.orig, false)
*dest.orig, *ms.orig = *ms.orig, *dest.orig
}
// PartialSuccess returns the partialsuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}
// CopyTo copies all properties from the current struct overriding the destination.
func (ms ExportResponse) CopyTo(dest ExportResponse) {
dest.state.AssertMutable()
internal.CopyExportTraceServiceResponse(dest.orig, ms.orig)
}
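// examplePartialSuccess is an illustrative sketch, not generated code: a zero
// RejectedSpans count means the server accepted everything; otherwise
// ErrorMessage explains why some spans were dropped.
func examplePartialSuccess(resp ExportResponse) (rejected int64, reason string) {
    ps := resp.PartialSuccess()
    return ps.RejectedSpans(), ps.ErrorMessage()
}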
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/otelgrpc"
"go.opentelemetry.io/collector/pdata/internal/otlp"
)
// GRPCClient is the client API for the OTLP gRPC Traces service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GRPCClient interface {
// Export ptrace.Traces to the server.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error)
// unexported disallows external implementations of GRPCClient.
unexported()
}
// NewGRPCClient returns a new GRPCClient connected using the given connection.
func NewGRPCClient(cc *grpc.ClientConn) GRPCClient {
return &grpcClient{rawClient: otelgrpc.NewTraceServiceClient(cc)}
}
type grpcClient struct {
rawClient otelgrpc.TraceServiceClient
}
// Export implements the Client interface.
func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) {
rsp, err := c.rawClient.Export(ctx, request.orig, opts...)
if err != nil {
return ExportResponse{}, err
}
return ExportResponse{orig: rsp, state: internal.NewState()}, err
}
func (c *grpcClient) unexported() {}
// GRPCServer is the server API for the OTLP gRPC Traces service.
// Implementations MUST embed UnimplementedGRPCServer.
type GRPCServer interface {
// Export is called every time a new request is received.
//
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(context.Context, ExportRequest) (ExportResponse, error)
// unexported disallows external implementations of GRPCServer.
unexported()
}
var _ GRPCServer = (*UnimplementedGRPCServer)(nil)
// UnimplementedGRPCServer MUST be embedded to have forward-compatible implementations.
type UnimplementedGRPCServer struct{}
func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) {
return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedGRPCServer) unexported() {}
// RegisterGRPCServer registers the GRPCServer to the grpc.Server.
func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) {
otelgrpc.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv})
}
type rawTracesServer struct {
srv GRPCServer
}
func (s rawTracesServer) Export(ctx context.Context, request *internal.ExportTraceServiceRequest) (*internal.ExportTraceServiceResponse, error) {
otlp.MigrateTraces(request.ResourceSpans)
rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: internal.NewState()})
return rsp.orig, err
}
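// exampleTracesServer is an illustrative sketch, not part of the API: a
// minimal GRPCServer implementation. It embeds UnimplementedGRPCServer, as
// the interface requires, and would be wired up with
// RegisterGRPCServer(grpcServer, &exampleTracesServer{}).
type exampleTracesServer struct {
    UnimplementedGRPCServer
}

// Export receives a request, extracts the wrapped ptrace.Traces (a real
// server would hand them to its pipeline here), and replies with an empty
// response.
func (s *exampleTracesServer) Export(_ context.Context, req ExportRequest) (ExportResponse, error) {
    _ = req.Traces()
    return NewExportResponse(), nil
}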
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal"
"go.opentelemetry.io/collector/pdata/internal/json"
"go.opentelemetry.io/collector/pdata/internal/otlp"
"go.opentelemetry.io/collector/pdata/ptrace"
)
// ExportRequest represents the request for gRPC/HTTP client/server.
// It's a wrapper for ptrace.Traces data.
type ExportRequest struct {
orig *internal.ExportTraceServiceRequest
state *internal.State
}
// NewExportRequest returns an empty ExportRequest.
func NewExportRequest() ExportRequest {
return ExportRequest{
orig: &internal.ExportTraceServiceRequest{},
state: internal.NewState(),
}
}
// NewExportRequestFromTraces returns an ExportRequest from ptrace.Traces.
// Because ExportRequest is a wrapper for ptrace.Traces,
// any changes to the provided Traces struct will be reflected in the ExportRequest and vice versa.
func NewExportRequestFromTraces(td ptrace.Traces) ExportRequest {
return ExportRequest{
orig: internal.GetTracesOrig(internal.TracesWrapper(td)),
state: internal.GetTracesState(internal.TracesWrapper(td)),
}
}
// MarshalProto marshals ExportRequest into proto bytes.
func (ms ExportRequest) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportRequest from proto bytes.
func (ms ExportRequest) UnmarshalProto(data []byte) error {
err := ms.orig.UnmarshalProto(data)
if err != nil {
return err
}
otlp.MigrateTraces(ms.orig.ResourceSpans)
return nil
}
// MarshalJSON marshals ExportRequest into JSON bytes.
func (ms ExportRequest) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
if dest.Error() != nil {
return nil, dest.Error()
}
return slices.Clone(dest.Buffer()), nil
}
// UnmarshalJSON unmarshals ExportRequest from JSON bytes.
func (ms ExportRequest) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
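// Traces returns the ptrace.Traces wrapped by this ExportRequest.
// Changes to either are reflected in the other (see NewExportRequestFromTraces).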
func (ms ExportRequest) Traces() ptrace.Traces {
return ptrace.Traces(internal.NewTracesWrapper(ms.orig, ms.state))
}
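// exampleEncodeRequest is an illustrative sketch, not part of the API: it
// wraps ptrace.Traces in an ExportRequest (sharing the underlying data) and
// serializes the request to OTLP protobuf bytes.
func exampleEncodeRequest(td ptrace.Traces) ([]byte, error) {
    req := NewExportRequestFromTraces(td)
    return req.MarshalProto()
}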
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
import (
"slices"
"go.opentelemetry.io/collector/pdata/internal/json"
)
// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
size := ms.orig.SizeProto()
buf := make([]byte, size)
_ = ms.orig.MarshalProto(buf)
return buf, nil
}
// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
return ms.orig.UnmarshalProto(data)
}
// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
dest := json.BorrowStream(nil)
defer json.ReturnStream(dest)
ms.orig.MarshalJSON(dest)
return slices.Clone(dest.Buffer()), dest.Error()
}
// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
iter := json.BorrowIterator(data)
defer json.ReturnIterator(iter)
ms.orig.UnmarshalJSON(iter)
return iter.Error()
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type SpanKind int32
const (
// SpanKindUnspecified represents that the SpanKind is unspecified; it MUST NOT be used.
SpanKindUnspecified = SpanKind(internal.SpanKind_SPAN_KIND_UNSPECIFIED)
// SpanKindInternal indicates that the span represents an internal operation within an application,
// as opposed to an operation happening at the boundaries. Default value.
SpanKindInternal = SpanKind(internal.SpanKind_SPAN_KIND_INTERNAL)
// SpanKindServer indicates that the span covers server-side handling of an RPC or other
// remote network request.
SpanKindServer = SpanKind(internal.SpanKind_SPAN_KIND_SERVER)
// SpanKindClient indicates that the span describes a request to some remote service.
SpanKindClient = SpanKind(internal.SpanKind_SPAN_KIND_CLIENT)
// SpanKindProducer indicates that the span describes a producer sending a message to a broker.
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
// between producer and consumer spans.
// A PRODUCER span ends when the message was accepted by the broker, while the logical processing of
// the message might span a much longer time.
SpanKindProducer = SpanKind(internal.SpanKind_SPAN_KIND_PRODUCER)
// SpanKindConsumer indicates that the span describes a consumer receiving a message from a broker.
// Like the PRODUCER kind, there is often no direct critical path latency relationship between
// producer and consumer spans.
SpanKindConsumer = SpanKind(internal.SpanKind_SPAN_KIND_CONSUMER)
)
// String returns the string representation of the SpanKind.
func (sk SpanKind) String() string {
switch sk {
case SpanKindUnspecified:
return "Unspecified"
case SpanKindInternal:
return "Internal"
case SpanKindServer:
return "Server"
case SpanKindClient:
return "Client"
case SpanKindProducer:
return "Producer"
case SpanKindConsumer:
return "Consumer"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
import (
"go.opentelemetry.io/collector/pdata/internal"
)
// StatusCode mirrors the codes defined at
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
StatusCodeUnset = StatusCode(internal.StatusCode_STATUS_CODE_UNSET)
StatusCodeOk = StatusCode(internal.StatusCode_STATUS_CODE_OK)
StatusCodeError = StatusCode(internal.StatusCode_STATUS_CODE_ERROR)
)
// String returns the string representation of the StatusCode.
func (sc StatusCode) String() string {
switch sc {
case StatusCodeUnset:
return "Unset"
case StatusCodeOk:
return "Ok"
case StatusCodeError:
return "Error"
}
return ""
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"
// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
func (ms Traces) MarkReadOnly() {
ms.getState().MarkReadOnly()
}
// IsReadOnly returns true if this Traces instance is read-only.
func (ms Traces) IsReadOnly() bool {
return ms.getState().IsReadOnly()
}
// SpanCount calculates the total number of spans.
func (ms Traces) SpanCount() int {
spanCount := 0
rss := ms.ResourceSpans()
for i := 0; i < rss.Len(); i++ {
rs := rss.At(i)
ilss := rs.ScopeSpans()
for j := 0; j < ilss.Len(); j++ {
spanCount += ilss.At(j).Spans().Len()
}
}
return spanCount
}
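// exampleFreezeAndCount is an illustrative sketch, not part of the API: it
// marks a Traces instance read-only before sharing it, then reports its span
// count. After MarkReadOnly, mutating accessors panic via the internal
// mutability assertion, while read-only accessors such as SpanCount remain safe.
func exampleFreezeAndCount(td Traces) int {
    td.MarkReadOnly()
    return td.SpanCount()
}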
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"encoding"
"errors"
"fmt"
"net/url"
"path"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configgrpc"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/config/configoptional"
)
type SanitizedURLPath string
var _ encoding.TextUnmarshaler = (*SanitizedURLPath)(nil)
func (s *SanitizedURLPath) UnmarshalText(text []byte) error {
u, err := url.Parse(string(text))
if err != nil {
return fmt.Errorf("invalid HTTP URL path set for signal: %w", err)
}
if !path.IsAbs(u.Path) {
u.Path = "/" + u.Path
}
*s = SanitizedURLPath(u.Path)
return nil
}
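// exampleSanitizedPath is an illustrative sketch, not part of the receiver
// API: UnmarshalText normalizes a relative URL path to an absolute one, so
// "v1/traces" and "/v1/traces" both yield "/v1/traces".
func exampleSanitizedPath() (SanitizedURLPath, error) {
    var p SanitizedURLPath
    err := p.UnmarshalText([]byte("v1/traces"))
    return p, err // p == "/v1/traces"
}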
type HTTPConfig struct {
ServerConfig confighttp.ServerConfig `mapstructure:",squash"`
// The URL path to receive traces on. If omitted, "/v1/traces" will be used.
TracesURLPath SanitizedURLPath `mapstructure:"traces_url_path,omitempty"`
// The URL path to receive metrics on. If omitted, "/v1/metrics" will be used.
MetricsURLPath SanitizedURLPath `mapstructure:"metrics_url_path,omitempty"`
// The URL path to receive logs on. If omitted, "/v1/logs" will be used.
LogsURLPath SanitizedURLPath `mapstructure:"logs_url_path,omitempty"`
// prevent unkeyed literal initialization
_ struct{}
}
// Protocols is the configuration for the supported protocols.
type Protocols struct {
GRPC configoptional.Optional[configgrpc.ServerConfig] `mapstructure:"grpc"`
HTTP configoptional.Optional[HTTPConfig] `mapstructure:"http"`
// prevent unkeyed literal initialization
_ struct{}
}
// Config defines configuration for OTLP receiver.
type Config struct {
// Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON).
Protocols `mapstructure:"protocols"`
// prevent unkeyed literal initialization
_ struct{}
}
var _ component.Config = (*Config)(nil)
// Validate checks that the receiver configuration is valid.
func (cfg *Config) Validate() error {
if !cfg.GRPC.HasValue() && !cfg.HTTP.HasValue() {
return errors.New("must specify at least one protocol when using the OTLP receiver")
}
return nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)
const (
pbContentType = "application/x-protobuf"
jsonContentType = "application/json"
)
var (
pbEncoder = &protoEncoder{}
jsEncoder = &jsonEncoder{}
)
type encoder interface {
unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error)
unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error)
unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error)
unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error)
marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error)
marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error)
marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error)
marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error)
marshalStatus(rsp *spb.Status) ([]byte, error)
contentType() string
}
type protoEncoder struct{}
func (protoEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
req := ptraceotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
req := plogotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
req := pprofileotlp.NewExportRequest()
err := req.UnmarshalProto(buf)
return req, err
}
func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
return resp.MarshalProto()
}
func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
return proto.Marshal(resp)
}
func (protoEncoder) contentType() string {
return pbContentType
}
type jsonEncoder struct{}
func (jsonEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
req := ptraceotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
req := pmetricotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
req := plogotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
req := pprofileotlp.NewExportRequest()
err := req.UnmarshalJSON(buf)
return req, err
}
func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
return resp.MarshalJSON()
}
func (jsonEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
return protojson.Marshal(resp)
}
func (jsonEncoder) contentType() string {
return jsonContentType
}
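// exampleRoundTrip is a hedged sketch (the helper is illustrative, not part of
// the upstream package): it selects an encoder by content type, decodes a
// traces request, and encodes an empty response in the same wire format.
func exampleRoundTrip(contentType string, body []byte) ([]byte, error) {
var enc encoder = pbEncoder
if contentType == jsonContentType {
enc = jsEncoder
}
req, err := enc.unmarshalTracesRequest(body)
if err != nil {
return nil, err
}
_ = req // in the real handlers the request is handed to the signal receiver
return enc.marshalTracesResponse(ptraceotlp.NewExportResponse())
}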
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"context"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configgrpc"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/config/configoptional"
"go.opentelemetry.io/collector/config/configtls"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/internal/sharedcomponent"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata"
"go.opentelemetry.io/collector/receiver/xreceiver"
)
const (
defaultTracesURLPath = "/v1/traces"
defaultMetricsURLPath = "/v1/metrics"
defaultLogsURLPath = "/v1/logs"
defaultProfilesURLPath = "/v1development/profiles"
)
// NewFactory creates a new OTLP receiver factory.
func NewFactory() receiver.Factory {
return xreceiver.NewFactory(
metadata.Type,
createDefaultConfig,
xreceiver.WithTraces(createTraces, metadata.TracesStability),
xreceiver.WithMetrics(createMetrics, metadata.MetricsStability),
xreceiver.WithLogs(createLog, metadata.LogsStability),
xreceiver.WithProfiles(createProfiles, metadata.ProfilesStability),
)
}
// createDefaultConfig creates the default configuration for the receiver.
func createDefaultConfig() component.Config {
grpcCfg := configgrpc.NewDefaultServerConfig()
grpcCfg.NetAddr.Endpoint = "localhost:4317"
// We write almost 0 bytes, so there is no need to tune WriteBufferSize.
grpcCfg.ReadBufferSize = 512 * 1024
httpCfg := confighttp.NewDefaultServerConfig()
httpCfg.Endpoint = "localhost:4318"
// For backward compatibility:
httpCfg.TLS = configoptional.None[configtls.ServerConfig]()
httpCfg.WriteTimeout = 0
httpCfg.ReadHeaderTimeout = 0
httpCfg.IdleTimeout = 0
return &Config{
Protocols: Protocols{
GRPC: configoptional.Default(grpcCfg),
HTTP: configoptional.Default(HTTPConfig{
ServerConfig: httpCfg,
TracesURLPath: defaultTracesURLPath,
MetricsURLPath: defaultMetricsURLPath,
LogsURLPath: defaultLogsURLPath,
}),
},
}
}
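// exampleDefaultEndpoints is a hedged sketch (not upstream code) showing how
// the optional protocol configs produced above are read back: each protocol
// is only inspected after HasValue() confirms it is set.
func exampleDefaultEndpoints() (grpcEndpoint, httpEndpoint string) {
cfg := createDefaultConfig().(*Config)
if cfg.GRPC.HasValue() {
grpcEndpoint = cfg.GRPC.Get().NetAddr.Endpoint // "localhost:4317"
}
if cfg.HTTP.HasValue() {
httpEndpoint = cfg.HTTP.Get().ServerConfig.Endpoint // "localhost:4318"
}
return grpcEndpoint, httpEndpoint
}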
// createTraces creates a trace receiver based on provided config.
func createTraces(
_ context.Context,
set receiver.Settings,
cfg component.Config,
nextConsumer consumer.Traces,
) (receiver.Traces, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerTraceConsumer(nextConsumer)
return r, nil
}
// createMetrics creates a metrics receiver based on provided config.
func createMetrics(
_ context.Context,
set receiver.Settings,
cfg component.Config,
consumer consumer.Metrics,
) (receiver.Metrics, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerMetricsConsumer(consumer)
return r, nil
}
// createLog creates a log receiver based on provided config.
func createLog(
_ context.Context,
set receiver.Settings,
cfg component.Config,
consumer consumer.Logs,
) (receiver.Logs, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerLogsConsumer(consumer)
return r, nil
}
// createProfiles creates a profiles receiver based on provided config.
func createProfiles(
_ context.Context,
set receiver.Settings,
cfg component.Config,
nextConsumer xconsumer.Profiles,
) (xreceiver.Profiles, error) {
oCfg := cfg.(*Config)
r, err := receivers.LoadOrStore(
oCfg,
func() (*otlpReceiver, error) {
return newOtlpReceiver(oCfg, &set)
},
)
if err != nil {
return nil, err
}
r.Unwrap().registerProfilesConsumer(nextConsumer)
return r, nil
}
// This is the map of already created OTLP receivers for particular configurations.
// We maintain this map because the receiver.Factory is asked for trace and metric
// receivers separately, via CreateTraces() and CreateMetrics(), but they must not
// create separate objects: they must share one otlpReceiver object per configuration.
// When the receiver is shut down it should be removed from this map so the same
// configuration can be recreated successfully.
var receivers = sharedcomponent.NewMap[*Config, *otlpReceiver]()
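// A hedged sketch of the sharing contract described above (identifiers such as
// ctx, set, cfg and the consumers are placeholders): creating a traces and a
// metrics receiver from the same *Config yields wrappers around one shared
// otlpReceiver, because LoadOrStore keys the map by the config pointer.
//
//	tr, _ := createTraces(ctx, set, cfg, tracesConsumer)
//	mr, _ := createMetrics(ctx, set, cfg, metricsConsumer)
//	// tr and mr wrap the same underlying *otlpReceiver instance.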
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package errors // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
import (
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/consumer/consumererror"
)
func GetStatusFromError(err error) error {
s, ok := status.FromError(err)
if !ok {
// Default to a retryable error
// https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures
code := codes.Unavailable
if consumererror.IsPermanent(err) {
// If an error is permanent but doesn't have an attached gRPC status, assume it is server-side.
code = codes.Internal
}
s = status.New(code, err.Error())
}
return s.Err()
}
func GetHTTPStatusCodeFromStatus(s *status.Status) int {
// See https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures
// to see if a code is retryable.
// See https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#failures-1
// to see a list of retryable http status codes.
switch s.Code() {
// Retryable
case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss:
return http.StatusServiceUnavailable
// Retryable
case codes.ResourceExhausted:
return http.StatusTooManyRequests
// Not Retryable
case codes.InvalidArgument:
return http.StatusBadRequest
// Not Retryable
case codes.Unauthenticated:
return http.StatusUnauthorized
// Not Retryable
case codes.PermissionDenied:
return http.StatusForbidden
// Not Retryable
case codes.Unimplemented:
return http.StatusNotFound
// Not Retryable
default:
return http.StatusInternalServerError
}
}
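// A hedged usage sketch composing the two helpers above (it assumes a stdlib
// "errors" import aliased as stderrors, since this package shadows that name):
// a permanent consumer error with no attached gRPC status is mapped to
// codes.Internal, which in turn maps to HTTP 500.
//
//	err := consumererror.NewPermanent(stderrors.New("bad payload"))
//	s, _ := status.FromError(GetStatusFromError(err))
//	httpCode := GetHTTPStatusCodeFromStatus(s) // http.StatusInternalServerError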
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package logs // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle logs from OpenTelemetry exporters.
type Receiver struct {
plogotlp.UnimplementedGRPCServer
nextConsumer consumer.Logs
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Logs, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the service Export logs func.
func (r *Receiver) Export(ctx context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) {
ld := req.Logs()
numRecords := ld.LogRecordCount()
if numRecords == 0 {
return plogotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartLogsOp(ctx)
err := r.nextConsumer.ConsumeLogs(ctx, ld)
r.obsreport.EndLogsOp(ctx, dataFormatProtobuf, numRecords, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to the appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503),
// permanent errors to codes.Internal (equivalent to HTTP 500).
if err != nil {
return plogotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return plogotlp.NewExportResponse(), nil
}
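// A hedged, test-style sketch of driving Export directly (consumertest is
// go.opentelemetry.io/collector/consumer/consumertest; obsreport construction
// is omitted): an empty request takes the fast path and never reaches the
// consumer, because LogRecordCount() is zero.
//
//	r := New(consumertest.NewNop(), obsreport)
//	resp, err := r.Export(context.Background(), plogotlp.NewExportRequest())
//	// err == nil and ConsumeLogs is never invoked.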
// Code generated by mdatagen. DO NOT EDIT.
package metadata
import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/plog"
"go.opentelemetry.io/collector/receiver"
)
// LogsBuilder provides an interface for scrapers to report logs while taking care of all the transformations
// required to produce the log representation defined in metadata and user config.
type LogsBuilder struct {
logsBuffer plog.Logs
logRecordsBuffer plog.LogRecordSlice
buildInfo component.BuildInfo // contains version information.
}
// LogBuilderOption applies changes to default logs builder.
type LogBuilderOption interface {
apply(*LogsBuilder)
}
func NewLogsBuilder(settings receiver.Settings) *LogsBuilder {
lb := &LogsBuilder{
logsBuffer: plog.NewLogs(),
logRecordsBuffer: plog.NewLogRecordSlice(),
buildInfo: settings.BuildInfo,
}
return lb
}
// ResourceLogsOption applies changes to provided resource logs.
type ResourceLogsOption interface {
apply(plog.ResourceLogs)
}
type resourceLogsOptionFunc func(plog.ResourceLogs)
func (rlof resourceLogsOptionFunc) apply(rl plog.ResourceLogs) {
rlof(rl)
}
// WithLogsResource sets the provided resource on the emitted ResourceLogs.
// It's recommended to use ResourceBuilder to create the resource.
func WithLogsResource(res pcommon.Resource) ResourceLogsOption {
return resourceLogsOptionFunc(func(rl plog.ResourceLogs) {
res.CopyTo(rl.Resource())
})
}
// AppendLogRecord adds a log record to the logs builder.
func (lb *LogsBuilder) AppendLogRecord(lr plog.LogRecord) {
lr.MoveTo(lb.logRecordsBuffer.AppendEmpty())
}
// EmitForResource saves all the generated logs under a new resource and updates the internal state to be ready for
// recording another set of log records as part of another resource. This function can be helpful when one scraper
// needs to emit logs from several resources. Otherwise, calling this function is not required;
// the `Emit` function can be called on its own instead.
// Resource attributes should be provided as ResourceLogsOption arguments.
func (lb *LogsBuilder) EmitForResource(options ...ResourceLogsOption) {
rl := plog.NewResourceLogs()
ils := rl.ScopeLogs().AppendEmpty()
ils.Scope().SetName(ScopeName)
ils.Scope().SetVersion(lb.buildInfo.Version)
for _, op := range options {
op.apply(rl)
}
if lb.logRecordsBuffer.Len() > 0 {
lb.logRecordsBuffer.MoveAndAppendTo(ils.LogRecords())
lb.logRecordsBuffer = plog.NewLogRecordSlice()
}
if ils.LogRecords().Len() > 0 {
rl.MoveTo(lb.logsBuffer.ResourceLogs().AppendEmpty())
}
}
// Emit returns all the logs accumulated by the logs builder and updates the internal state to be ready for
// recording another set of logs. This function is responsible for applying all the transformations required to
// produce the log representation defined in metadata and user config.
func (lb *LogsBuilder) Emit(options ...ResourceLogsOption) plog.Logs {
lb.EmitForResource(options...)
logs := lb.logsBuffer
lb.logsBuffer = plog.NewLogs()
return logs
}
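// exampleBuildLogs is a hedged sketch (not generated code): a scraper appends
// individual records, then emits them under a single resource. It assumes
// `res` was produced by this package's ResourceBuilder.
func exampleBuildLogs(settings receiver.Settings, res pcommon.Resource) plog.Logs {
lb := NewLogsBuilder(settings)
lr := plog.NewLogRecord()
lr.Body().SetStr("disk scan finished")
lb.AppendLogRecord(lr)
return lb.Emit(WithLogsResource(res))
}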
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package metrics // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle metrics from OpenTelemetry exporters.
type Receiver struct {
pmetricotlp.UnimplementedGRPCServer
nextConsumer consumer.Metrics
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Metrics, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the service Export metrics func.
func (r *Receiver) Export(ctx context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
md := req.Metrics()
dataPointCount := md.DataPointCount()
if dataPointCount == 0 {
return pmetricotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartMetricsOp(ctx)
err := r.nextConsumer.ConsumeMetrics(ctx, md)
r.obsreport.EndMetricsOp(ctx, dataFormatProtobuf, dataPointCount, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to the appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503),
// permanent errors to codes.Internal (equivalent to HTTP 500).
if err != nil {
return pmetricotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return pmetricotlp.NewExportResponse(), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package profiles // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
import (
"context"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
)
// Receiver is the type used to handle profiles from OpenTelemetry exporters.
type Receiver struct {
pprofileotlp.UnimplementedGRPCServer
nextConsumer xconsumer.Profiles
}
// New creates a new Receiver reference.
func New(nextConsumer xconsumer.Profiles) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
}
}
// Export implements the service Export profiles func.
func (r *Receiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) {
td := req.Profiles()
// We need to ensure that it propagates the receiver name as a tag
numProfiles := td.SampleCount()
if numProfiles == 0 {
return pprofileotlp.NewExportResponse(), nil
}
err := r.nextConsumer.ConsumeProfiles(ctx, td)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to the appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503),
// permanent errors to codes.Internal (equivalent to HTTP 500).
if err != nil {
return pprofileotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return pprofileotlp.NewExportResponse(), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package trace // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
import (
"context"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
const dataFormatProtobuf = "protobuf"
// Receiver is the type used to handle spans from OpenTelemetry exporters.
type Receiver struct {
ptraceotlp.UnimplementedGRPCServer
nextConsumer consumer.Traces
obsreport *receiverhelper.ObsReport
}
// New creates a new Receiver reference.
func New(nextConsumer consumer.Traces, obsreport *receiverhelper.ObsReport) *Receiver {
return &Receiver{
nextConsumer: nextConsumer,
obsreport: obsreport,
}
}
// Export implements the service Export traces func.
func (r *Receiver) Export(ctx context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
td := req.Traces()
// We need to ensure that it propagates the receiver name as a tag
numSpans := td.SpanCount()
if numSpans == 0 {
return ptraceotlp.NewExportResponse(), nil
}
ctx = r.obsreport.StartTracesOp(ctx)
err := r.nextConsumer.ConsumeTraces(ctx, td)
r.obsreport.EndTracesOp(ctx, dataFormatProtobuf, numSpans, err)
// Use appropriate status codes for permanent/non-permanent errors.
// If we returned the error directly, the gRPC implementation would set the status code to Unknown.
// Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345
// So, convert the error to the appropriate gRPC status before returning it:
// non-permanent errors are converted to codes.Unavailable (equivalent to HTTP 503),
// permanent errors to codes.Internal (equivalent to HTTP 500).
if err != nil {
return ptraceotlp.NewExportResponse(), errors.GetStatusFromError(err)
}
return ptraceotlp.NewExportResponse(), nil
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"context"
"errors"
"net"
"net/http"
"sync"
"go.uber.org/zap"
"google.golang.org/grpc"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componentstatus"
"go.opentelemetry.io/collector/config/confighttp"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/xconsumer"
"go.opentelemetry.io/collector/internal/telemetry"
"go.opentelemetry.io/collector/pdata/plog/plogotlp"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
"go.opentelemetry.io/collector/receiver/receiverhelper"
)
// otlpReceiver is the type that exposes Trace, Metrics, Logs and Profiles reception.
type otlpReceiver struct {
cfg *Config
serverGRPC *grpc.Server
serverHTTP *http.Server
nextTraces consumer.Traces
nextMetrics consumer.Metrics
nextLogs consumer.Logs
nextProfiles xconsumer.Profiles
shutdownWG sync.WaitGroup
obsrepGRPC *receiverhelper.ObsReport
obsrepHTTP *receiverhelper.ObsReport
settings *receiver.Settings
}
// newOtlpReceiver just creates the OpenTelemetry receiver services. It is the
// caller's responsibility to invoke Start to begin reception and Shutdown to
// stop it.
func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) {
set.TelemetrySettings = telemetry.DropInjectedAttributes(set.TelemetrySettings, telemetry.SignalKey)
set.Logger.Debug("created signal-agnostic logger")
r := &otlpReceiver{
cfg: cfg,
nextTraces: nil,
nextMetrics: nil,
nextLogs: nil,
nextProfiles: nil,
settings: set,
}
var err error
r.obsrepGRPC, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: set.ID,
Transport: "grpc",
ReceiverCreateSettings: *set,
})
if err != nil {
return nil, err
}
r.obsrepHTTP, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
ReceiverID: set.ID,
Transport: "http",
ReceiverCreateSettings: *set,
})
if err != nil {
return nil, err
}
return r, nil
}
func (r *otlpReceiver) startGRPCServer(ctx context.Context, host component.Host) error {
// If GRPC is not enabled, nothing to start.
if !r.cfg.GRPC.HasValue() {
return nil
}
grpcCfg := r.cfg.GRPC.Get()
var err error
if r.serverGRPC, err = grpcCfg.ToServer(ctx, host, r.settings.TelemetrySettings); err != nil {
return err
}
if r.nextTraces != nil {
ptraceotlp.RegisterGRPCServer(r.serverGRPC, trace.New(r.nextTraces, r.obsrepGRPC))
}
if r.nextMetrics != nil {
pmetricotlp.RegisterGRPCServer(r.serverGRPC, metrics.New(r.nextMetrics, r.obsrepGRPC))
}
if r.nextLogs != nil {
plogotlp.RegisterGRPCServer(r.serverGRPC, logs.New(r.nextLogs, r.obsrepGRPC))
}
if r.nextProfiles != nil {
pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles))
}
var gln net.Listener
if gln, err = grpcCfg.NetAddr.Listen(ctx); err != nil {
return err
}
r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", gln.Addr().String()))
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc))
}
}()
return nil
}
func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) error {
// If HTTP is not enabled, nothing to start.
if !r.cfg.HTTP.HasValue() {
return nil
}
httpCfg := r.cfg.HTTP.Get()
httpMux := http.NewServeMux()
if r.nextTraces != nil {
httpTracesReceiver := trace.New(r.nextTraces, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.TracesURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleTraces(resp, req, httpTracesReceiver)
})
}
if r.nextMetrics != nil {
httpMetricsReceiver := metrics.New(r.nextMetrics, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.MetricsURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleMetrics(resp, req, httpMetricsReceiver)
})
}
if r.nextLogs != nil {
httpLogsReceiver := logs.New(r.nextLogs, r.obsrepHTTP)
httpMux.HandleFunc(string(httpCfg.LogsURLPath), func(resp http.ResponseWriter, req *http.Request) {
handleLogs(resp, req, httpLogsReceiver)
})
}
if r.nextProfiles != nil {
httpProfilesReceiver := profiles.New(r.nextProfiles)
httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) {
handleProfiles(resp, req, httpProfilesReceiver)
})
}
var err error
if r.serverHTTP, err = httpCfg.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
return err
}
var hln net.Listener
if hln, err = httpCfg.ServerConfig.ToListener(ctx); err != nil {
return err
}
r.settings.Logger.Info("Starting HTTP server", zap.String("endpoint", hln.Addr().String()))
r.shutdownWG.Add(1)
go func() {
defer r.shutdownWG.Done()
if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) {
componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
}
}()
return nil
}
// Start starts the configured gRPC and HTTP servers and begins reception for
// all registered signals.
func (r *otlpReceiver) Start(ctx context.Context, host component.Host) error {
if err := r.startGRPCServer(ctx, host); err != nil {
return err
}
if err := r.startHTTPServer(ctx, host); err != nil {
// It's possible that a valid GRPC server configuration was specified,
// but an invalid HTTP configuration. If that's the case, the successfully
// started GRPC server must be shutdown to ensure no goroutines are leaked.
return errors.Join(err, r.Shutdown(ctx))
}
return nil
}
// Shutdown is a method to turn off receiving.
func (r *otlpReceiver) Shutdown(ctx context.Context) error {
var err error
if r.serverHTTP != nil {
err = r.serverHTTP.Shutdown(ctx)
}
if r.serverGRPC != nil {
r.serverGRPC.GracefulStop()
}
r.shutdownWG.Wait()
return err
}
func (r *otlpReceiver) registerTraceConsumer(tc consumer.Traces) {
r.nextTraces = tc
}
func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) {
r.nextMetrics = mc
}
func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) {
r.nextLogs = lc
}
func (r *otlpReceiver) registerProfilesConsumer(tc xconsumer.Profiles) {
r.nextProfiles = tc
}
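// A hedged lifecycle sketch (ctx, host, set, cfg and the consumer are
// placeholders): consumers are registered before Start so that the gRPC and
// HTTP services are wired up, and Shutdown reverses Start.
//
//	r, _ := newOtlpReceiver(cfg, &set)
//	r.registerTraceConsumer(nextTraces)
//	if err := r.Start(ctx, host); err != nil { /* handle */ }
//	defer func() { _ = r.Shutdown(ctx) }()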
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"
import (
"fmt"
"io"
"mime"
"net/http"
"strconv"
"time"
"google.golang.org/grpc/status"
"go.opentelemetry.io/collector/internal/statusutil"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
)
// Pre-computed status with code=Internal to be used in case of a marshaling error.
var fallbackMsg = []byte(`{"code": 13, "message": "failed to marshal error message"}`)
const fallbackContentType = "application/json"
func handleTraces(resp http.ResponseWriter, req *http.Request, tracesReceiver *trace.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalTracesRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := tracesReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalTracesResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func handleMetrics(resp http.ResponseWriter, req *http.Request, metricsReceiver *metrics.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalMetricsRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := metricsReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalMetricsResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func handleLogs(resp http.ResponseWriter, req *http.Request, logsReceiver *logs.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalLogsRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := logsReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalLogsResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceiver *profiles.Receiver) {
enc, ok := readContentType(resp, req)
if !ok {
return
}
body, ok := readAndCloseBody(resp, req, enc)
if !ok {
return
}
otlpReq, err := enc.unmarshalProfilesRequest(body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return
}
otlpResp, err := profilesReceiver.Export(req.Context(), otlpReq)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
msg, err := enc.marshalProfilesResponse(otlpResp)
if err != nil {
writeError(resp, enc, err, http.StatusInternalServerError)
return
}
writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}
func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) {
if req.Method != http.MethodPost {
handleUnmatchedMethod(resp)
return nil, false
}
switch getMimeTypeFromContentType(req.Header.Get("Content-Type")) {
case pbContentType:
return pbEncoder, true
case jsonContentType:
return jsEncoder, true
default:
handleUnmatchedContentType(resp)
return nil, false
}
}
func readAndCloseBody(resp http.ResponseWriter, req *http.Request, enc encoder) ([]byte, bool) {
body, err := io.ReadAll(req.Body)
if err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return nil, false
}
if err = req.Body.Close(); err != nil {
writeError(resp, enc, err, http.StatusBadRequest)
return nil, false
}
return body, true
}
// writeError encodes the HTTP error inside an rpc.Status message as required by the OTLP protocol.
func writeError(w http.ResponseWriter, encoder encoder, err error, statusCode int) {
s, ok := status.FromError(err)
if ok {
statusCode = errors.GetHTTPStatusCodeFromStatus(s)
} else {
s = statusutil.NewStatusFromMsgAndHTTPCode(err.Error(), statusCode)
}
writeStatusResponse(w, encoder, statusCode, s)
}
// errorHandler encodes the HTTP error message inside an rpc.Status message as required
// by the OTLP protocol.
func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) {
s := statusutil.NewStatusFromMsgAndHTTPCode(errMsg, statusCode)
contentType := r.Header.Get("Content-Type")
if contentType == "" {
contentType = fallbackContentType
}
switch getMimeTypeFromContentType(contentType) {
case pbContentType:
writeStatusResponse(w, pbEncoder, statusCode, s)
return
case jsonContentType:
writeStatusResponse(w, jsEncoder, statusCode, s)
return
}
writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
}
func writeStatusResponse(w http.ResponseWriter, enc encoder, statusCode int, st *status.Status) {
// https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#otlphttp-throttling
if statusCode == http.StatusTooManyRequests || statusCode == http.StatusServiceUnavailable {
retryInfo := statusutil.GetRetryInfo(st)
// Check if server returned throttling information.
if retryInfo != nil {
// We are throttled. Wait before retrying as requested by the server.
// The value of Retry-After field can be either an HTTP-date or a number of
// seconds to delay after the response is received. See https://datatracker.ietf.org/doc/html/rfc7231#section-7.1.3
//
// Retry-After = HTTP-date / delay-seconds
//
// Use delay-seconds since it is easier to format and does not require clock synchronization.
w.Header().Set("Retry-After", strconv.FormatInt(int64(retryInfo.GetRetryDelay().AsDuration()/time.Second), 10))
}
}
msg, err := enc.marshalStatus(st.Proto())
if err != nil {
writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
return
}
writeResponse(w, enc.contentType(), statusCode, msg)
}
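// A hedged sketch of a throttling status that triggers the Retry-After header
// above (errdetails is google.golang.org/genproto/googleapis/rpc/errdetails,
// durationpb is google.golang.org/protobuf/types/known/durationpb; both are
// illustrative imports, not part of this file):
//
//	st, _ := status.New(codes.ResourceExhausted, "slow down").WithDetails(
//		&errdetails.RetryInfo{RetryDelay: durationpb.New(30 * time.Second)},
//	)
//	writeStatusResponse(w, jsEncoder, http.StatusTooManyRequests, st)
//	// The response includes "Retry-After: 30".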
func writeResponse(w http.ResponseWriter, contentType string, statusCode int, msg []byte) {
w.Header().Set("Content-Type", contentType)
w.WriteHeader(statusCode)
// Nothing we can do with the error if we cannot write to the response.
_, _ = w.Write(msg)
}
func getMimeTypeFromContentType(contentType string) string {
mediatype, _, err := mime.ParseMediaType(contentType)
if err != nil {
return ""
}
return mediatype
}
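// For example, mime.ParseMediaType discards parameters such as charset, so a
// Content-Type carrying parameters still selects the right encoder, while an
// empty or malformed value yields "" and falls through to the error paths:
//
//	getMimeTypeFromContentType("application/json; charset=utf-8") // "application/json"
//	getMimeTypeFromContentType("") // "" (readContentType rejects the request)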
func handleUnmatchedMethod(resp http.ResponseWriter) {
hst := http.StatusMethodNotAllowed
writeResponse(resp, "text/plain", hst, fmt.Appendf(nil, "%v method not allowed, supported: [POST]", hst))
}
func handleUnmatchedContentType(resp http.ResponseWriter) {
hst := http.StatusUnsupportedMediaType
writeResponse(resp, "text/plain", hst, fmt.Appendf(nil, "%v unsupported media type, supported: [%s, %s]", hst, jsonContentType, pbContentType))
}