// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
)

type tracesGroup struct {
	traces         ptrace.Traces
	resourceHashes [][16]byte
}

func newTracesGroup() *tracesGroup {
	return &tracesGroup{traces: ptrace.NewTraces()}
}

// findOrCreateResourceSpans searches for a ResourceSpans entry whose Resource matches the given attributes and returns it.
// If nothing is found, a new one is created.
func (tg *tracesGroup) findOrCreateResourceSpans(originResource pcommon.Resource, requiredAttributes pcommon.Map) ptrace.ResourceSpans {
	referenceResource := buildReferenceResource(originResource, requiredAttributes)
	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())

	rss := tg.traces.ResourceSpans()
	for i := 0; i < rss.Len(); i++ {
		if tg.resourceHashes[i] == referenceResourceHash {
			return rss.At(i)
		}
	}

	rs := tg.traces.ResourceSpans().AppendEmpty()
	referenceResource.MoveTo(rs.Resource())
	tg.resourceHashes = append(tg.resourceHashes, referenceResourceHash)
	return rs
}

type metricsGroup struct {
	metrics        pmetric.Metrics
	resourceHashes [][16]byte
}

func newMetricsGroup() *metricsGroup {
	return &metricsGroup{metrics: pmetric.NewMetrics()}
}

// findOrCreateResourceMetrics searches for a ResourceMetrics entry whose Resource matches the given attributes and returns it.
// If nothing is found, a new one is created.
func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resource, requiredAttributes pcommon.Map) pmetric.ResourceMetrics {
	referenceResource := buildReferenceResource(originResource, requiredAttributes)
	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())

	rms := mg.metrics.ResourceMetrics()
	for i := 0; i < rms.Len(); i++ {
		if mg.resourceHashes[i] == referenceResourceHash {
			return rms.At(i)
		}
	}

	rm := mg.metrics.ResourceMetrics().AppendEmpty()
	referenceResource.MoveTo(rm.Resource())
	mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash)
	return rm
}

type logsGroup struct {
	logs           plog.Logs
	resourceHashes [][16]byte
}

// newLogsGroup returns a new logsGroup
func newLogsGroup() *logsGroup {
	return &logsGroup{logs: plog.NewLogs()}
}

// findOrCreateResourceLogs searches for a ResourceLogs entry whose Resource matches the given attributes and returns it.
// If nothing is found, a new one is created.
func (lg *logsGroup) findOrCreateResourceLogs(originResource pcommon.Resource, requiredAttributes pcommon.Map) plog.ResourceLogs {
	referenceResource := buildReferenceResource(originResource, requiredAttributes)
	referenceResourceHash := pdatautil.MapHash(referenceResource.Attributes())

	rls := lg.logs.ResourceLogs()
	for i := 0; i < rls.Len(); i++ {
		if lg.resourceHashes[i] == referenceResourceHash {
			return rls.At(i)
		}
	}

	rl := lg.logs.ResourceLogs().AppendEmpty()
	referenceResource.MoveTo(rl.Resource())
	lg.resourceHashes = append(lg.resourceHashes, referenceResourceHash)
	return rl
}

func instrumentationLibrariesEqual(il1, il2 pcommon.InstrumentationScope) bool {
	return il1.Name() == il2.Name() && il1.Version() == il2.Version()
}

// matchingScopeSpans searches for a ptrace.ScopeSpans instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one.
func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.InstrumentationScope) ptrace.ScopeSpans {
	ilss := rl.ScopeSpans()
	for i := 0; i < ilss.Len(); i++ {
		ils := ilss.At(i)
		if instrumentationLibrariesEqual(ils.Scope(), library) {
			return ils
		}
	}

	ils := ilss.AppendEmpty()
	library.CopyTo(ils.Scope())
	return ils
}

// matchingScopeLogs searches for a plog.ScopeLogs instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one.
func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScope) plog.ScopeLogs {
	ills := rl.ScopeLogs()
	for i := 0; i < ills.Len(); i++ {
		sl := ills.At(i)
		if instrumentationLibrariesEqual(sl.Scope(), library) {
			return sl
		}
	}

	sl := ills.AppendEmpty()
	library.CopyTo(sl.Scope())
	return sl
}

// matchingScopeMetrics searches for a pmetric.ScopeMetrics instance matching
// the given InstrumentationScope. If nothing is found, it creates a new one.
func matchingScopeMetrics(rm pmetric.ResourceMetrics, library pcommon.InstrumentationScope) pmetric.ScopeMetrics {
	ilms := rm.ScopeMetrics()
	for i := 0; i < ilms.Len(); i++ {
		ilm := ilms.At(i)
		if instrumentationLibrariesEqual(ilm.Scope(), library) {
			return ilm
		}
	}

	ilm := ilms.AppendEmpty()
	library.CopyTo(ilm.Scope())
	return ilm
}

// buildReferenceResource returns a new Resource that we'll be looking for in existing Resources,
// built as a merge of the Attributes of the original Resource with the requested Attributes.
func buildReferenceResource(originResource pcommon.Resource, requiredAttributes pcommon.Map) pcommon.Resource {
	referenceResource := pcommon.NewResource()
	originResource.Attributes().CopyTo(referenceResource.Attributes())
	requiredAttributes.Range(func(k string, v pcommon.Value) bool {
		v.CopyTo(referenceResource.Attributes().PutEmpty(k))
		return true
	})
	return referenceResource
}
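// Illustrative sketch, not part of the original source: it shows the grouping
// invariant of the helpers above, namely that repeated lookups with the same
// origin resource and required attributes reuse a single ResourceSpans entry,
// because the merged reference resource hashes identically. The attribute
// names and values below are invented for the example.
func exampleFindOrCreateResourceSpansReuse() {
	tg := newTracesGroup()

	origin := pcommon.NewResource()
	origin.Attributes().PutStr("host.name", "host-1") // hypothetical resource attribute

	grouping := pcommon.NewMap()
	grouping.PutStr("tenant", "tenant-a") // hypothetical grouping attribute

	first := tg.findOrCreateResourceSpans(origin, grouping)
	second := tg.findOrCreateResourceSpans(origin, grouping)

	// Both calls return the same entry: only one ResourceSpans exists, and
	// its resource carries both host.name and tenant.
	_ = first
	_ = second
	_ = tg.traces.ResourceSpans().Len() // == 1
}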
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)

var consumerCapabilities = consumer.Capabilities{MutatesData: true}

// NewFactory returns a new factory for the Group by Attributes processor.
func NewFactory() processor.Factory {
	return processor.NewFactory(
		metadata.Type,
		createDefaultConfig,
		processor.WithTraces(createTracesProcessor, metadata.TracesStability),
		processor.WithLogs(createLogsProcessor, metadata.LogsStability),
		processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability))
}

// createDefaultConfig creates the default configuration for the processor.
func createDefaultConfig() component.Config {
	return &Config{
		GroupByKeys: []string{},
	}
}

func createGroupByAttrsProcessor(set processor.Settings, attributes []string) (*groupByAttrsProcessor, error) {
	var nonEmptyAttributes []string
	presentAttributes := make(map[string]struct{})

	for _, str := range attributes {
		if str != "" {
			_, isPresent := presentAttributes[str]
			if isPresent {
				set.Logger.Warn("A grouping key is already present", zap.String("key", str))
			} else {
				nonEmptyAttributes = append(nonEmptyAttributes, str)
				presentAttributes[str] = struct{}{}
			}
		}
	}

	telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
	if err != nil {
		return nil, err
	}

	return &groupByAttrsProcessor{logger: set.Logger, groupByKeys: nonEmptyAttributes, telemetryBuilder: telemetryBuilder}, nil
}

// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
	ctx context.Context,
	set processor.Settings,
	cfg component.Config,
	nextConsumer consumer.Traces,
) (processor.Traces, error) {
	oCfg := cfg.(*Config)
	gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
	if err != nil {
		return nil, err
	}

	return processorhelper.NewTraces(
		ctx,
		set,
		cfg,
		nextConsumer,
		gap.processTraces,
		processorhelper.WithCapabilities(consumerCapabilities))
}

// createLogsProcessor creates a logs processor based on this config.
func createLogsProcessor(
	ctx context.Context,
	set processor.Settings,
	cfg component.Config,
	nextConsumer consumer.Logs,
) (processor.Logs, error) {
	oCfg := cfg.(*Config)
	gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
	if err != nil {
		return nil, err
	}

	return processorhelper.NewLogs(
		ctx,
		set,
		cfg,
		nextConsumer,
		gap.processLogs,
		processorhelper.WithCapabilities(consumerCapabilities))
}

// createMetricsProcessor creates a metrics processor based on this config.
func createMetricsProcessor(
	ctx context.Context,
	set processor.Settings,
	cfg component.Config,
	nextConsumer consumer.Metrics,
) (processor.Metrics, error) {
	oCfg := cfg.(*Config)
	gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
	if err != nil {
		return nil, err
	}

	return processorhelper.NewMetrics(
		ctx,
		set,
		cfg,
		nextConsumer,
		gap.processMetrics,
		processorhelper.WithCapabilities(consumerCapabilities))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor"

import (
	"context"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor/internal/metadata"
)

type groupByAttrsProcessor struct {
	logger           *zap.Logger
	groupByKeys      []string
	telemetryBuilder *metadata.TelemetryBuilder
}

// processTraces processes traces and groups spans by attribute.
func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
	rss := td.ResourceSpans()
	tg := newTracesGroup()

	for i := 0; i < rss.Len(); i++ {
		rs := rss.At(i)

		ilss := rs.ScopeSpans()
		for j := 0; j < ilss.Len(); j++ {
			ils := ilss.At(j)
			for k := 0; k < ils.Spans().Len(); k++ {
				span := ils.Spans().At(k)

				toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(span.Attributes())
				if toBeGrouped {
					gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedSpans.Add(ctx, 1)
					// Some attributes are going to be moved from span to resource level,
					// so we can delete those on the record level
					deleteAttributes(requiredAttributes, span.Attributes())
				} else {
					gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedSpans.Add(ctx, 1)
				}

				// Let's combine the base resource attributes + the extracted (grouped) attributes
				// and keep them in the grouping entry
				groupedResourceSpans := tg.findOrCreateResourceSpans(rs.Resource(), requiredAttributes)
				sp := matchingScopeSpans(groupedResourceSpans, ils.Scope()).Spans().AppendEmpty()
				span.CopyTo(sp)
			}
		}
	}

	// Copy the grouped data into output
	gap.telemetryBuilder.ProcessorGroupbyattrsSpanGroups.Record(ctx, int64(tg.traces.ResourceSpans().Len()))

	return tg.traces, nil
}

func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
	rl := ld.ResourceLogs()
	lg := newLogsGroup()

	for i := 0; i < rl.Len(); i++ {
		ls := rl.At(i)

		ills := ls.ScopeLogs()
		for j := 0; j < ills.Len(); j++ {
			sl := ills.At(j)
			for k := 0; k < sl.LogRecords().Len(); k++ {
				log := sl.LogRecords().At(k)

				toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(log.Attributes())
				if toBeGrouped {
					gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedLogs.Add(ctx, 1)
					// Some attributes are going to be moved from log record to resource level,
					// so we can delete those on the record level
					deleteAttributes(requiredAttributes, log.Attributes())
				} else {
					gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedLogs.Add(ctx, 1)
				}

				// Let's combine the base resource attributes + the extracted (grouped) attributes
				// and keep them in the grouping entry
				groupedResourceLogs := lg.findOrCreateResourceLogs(ls.Resource(), requiredAttributes)
				lr := matchingScopeLogs(groupedResourceLogs, sl.Scope()).LogRecords().AppendEmpty()
				log.CopyTo(lr)
			}
		}
	}

	// Copy the grouped data into output
	gap.telemetryBuilder.ProcessorGroupbyattrsLogGroups.Record(ctx, int64(lg.logs.ResourceLogs().Len()))

	return lg.logs, nil
}

func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
	rms := md.ResourceMetrics()
	mg := newMetricsGroup()

	for i := 0; i < rms.Len(); i++ {
		rm := rms.At(i)

		ilms := rm.ScopeMetrics()
		for j := 0; j < ilms.Len(); j++ {
			ilm := ilms.At(j)
			for k := 0; k < ilm.Metrics().Len(); k++ {
				metric := ilm.Metrics().At(k)

				//exhaustive:enforce
				switch metric.Type() {
				case pmetric.MetricTypeGauge:
					for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ {
						dataPoint := metric.Gauge().DataPoints().At(pointIndex)
						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
						dataPoint.CopyTo(groupedMetric.Gauge().DataPoints().AppendEmpty())
					}

				case pmetric.MetricTypeSum:
					for pointIndex := 0; pointIndex < metric.Sum().DataPoints().Len(); pointIndex++ {
						dataPoint := metric.Sum().DataPoints().At(pointIndex)
						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
						dataPoint.CopyTo(groupedMetric.Sum().DataPoints().AppendEmpty())
					}

				case pmetric.MetricTypeSummary:
					for pointIndex := 0; pointIndex < metric.Summary().DataPoints().Len(); pointIndex++ {
						dataPoint := metric.Summary().DataPoints().At(pointIndex)
						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
						dataPoint.CopyTo(groupedMetric.Summary().DataPoints().AppendEmpty())
					}

				case pmetric.MetricTypeHistogram:
					for pointIndex := 0; pointIndex < metric.Histogram().DataPoints().Len(); pointIndex++ {
						dataPoint := metric.Histogram().DataPoints().At(pointIndex)
						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
						dataPoint.CopyTo(groupedMetric.Histogram().DataPoints().AppendEmpty())
					}

				case pmetric.MetricTypeExponentialHistogram:
					for pointIndex := 0; pointIndex < metric.ExponentialHistogram().DataPoints().Len(); pointIndex++ {
						dataPoint := metric.ExponentialHistogram().DataPoints().At(pointIndex)
						groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, mg, rm, ilm, metric, dataPoint.Attributes())
						dataPoint.CopyTo(groupedMetric.ExponentialHistogram().DataPoints().AppendEmpty())
					}

				case pmetric.MetricTypeEmpty:
				}
			}
		}
	}

	gap.telemetryBuilder.ProcessorGroupbyattrsMetricGroups.Record(ctx, int64(mg.metrics.ResourceMetrics().Len()))

	return mg.metrics, nil
}

func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) {
	attrsForRemoval.Range(func(key string, _ pcommon.Value) bool {
		targetAttrs.Remove(key)
		return true
	})
}

// extractGroupingAttributes extracts the keys and values of the specified Attributes
// that match the attribute keys used for grouping.
// Returns:
//   - whether any attribute matched (true) or none (false)
//   - the extracted AttributeMap of matching keys and their corresponding values
func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) {
	groupingAttributes := pcommon.NewMap()
	foundMatch := false

	for _, attrKey := range gap.groupByKeys {
		attrVal, found := attrMap.Get(attrKey)
		if found {
			attrVal.CopyTo(groupingAttributes.PutEmpty(attrKey))
			foundMatch = true
		}
	}

	return foundMatch, groupingAttributes
}

// getMetricInInstrumentationLibrary searches for a metric with the same name and type
// in the specified ScopeMetrics and returns it. If nothing is found, it is created.
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric {
	// Loop through all metrics and try to find the one that matches with the one we search for
	// (name and type)
	for i := 0; i < ilm.Metrics().Len(); i++ {
		metric := ilm.Metrics().At(i)
		if metric.Name() == searchedMetric.Name() && metric.Type() == searchedMetric.Type() {
			return metric
		}
	}

	// We didn't find a matching metric, so create a new one with the same name, unit and metadata
	metric := ilm.Metrics().AppendEmpty()
	metric.SetDescription(searchedMetric.Description())
	metric.SetName(searchedMetric.Name())
	metric.SetUnit(searchedMetric.Unit())
	searchedMetric.Metadata().CopyTo(metric.Metadata())

	// Move other special type specific values
	//exhaustive:enforce
	switch searchedMetric.Type() {
	case pmetric.MetricTypeHistogram:
		metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality())

	case pmetric.MetricTypeExponentialHistogram:
		metric.SetEmptyExponentialHistogram().SetAggregationTemporality(searchedMetric.ExponentialHistogram().AggregationTemporality())

	case pmetric.MetricTypeSum:
		metric.SetEmptySum().SetAggregationTemporality(searchedMetric.Sum().AggregationTemporality())
		metric.Sum().SetIsMonotonic(searchedMetric.Sum().IsMonotonic())

	case pmetric.MetricTypeGauge:
		metric.SetEmptyGauge()

	case pmetric.MetricTypeSummary:
		metric.SetEmptySummary()

	case pmetric.MetricTypeEmpty:
	}

	return metric
}

// getGroupedMetricsFromAttributes returns the Metric in the appropriate Resource matching the specified Attributes
func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes(
	ctx context.Context,
	mg *metricsGroup,
	originResourceMetrics pmetric.ResourceMetrics,
	ilm pmetric.ScopeMetrics,
	metric pmetric.Metric,
	attributes pcommon.Map,
) pmetric.Metric {
	toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes)
	if toBeGrouped {
		gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1)
		// These attributes are going to be moved from datapoint to resource level,
		// so we can delete those on the datapoint
		deleteAttributes(requiredAttributes, attributes)
	} else {
		gap.telemetryBuilder.ProcessorGroupbyattrsNumNonGroupedMetrics.Add(ctx, 1)
	}

	// Get the ResourceMetrics matching with these attributes
	groupedResourceMetrics := mg.findOrCreateResourceMetrics(originResourceMetrics.Resource(), requiredAttributes)

	// Get the corresponding instrumentation library
	groupedInstrumentationLibrary := matchingScopeMetrics(groupedResourceMetrics, ilm.Scope())

	// Return the metric in this resource
	return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric)
}
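// Illustrative sketch, not part of the original source: extractGroupingAttributes
// pulls out the configured keys, and deleteAttributes then removes them from the
// record, which is the promotion step processTraces/processLogs perform before
// calling findOrCreateResource*. The key names are invented; the partial
// groupByAttrsProcessor literal is safe here because extractGroupingAttributes
// touches only groupByKeys.
func exampleExtractAndPromote() {
	gap := &groupByAttrsProcessor{groupByKeys: []string{"tenant"}}

	attrs := pcommon.NewMap()
	attrs.PutStr("tenant", "tenant-a")
	attrs.PutStr("http.method", "GET")

	matched, extracted := gap.extractGroupingAttributes(attrs)
	if matched {
		// "tenant" moves to the resource; the record keeps only "http.method".
		deleteAttributes(extracted, attrs)
	}
	_ = extracted // {"tenant": "tenant-a"}, merged into the resource by buildReferenceResource
}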
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package logdedupprocessor provides a processor that deduplicates logs by
// aggregating identical log records over an interval and emitting a single
// record with a count attribute.
package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"go.opentelemetry.io/collector/component"
)

// Config defaults
const (
	// defaultInterval is the default export interval.
	defaultInterval = 10 * time.Second

	// defaultLogCountAttribute is the default log count attribute
	defaultLogCountAttribute = "log_count"

	// defaultTimezone is the default timezone
	defaultTimezone = "UTC"

	// bodyField is the name of the body field
	bodyField = "body"

	// attributeField is the name of the attribute field
	attributeField = "attributes"
)

// Config errors
var (
	errInvalidLogCountAttribute = errors.New("log_count_attribute must be set")
	errInvalidInterval          = errors.New("interval must be greater than 0")
	errCannotExcludeBody        = errors.New("cannot exclude the entire body")
	errCannotIncludeBody        = errors.New("cannot include the entire body")
)

// Config is the config of the processor.
type Config struct {
	LogCountAttribute string        `mapstructure:"log_count_attribute"`
	Interval          time.Duration `mapstructure:"interval"`
	Timezone          string        `mapstructure:"timezone"`
	ExcludeFields     []string      `mapstructure:"exclude_fields"`
	IncludeFields     []string      `mapstructure:"include_fields"`
	Conditions        []string      `mapstructure:"conditions"`
}

// createDefaultConfig returns the default config for the processor.
func createDefaultConfig() component.Config {
	return &Config{
		LogCountAttribute: defaultLogCountAttribute,
		Interval:          defaultInterval,
		Timezone:          defaultTimezone,
		ExcludeFields:     []string{},
		IncludeFields:     []string{},
		Conditions:        []string{},
	}
}

// Validate validates the configuration
func (c Config) Validate() error {
	if c.Interval <= 0 {
		return errInvalidInterval
	}

	if c.LogCountAttribute == "" {
		return errInvalidLogCountAttribute
	}

	_, err := time.LoadLocation(c.Timezone)
	if err != nil {
		return fmt.Errorf("timezone is invalid: %w", err)
	}

	if len(c.ExcludeFields) > 0 && len(c.IncludeFields) > 0 {
		return errors.New("cannot define both exclude_fields and include_fields")
	}

	if err = c.validateExcludeFields(); err != nil {
		return err
	}

	if err = c.validateIncludeFields(); err != nil {
		return err
	}

	return nil
}

// validateExcludeFields validates the exclude fields
func (c Config) validateExcludeFields() error {
	knownExcludeFields := make(map[string]struct{})

	for _, field := range c.ExcludeFields {
		// Special check to make sure the entire body is not excluded
		if field == bodyField {
			return errCannotExcludeBody
		}

		// Split and ensure the field starts with `body` or `attributes`
		parts := strings.Split(field, fieldDelimiter)
		if parts[0] != bodyField && parts[0] != attributeField {
			return fmt.Errorf("an exclude_fields entry must start with %s or %s", bodyField, attributeField)
		}

		// If a field is valid make sure we haven't already seen it
		if _, ok := knownExcludeFields[field]; ok {
			return fmt.Errorf("duplicate exclude_fields entry %s", field)
		}

		knownExcludeFields[field] = struct{}{}
	}

	return nil
}

// validateIncludeFields validates the include fields
func (c Config) validateIncludeFields() error {
	knownFields := make(map[string]struct{})

	for _, field := range c.IncludeFields {
		// Special check to make sure the entire body is not included
		if field == bodyField {
			return errCannotIncludeBody
		}

		// Split and ensure the field starts with `body` or `attributes`
		parts := strings.Split(field, fieldDelimiter)
		if parts[0] != bodyField && parts[0] != attributeField {
			return fmt.Errorf("an include_fields entry must start with %s or %s", bodyField, attributeField)
		}

		// If a field is valid make sure we haven't already seen it
		if _, ok := knownFields[field]; ok {
			return fmt.Errorf("duplicate include_fields entry %s", field)
		}

		knownFields[field] = struct{}{}
	}

	return nil
}
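// Illustrative sketch, not part of the original source: exercising Validate
// with the defaults plus one exclude field. The field "attributes.password"
// is invented for the example.
func exampleDedupConfigValidation() {
	cfg := createDefaultConfig().(*Config)
	cfg.ExcludeFields = []string{"attributes.password"}
	_ = cfg.Validate() // nil: interval, count attribute, timezone and field prefix are all valid

	bad := createDefaultConfig().(*Config)
	bad.ExcludeFields = []string{"body"}
	_ = bad.Validate() // errCannotExcludeBody: the entire body cannot be excluded
}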
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"

import (
	"context"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)

// Attribute names for first and last observed timestamps
const (
	firstObservedTSAttr = "first_observed_timestamp"
	lastObservedTSAttr  = "last_observed_timestamp"
)

// timeNow can be reassigned for testing
var timeNow = time.Now

// logAggregator tracks the number of times a specific logRecord has been seen.
type logAggregator struct {
	resources         map[uint64]*resourceAggregator
	logCountAttribute string
	timezone          *time.Location
	telemetryBuilder  *metadata.TelemetryBuilder
	dedupFields       []string
}

// newLogAggregator creates a new logAggregator.
func newLogAggregator(logCountAttribute string, timezone *time.Location, telemetryBuilder *metadata.TelemetryBuilder, dedupFields []string) *logAggregator {
	return &logAggregator{
		resources:         make(map[uint64]*resourceAggregator),
		logCountAttribute: logCountAttribute,
		timezone:          timezone,
		telemetryBuilder:  telemetryBuilder,
		dedupFields:       dedupFields,
	}
}

// Export exports the counter as a Logs
func (l *logAggregator) Export(ctx context.Context) plog.Logs {
	logs := plog.NewLogs()

	for _, resourceAggregator := range l.resources {
		rl := logs.ResourceLogs().AppendEmpty()
		resourceAggregator.resource.CopyTo(rl.Resource())

		for _, scopeAggregator := range resourceAggregator.scopeCounters {
			sl := rl.ScopeLogs().AppendEmpty()
			scopeAggregator.scope.CopyTo(sl.Scope())

			for _, logAggregator := range scopeAggregator.logCounters {
				// Record aggregated logs records
				l.telemetryBuilder.DedupProcessorAggregatedLogs.Record(ctx, logAggregator.count)

				lr := sl.LogRecords().AppendEmpty()
				logAggregator.logRecord.CopyTo(lr)

				// Set log record timestamps
				lr.SetTimestamp(pcommon.NewTimestampFromTime(timeNow()))
				lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(logAggregator.firstObservedTimestamp))

				// Add attributes for log count and first/last observed timestamps
				lr.Attributes().EnsureCapacity(lr.Attributes().Len() + 3)
				lr.Attributes().PutInt(l.logCountAttribute, logAggregator.count)
				firstTimestampStr := logAggregator.firstObservedTimestamp.In(l.timezone).Format(time.RFC3339)
				lr.Attributes().PutStr(firstObservedTSAttr, firstTimestampStr)
				lastTimestampStr := logAggregator.lastObservedTimestamp.In(l.timezone).Format(time.RFC3339)
				lr.Attributes().PutStr(lastObservedTSAttr, lastTimestampStr)
			}
		}
	}

	return logs
}

// Add adds the logRecord to the resource aggregator that is identified by the resource attributes
func (l *logAggregator) Add(resource pcommon.Resource, scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
	key := getResourceKey(resource)
	resourceAggregator, ok := l.resources[key]
	if !ok {
		resourceAggregator = newResourceAggregator(resource, l.dedupFields)
		l.resources[key] = resourceAggregator
	}
	resourceAggregator.Add(scope, logRecord)
}

// Reset resets the counter.
func (l *logAggregator) Reset() {
	l.resources = make(map[uint64]*resourceAggregator)
}

// resourceAggregator dimensions the counter by resource.
type resourceAggregator struct {
	resource      pcommon.Resource
	scopeCounters map[uint64]*scopeAggregator
	dedupFields   []string
}

// newResourceAggregator creates a new resourceAggregator.
func newResourceAggregator(resource pcommon.Resource, dedupFields []string) *resourceAggregator {
	return &resourceAggregator{
		resource:      resource,
		scopeCounters: make(map[uint64]*scopeAggregator),
		dedupFields:   dedupFields,
	}
}

// Add increments the counter that the logRecord matches.
func (r *resourceAggregator) Add(scope pcommon.InstrumentationScope, logRecord plog.LogRecord) {
	key := getScopeKey(scope)
	scopeAggregator, ok := r.scopeCounters[key]
	if !ok {
		scopeAggregator = newScopeAggregator(scope, r.dedupFields)
		r.scopeCounters[key] = scopeAggregator
	}
	scopeAggregator.Add(logRecord)
}

// scopeAggregator dimensions the counter by scope.
type scopeAggregator struct {
	scope       pcommon.InstrumentationScope
	logCounters map[uint64]*logCounter
	dedupFields []string
}

// newScopeAggregator creates a new scopeAggregator.
func newScopeAggregator(scope pcommon.InstrumentationScope, dedupFields []string) *scopeAggregator {
	return &scopeAggregator{
		scope:       scope,
		logCounters: make(map[uint64]*logCounter),
		dedupFields: dedupFields,
	}
}

// Add increments the counter that the logRecord matches.
func (s *scopeAggregator) Add(logRecord plog.LogRecord) {
	key := getLogKey(logRecord, s.dedupFields)
	lc, ok := s.logCounters[key]
	if !ok {
		lc = newLogCounter(logRecord)
		s.logCounters[key] = lc
	}
	lc.Increment()
}

// logCounter is a counter for a log record.
type logCounter struct {
	logRecord              plog.LogRecord
	firstObservedTimestamp time.Time
	lastObservedTimestamp  time.Time
	count                  int64
}

// newLogCounter creates a new logCounter.
func newLogCounter(logRecord plog.LogRecord) *logCounter {
	return &logCounter{
		logRecord:              logRecord,
		count:                  0,
		firstObservedTimestamp: timeNow().UTC(),
		lastObservedTimestamp:  timeNow().UTC(),
	}
}

// Increment increments the counter.
func (a *logCounter) Increment() {
	a.lastObservedTimestamp = timeNow().UTC()
	a.count++
}

// getResourceKey creates a unique hash for the resource to use as a map key
func getResourceKey(resource pcommon.Resource) uint64 {
	return pdatautil.Hash64(
		pdatautil.WithMap(resource.Attributes()),
	)
}

// getScopeKey creates a unique hash for the scope to use as a map key
func getScopeKey(scope pcommon.InstrumentationScope) uint64 {
	return pdatautil.Hash64(
		pdatautil.WithMap(scope.Attributes()),
		pdatautil.WithString(scope.Name()),
		pdatautil.WithString(scope.Version()),
	)
}

// getLogKey creates a unique hash for the log record to use as a map key.
// If dedupFields is non-empty, it is used to determine the fields whose values are hashed.
func getLogKey(logRecord plog.LogRecord, dedupFields []string) uint64 {
	if len(dedupFields) > 0 {
		var opts []pdatautil.HashOption

		for _, field := range dedupFields {
			parts := splitField(field)
			var m pcommon.Map
			switch parts[0] {
			case bodyField:
				if logRecord.Body().Type() == pcommon.ValueTypeMap {
					m = logRecord.Body().Map()
				}
			case attributeField:
				m = logRecord.Attributes()
			}
			value, ok := getKeyValue(m, parts[1:])
			if ok {
				opts = append(opts, pdatautil.WithString(value.AsString()))
			}
		}

		if len(opts) > 0 {
			return pdatautil.Hash64(opts...)
		}
	}

	return pdatautil.Hash64(
		pdatautil.WithMap(logRecord.Attributes()),
		pdatautil.WithValue(logRecord.Body()),
		pdatautil.WithString(logRecord.SeverityNumber().String()),
		pdatautil.WithString(logRecord.SeverityText()),
	)
}

func getKeyValue(valueMap pcommon.Map, keyParts []string) (pcommon.Value, bool) {
	nextKeyPart, remainingParts := keyParts[0], keyParts[1:]

	// Look for the value associated with the next key part.
	// If we don't find it then return
	value, ok := valueMap.Get(nextKeyPart)
	if !ok {
		return pcommon.NewValueEmpty(), false
	}

	// No more key parts means we have found the value
	if len(remainingParts) == 0 {
		return valueMap.Get(nextKeyPart)
	}

	// If the value is a map then recurse through with the remaining parts
	if value.Type() == pcommon.ValueTypeMap {
		return getKeyValue(value.Map(), remainingParts)
	}

	return pcommon.NewValueEmpty(), false
}
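// Illustrative sketch, not part of the original source: with a dedup field of
// body.request.id, two records that differ only outside that field produce the
// same key, so they are counted together. The field and attribute names are
// invented for the example.
func exampleGetLogKeyWithDedupFields() {
	lr1 := plog.NewLogRecord()
	lr1.Body().SetEmptyMap().PutEmptyMap("request").PutStr("id", "abc-123")
	lr1.Attributes().PutStr("level", "info")

	lr2 := plog.NewLogRecord()
	lr2.Body().SetEmptyMap().PutEmptyMap("request").PutStr("id", "abc-123")
	lr2.Attributes().PutStr("level", "error") // differs, but is not part of the key

	k1 := getLogKey(lr1, []string{"body.request.id"})
	k2 := getLogKey(lr2, []string{"body.request.id"})
	_ = k1 == k2 // true: only body.request.id feeds the hash
}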
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)

// NewFactory creates a new factory for the processor.
func NewFactory() processor.Factory {
	return processor.NewFactory(
		metadata.Type,
		createDefaultConfig,
		processor.WithLogs(createLogsProcessor, metadata.LogsStability),
	)
}

// createLogsProcessor creates a log processor.
func createLogsProcessor(_ context.Context, settings processor.Settings, cfg component.Config, consumer consumer.Logs) (processor.Logs, error) {
	processorCfg, ok := cfg.(*Config)
	if !ok {
		return nil, fmt.Errorf("invalid config type: %+v", cfg)
	}

	if err := processorCfg.Validate(); err != nil {
		return nil, err
	}

	logProcessor, err := newProcessor(processorCfg, consumer, settings)
	if err != nil {
		return nil, fmt.Errorf("error creating processor: %w", err)
	}

	if len(processorCfg.Conditions) == 0 {
		logProcessor.conditions = nil
	} else {
		conditions, err := filterottl.NewBoolExprForLog(
			processorCfg.Conditions,
			filterottl.StandardLogFuncs(),
			ottl.PropagateError,
			settings.TelemetrySettings,
		)
		if err != nil {
			return nil, fmt.Errorf("invalid condition: %w", err)
		}
		logProcessor.conditions = conditions
	}

	return logProcessor, nil
}
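// Illustrative sketch, not part of the original source: Conditions entries are
// OTTL boolean expressions compiled by createLogsProcessor above; records that
// match are deduplicated while the rest pass through unchanged (see
// ConsumeLogs). The expression below is an invented example.
func exampleConditionsConfig() *Config {
	cfg := createDefaultConfig().(*Config)
	cfg.Conditions = []string{`attributes["service.name"] == "noisy-service"`}
	return cfg
}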
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

const (
	// fieldDelimiter is the delimiter used to split a field key into its parts.
	fieldDelimiter = "."

	// fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key.
	fieldEscapeKeyReplacement = "{TEMP_REPLACE}"
)

// fieldRemover handles removing excluded fields from log records
type fieldRemover struct {
	fields []*field
}

// field represents a field and its compound key to match on
type field struct {
	keyParts []string
}

// newFieldRemover creates a new field remover based on the passed in field keys
func newFieldRemover(fieldKeys []string) *fieldRemover {
	fe := &fieldRemover{
		fields: make([]*field, 0, len(fieldKeys)),
	}

	for _, f := range fieldKeys {
		fe.fields = append(fe.fields, &field{
			keyParts: splitField(f),
		})
	}

	return fe
}

// RemoveFields removes any body or attribute fields that match in the log record
func (fe *fieldRemover) RemoveFields(logRecord plog.LogRecord) {
	for _, field := range fe.fields {
		field.removeField(logRecord)
	}
}

// removeField removes the field from the log record if it exists
func (f *field) removeField(logRecord plog.LogRecord) {
	firstPart, remainingParts := f.keyParts[0], f.keyParts[1:]

	switch firstPart {
	case bodyField:
		// If body is a map then recurse through to remove the field
		if logRecord.Body().Type() == pcommon.ValueTypeMap {
			removeFieldFromMap(logRecord.Body().Map(), remainingParts)
		}
	case attributeField:
		// Remove all attributes
		if len(remainingParts) == 0 {
			logRecord.Attributes().Clear()
			return
		}

		// Recurse through map and remove fields
		removeFieldFromMap(logRecord.Attributes(), remainingParts)
	}
}

// removeFieldFromMap recurses through the map and removes the field if it's found.
func removeFieldFromMap(valueMap pcommon.Map, keyParts []string) {
	nextKeyPart, remainingParts := keyParts[0], keyParts[1:]

	// Look for the value associated with the next key part.
	// If we don't find it then return
	value, ok := valueMap.Get(nextKeyPart)
	if !ok {
		return
	}

	// No more key parts means we have found the value, so remove it
	if len(remainingParts) == 0 {
		valueMap.Remove(nextKeyPart)
		return
	}

	// If the value is a map then recurse through with the remaining parts
	if value.Type() == pcommon.ValueTypeMap {
		removeFieldFromMap(value.Map(), remainingParts)
	}
}

// splitField splits a field key into its parts.
// It replaces escaped delimiters with the full delimiter after splitting.
func splitField(fieldKey string) []string {
	escapedKey := strings.ReplaceAll(fieldKey, fmt.Sprintf("\\%s", fieldDelimiter), fieldEscapeKeyReplacement)
	keyParts := strings.Split(escapedKey, fieldDelimiter)

	// Replace the temporarily escaped delimiters with the actual delimiter.
	for i := range keyParts {
		keyParts[i] = strings.ReplaceAll(keyParts[i], fieldEscapeKeyReplacement, fieldDelimiter)
	}

	return keyParts
}
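// Illustrative sketch, not part of the original source: splitField keeps an
// escaped delimiter as a literal dot, so a key that itself contains a dot can
// be addressed. The key below is invented.
func exampleSplitField() []string {
	// Yields []string{"attributes", "http.status"}: the escaped dot survives the split.
	return splitField(`attributes.http\.status`)
}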
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package logdedupprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor"

import (
	"context"
	"fmt"
	"sync"
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/processor"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata"
)

// logDedupProcessor deduplicates logs by counting duplicate instances over an interval.
type logDedupProcessor struct {
	emitInterval time.Duration
	conditions   *ottl.ConditionSequence[ottllog.TransformContext]
	aggregator   *logAggregator
	remover      *fieldRemover
	nextConsumer consumer.Logs
	logger       *zap.Logger
	cancel       context.CancelFunc
	wg           sync.WaitGroup
	mux          sync.Mutex
}

func newProcessor(cfg *Config, nextConsumer consumer.Logs, settings processor.Settings) (*logDedupProcessor, error) {
	telemetryBuilder, err := metadata.NewTelemetryBuilder(settings.TelemetrySettings)
	if err != nil {
		return nil, fmt.Errorf("failed to create telemetry builder: %w", err)
	}

	// This should not happen due to config validation, but we check anyway.
	timezone, err := time.LoadLocation(cfg.Timezone)
	if err != nil {
		return nil, fmt.Errorf("invalid timezone: %w", err)
	}

	return &logDedupProcessor{
		emitInterval: cfg.Interval,
		aggregator:   newLogAggregator(cfg.LogCountAttribute, timezone, telemetryBuilder, cfg.IncludeFields),
		remover:      newFieldRemover(cfg.ExcludeFields),
		nextConsumer: nextConsumer,
		logger:       settings.Logger,
	}, nil
}

// Start starts the processor.
func (p *logDedupProcessor) Start(ctx context.Context, _ component.Host) error {
	ctx, cancel := context.WithCancel(ctx)
	p.cancel = cancel

	p.wg.Add(1)
	go p.handleExportInterval(ctx)

	return nil
}

// Capabilities returns the consumer's capabilities.
func (p *logDedupProcessor) Capabilities() consumer.Capabilities {
	return consumer.Capabilities{MutatesData: true}
}

// Shutdown stops the processor.
func (p *logDedupProcessor) Shutdown(_ context.Context) error {
	if p.cancel != nil {
		// Call cancel to stop the export interval goroutine and wait for it to finish.
		p.cancel()
		p.wg.Wait()
	}
	return nil
}

// ConsumeLogs processes the logs.
func (p *logDedupProcessor) ConsumeLogs(ctx context.Context, pl plog.Logs) error {
	p.mux.Lock()
	defer p.mux.Unlock()

	for i := 0; i < pl.ResourceLogs().Len(); i++ {
		rl := pl.ResourceLogs().At(i)
		resource := rl.Resource()

		for j := 0; j < rl.ScopeLogs().Len(); j++ {
			sl := rl.ScopeLogs().At(j)
			scope := sl.Scope()
			logs := sl.LogRecords()

			logs.RemoveIf(func(logRecord plog.LogRecord) bool {
				if p.conditions == nil {
					p.aggregateLog(logRecord, scope, resource)
					return true
				}

				logCtx := ottllog.NewTransformContext(logRecord, scope, resource, sl, rl)
				logMatch, err := p.conditions.Eval(ctx, logCtx)
				if err != nil {
					p.logger.Error("error matching conditions", zap.Error(err))
					return false
				}
				if logMatch {
					p.aggregateLog(logRecord, scope, resource)
				}
				return logMatch
			})
		}
	}

	// immediately consume any logs that didn't match any conditions
	if pl.LogRecordCount() > 0 {
		err := p.nextConsumer.ConsumeLogs(ctx, pl)
		if err != nil {
			p.logger.Error("failed to consume logs", zap.Error(err))
		}
	}

	return nil
}

func (p *logDedupProcessor) aggregateLog(logRecord plog.LogRecord, scope pcommon.InstrumentationScope, resource pcommon.Resource) {
	p.remover.RemoveFields(logRecord)
	p.aggregator.Add(resource, scope, logRecord)
}

// handleExportInterval exports aggregated logs at the configured interval.
func (p *logDedupProcessor) handleExportInterval(ctx context.Context) {
	defer p.wg.Done()

	ticker := time.NewTicker(p.emitInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			// Export any remaining logs
			p.exportLogs(ctx)
			if err := ctx.Err(); err != context.Canceled {
				p.logger.Error("context error", zap.Error(err))
			}
			return
		case <-ticker.C:
			p.exportLogs(ctx)
		}
	}
}

// exportLogs exports the logs to the next consumer.
func (p *logDedupProcessor) exportLogs(ctx context.Context) {
	p.mux.Lock()
	defer p.mux.Unlock()

	logs := p.aggregator.Export(ctx)
	// Only send logs if we have some
	if logs.LogRecordCount() > 0 {
		err := p.nextConsumer.ConsumeLogs(ctx, logs)
		if err != nil {
			p.logger.Error("failed to consume logs", zap.Error(err))
		}
	}
	p.aggregator.Reset()
}
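// Illustrative sketch, not part of the original source: two identical records
// collapse into one exported record carrying log_count=2 plus the first/last
// observed timestamp attributes. It assumes the componenttest helper
// (go.opentelemetry.io/collector/component/componenttest) for no-op telemetry
// settings; the body text and the use of time.UTC are invented for the example.
func exampleAggregateAndExport(ctx context.Context) (plog.Logs, error) {
	tb, err := metadata.NewTelemetryBuilder(componenttest.NewNopTelemetrySettings())
	if err != nil {
		return plog.NewLogs(), err
	}
	agg := newLogAggregator(defaultLogCountAttribute, time.UTC, tb, nil)

	resource := pcommon.NewResource()
	scope := pcommon.NewInstrumentationScope()
	lr := plog.NewLogRecord()
	lr.Body().SetStr("connection refused")

	agg.Add(resource, scope, lr)
	agg.Add(resource, scope, lr) // same key: counted, not duplicated

	return agg.Export(ctx), nil // one record with log_count=2
}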
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"

import (
	"fmt"
	"math"

	"go.opentelemetry.io/collector/component"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
)

type AttributeSource string

const (
	traceIDAttributeSource = AttributeSource("traceID")
	recordAttributeSource  = AttributeSource("record")

	defaultAttributeSource = traceIDAttributeSource
)

var validAttributeSource = map[AttributeSource]bool{
	traceIDAttributeSource: true,
	recordAttributeSource:  true,
}

// Config has the configuration guiding the sampler processor.
type Config struct {
	// SamplingPercentage is the percentage rate at which traces or logs are going to be sampled.
	// Defaults to zero, i.e., no sampling.
	// Values greater than or equal to 100 are treated as "sample all traces/logs".
	SamplingPercentage float32 `mapstructure:"sampling_percentage"`

	// HashSeed allows one to configure the hashing seed. This is important in scenarios where multiple layers of collectors
	// have different sampling rates: if they use the same seed, all items passing one layer may pass the other even if they
	// have different sampling rates. Configuring different seeds avoids that.
	HashSeed uint32 `mapstructure:"hash_seed"`

	// Mode selects the sampling behavior. Supported values:
	//
	// - "hash_seed": the legacy behavior of this processor.
	//   Using an FNV hash combined with the HashSeed value, this
	//   sampler performs a non-consistent probabilistic
	//   downsampling. The number of spans output is expected to
	//   equal SamplingPercentage (as a ratio) times the number of
	//   spans input, assuming good behavior from FNV and good
	//   entropy in the hashed attributes or TraceID.
	//
	// - "equalizing": Using an OTel-specified consistent sampling
	//   mechanism, this sampler selectively reduces the effective
	//   sampling probability of arriving spans. This can be
	//   useful to select a small fraction of complete traces from
	//   a stream with mixed sampling rates. The rate of spans
	//   passing through depends on how much sampling has already
	//   been applied. If an arriving span was head sampled at
	//   the same probability it passes through. If the span
	//   arrives with lower probability, a warning is logged
	//   because it means this sampler is configured with too
	//   large a sampling probability to ensure complete traces.
	//
	// - "proportional": Using an OTel-specified consistent sampling
	//   mechanism, this sampler reduces the effective sampling
	//   probability of each span by `SamplingPercentage` (as a ratio).
	Mode SamplerMode `mapstructure:"mode"`

	// FailClosed indicates to not sample data (the processor will
	// fail "closed") in case of error, such as failure to parse
	// the tracestate field or missing the randomness attribute.
	//
	// When FailClosed is false, failure cases are sampled (the
	// processor fails "open"). Sampling priority-based decisions are
	// made after FailClosed is processed, making it possible to sample
	// despite errors using priority.
	FailClosed bool `mapstructure:"fail_closed"`

	// SamplingPrecision is how many hex digits of sampling
	// threshold will be encoded, from 1 up to 14. Default is 4.
	// Zero is rejected by Validate.
	SamplingPrecision int `mapstructure:"sampling_precision"`

	///////
	// Logs only fields below.

	// AttributeSource (logs only) defines where to look for the attribute specified by from_attribute.
	// The allowed values are `traceID` or `record`. Default is `traceID`.
	AttributeSource `mapstructure:"attribute_source"`

	// FromAttribute (logs only) is the optional name of a log record attribute used for sampling purposes, such as a
	// unique log record ID. The value of the attribute is only used if the trace ID is absent or if `attribute_source` is set to `record`.
	FromAttribute string `mapstructure:"from_attribute"`

	// SamplingPriority (logs only) enables using a log record attribute as the sampling priority of the log record.
	SamplingPriority string `mapstructure:"sampling_priority"`
}

var _ component.Config = (*Config)(nil)

// Validate checks if the processor configuration is valid
func (cfg *Config) Validate() error {
	pct := float64(cfg.SamplingPercentage)

	if math.IsInf(pct, 0) || math.IsNaN(pct) {
		return fmt.Errorf("sampling rate is invalid: %f%%", cfg.SamplingPercentage)
	}
	ratio := pct / 100.0

	switch {
	case ratio < 0:
		return fmt.Errorf("sampling rate is negative: %f%%", cfg.SamplingPercentage)
	case ratio == 0:
		// Special case
	case ratio < sampling.MinSamplingProbability:
		// Too-small case
		return fmt.Errorf("sampling rate is too small: %g%%", cfg.SamplingPercentage)
	default:
		// Note that ratio > 1 is specifically allowed by the README, taken to mean 100%
	}

	if cfg.AttributeSource != "" && !validAttributeSource[cfg.AttributeSource] {
		return fmt.Errorf("invalid attribute source: %v. Expected: %v or %v", cfg.AttributeSource, traceIDAttributeSource, recordAttributeSource)
	}

	if cfg.SamplingPrecision == 0 {
		return fmt.Errorf("invalid sampling precision: 0")
	} else if cfg.SamplingPrecision > sampling.NumHexDigits {
		return fmt.Errorf("sampling precision is too great, should be <= 14: %d", cfg.SamplingPrecision)
	}

	return nil
}
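// Illustrative sketch, not part of the original source: a 10% equalizing
// sampler with the default precision passes Validate. The values are invented.
func exampleSamplerConfig() error {
	cfg := &Config{
		SamplingPercentage: 10,
		Mode:               Equalizing,
		SamplingPrecision:  4,
		AttributeSource:    defaultAttributeSource,
	}
	// ratio 0.10 is within range, precision is within [1, 14], and the
	// attribute source is valid, so this returns nil.
	return cfg.Validate()
}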
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

//go:generate mdatagen metadata.yaml

package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)

// The default precision is 4 hex digits, slightly more than the original
// component logic's 14 bits of precision.
const defaultPrecision = 4

// NewFactory returns a new factory for the Probabilistic sampler processor.
func NewFactory() processor.Factory {
	return processor.NewFactory(
		metadata.Type,
		createDefaultConfig,
		processor.WithTraces(createTracesProcessor, metadata.TracesStability),
		processor.WithLogs(createLogsProcessor, metadata.LogsStability))
}

func createDefaultConfig() component.Config {
	return &Config{
		AttributeSource:   defaultAttributeSource,
		FailClosed:        true,
		Mode:              modeUnset,
		SamplingPrecision: defaultPrecision,
	}
}

// createTracesProcessor creates a trace processor based on this config.
func createTracesProcessor(
	ctx context.Context,
	set processor.Settings,
	cfg component.Config,
	nextConsumer consumer.Traces,
) (processor.Traces, error) {
	return newTracesProcessor(ctx, set, cfg.(*Config), nextConsumer)
}

// createLogsProcessor creates a log processor based on this config.
func createLogsProcessor(
	ctx context.Context,
	set processor.Settings,
	cfg component.Config,
	nextConsumer consumer.Logs,
) (processor.Logs, error) {
	return newLogsProcessor(ctx, set, nextConsumer, cfg.(*Config))
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"

import (
	"encoding/binary"
	"hash/fnv"
)

// computeHash creates a hash using the FNV-1a algorithm
func computeHash(b []byte, seed uint32) uint32 {
	hash := fnv.New32a()
	// the implementation fnv.Write() does not return an error, see hash/fnv/fnv.go
	_, _ = hash.Write(i32tob(seed))
	_, _ = hash.Write(b)
	return hash.Sum32()
}

// i32tob converts a seed to a byte array to be used as part of fnv.Write()
func i32tob(val uint32) []byte {
	r := make([]byte, 4)
	binary.LittleEndian.PutUint32(r, val)
	return r
}
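// Illustrative sketch, not part of the original source: how the legacy
// hash_seed decision uses computeHash. It mirrors the 14-bit bucket constants
// defined in sampler_mode.go; the trace ID bytes, seed, and percentage below
// are invented for the example.
func exampleHashSeedDecision() bool {
	traceIDBytes := []byte{
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
	}
	const seed uint32 = 22
	const samplingPercentage = 25.0

	// Keep the low 14 bits of the FNV-1a hash and sample when the bucket
	// falls below the scaled rate: percentage * (2^14 / 100).
	bucket := computeHash(traceIDBytes, seed) & bitMaskHashBuckets
	return bucket < uint32(samplingPercentage*percentageScaleFactor)
}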
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor"

import (
	"context"
	"errors"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling"
	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata"
)

type logsProcessor struct {
	sampler dataSampler

	samplingPriority string
	precision        int
	failClosed       bool
	logger           *zap.Logger
	telemetryBuilder *metadata.TelemetryBuilder
}

type recordCarrier struct {
	record plog.LogRecord

	parsed struct {
		tvalue    string
		threshold sampling.Threshold

		rvalue     string
		randomness sampling.Randomness
	}
}

var _ samplingCarrier = &recordCarrier{}

func (rc *recordCarrier) get(key string) string {
	val, ok := rc.record.Attributes().Get(key)
	if !ok || val.Type() != pcommon.ValueTypeStr {
		return ""
	}
	return val.Str()
}

func newLogRecordCarrier(l plog.LogRecord) (samplingCarrier, error) {
	var ret error
	carrier := &recordCarrier{
		record: l,
	}
	if tvalue := carrier.get("sampling.threshold"); len(tvalue) != 0 {
		th, err := sampling.TValueToThreshold(tvalue)
		if err != nil {
			ret = errors.Join(err, ret)
		} else {
			carrier.parsed.tvalue = tvalue
			carrier.parsed.threshold = th
		}
	}
	if rvalue := carrier.get("sampling.randomness"); len(rvalue) != 0 {
		rnd, err := sampling.RValueToRandomness(rvalue)
		if err != nil {
			ret = errors.Join(err, ret)
		} else {
			carrier.parsed.rvalue = rvalue
			carrier.parsed.randomness = rnd
		}
	}
	return carrier, ret
}

func (rc *recordCarrier) threshold() (sampling.Threshold, bool) {
	return rc.parsed.threshold, len(rc.parsed.tvalue) != 0
}

func (rc *recordCarrier) explicitRandomness() (randomnessNamer, bool) {
	if len(rc.parsed.rvalue) == 0 {
		return newMissingRandomnessMethod(), false
	}
	return newSamplingRandomnessMethod(rc.parsed.randomness), true
}

func (rc *recordCarrier) updateThreshold(th sampling.Threshold) error {
	exist, has := rc.threshold()
	if has && sampling.ThresholdLessThan(th, exist) {
		return sampling.ErrInconsistentSampling
	}
	rc.record.Attributes().PutStr("sampling.threshold", th.TValue())
	return nil
}

func (rc *recordCarrier) setExplicitRandomness(rnd randomnessNamer) {
	rc.parsed.randomness = rnd.randomness()
	rc.parsed.rvalue = rnd.randomness().RValue()
	rc.record.Attributes().PutStr("sampling.randomness", rnd.randomness().RValue())
}

func (rc *recordCarrier) clearThreshold() {
	rc.parsed.threshold = sampling.NeverSampleThreshold
	rc.parsed.tvalue = ""
	rc.record.Attributes().Remove("sampling.threshold")
}

func (rc *recordCarrier) reserialize() error {
	return nil
}

func (*neverSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
	// We return a fake randomness value, since it will not be used.
	// This avoids a consistency check error for missing randomness.
	lrc, err := newLogRecordCarrier(logRec)
	return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), lrc, err
}

// randomnessFromLogRecord (hashingSampler) uses a hash function over
// the TraceID or the configured log attribute source.
func (th *hashingSampler) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
	rnd := newMissingRandomnessMethod()
	lrc, err := newLogRecordCarrier(logRec)

	if th.logsTraceIDEnabled {
		value := logRec.TraceID()
		if !value.IsEmpty() {
			rnd = newTraceIDHashingMethod(randomnessFromBytes(value[:], th.hashSeed))
		}
	}

	if isMissing(rnd) && th.logsRandomnessSourceAttribute != "" {
		if value, ok := logRec.Attributes().Get(th.logsRandomnessSourceAttribute); ok {
			by := getBytesFromValue(value)
			if len(by) > 0 {
				rnd = newAttributeHashingMethod(
					th.logsRandomnessSourceAttribute,
					randomnessFromBytes(by, th.hashSeed),
				)
			}
		}
	}

	if err != nil {
		// The sampling.randomness or sampling.threshold attributes
		// had a parse error, in this case.
		lrc = nil
	} else if _, hasRnd := lrc.explicitRandomness(); hasRnd {
		// If the log record contains a randomness value, do not update.
		err = ErrRandomnessInUse
		lrc = nil
	} else if _, hasTh := lrc.threshold(); hasTh {
		// If the log record contains a threshold value, do not update.
		err = ErrThresholdInUse
		lrc = nil
	} else if !isMissing(rnd) {
		// When no sampling information is already present and we have
		// calculated new randomness, add it to the record.
		lrc.setExplicitRandomness(rnd)
	}

	return rnd, lrc, err
}

// randomnessFromLogRecord (consistentTracestateCommon) uses OTEP 235
// semantic conventions, basing its decision only on the TraceID.
func (ctc *consistentTracestateCommon) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
	lrc, err := newLogRecordCarrier(logRec)
	rnd := newMissingRandomnessMethod()

	if err != nil {
		// Parse error in sampling.randomness or sampling.threshold
		lrc = nil
	} else if rv, hasRnd := lrc.explicitRandomness(); hasRnd {
		rnd = rv
	} else if tid := logRec.TraceID(); !tid.IsEmpty() {
		rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(tid))
	}

	return rnd, lrc, err
}

// newLogsProcessor returns a processor.Logs that will perform head sampling according to the given
// configuration.
func newLogsProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Logs, cfg *Config) (processor.Logs, error) {
	telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
	if err != nil {
		return nil, err
	}
	lsp := &logsProcessor{
		sampler:          makeSampler(cfg, true),
		samplingPriority: cfg.SamplingPriority,
		precision:        cfg.SamplingPrecision,
		failClosed:       cfg.FailClosed,
		logger:           set.Logger,
		telemetryBuilder: telemetryBuilder,
	}

	return processorhelper.NewLogs(
		ctx,
		set,
		cfg,
		nextConsumer,
		lsp.processLogs,
		processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true}))
}

func (lsp *logsProcessor) processLogs(ctx context.Context, logsData plog.Logs) (plog.Logs, error) {
	logsData.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool {
		rl.ScopeLogs().RemoveIf(func(ill plog.ScopeLogs) bool {
			ill.LogRecords().RemoveIf(func(l plog.LogRecord) bool {
				return !commonShouldSampleLogic(
					ctx,
					l,
					lsp.sampler,
					lsp.failClosed,
					lsp.sampler.randomnessFromLogRecord,
					lsp.priorityFunc,
					"logs sampler",
					lsp.logger,
					lsp.telemetryBuilder.ProcessorProbabilisticSamplerCountLogsSampled,
				)
			})
			// Filter out empty ScopeLogs
			return ill.LogRecords().Len() == 0
		})
		// Filter out empty ResourceLogs
		return rl.ScopeLogs().Len() == 0
	})
	if logsData.ResourceLogs().Len() == 0 {
		return logsData, processorhelper.ErrSkipProcessingData
	}
	return logsData, nil
}

func (lsp *logsProcessor) priorityFunc(logRec plog.LogRecord, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) {
	// Note: in logs, unlike traces, the sampling priority
	// attribute is interpreted as a request to be sampled.
	if lsp.samplingPriority != "" {
		priorityThreshold := lsp.logRecordToPriorityThreshold(logRec)

		// The priority threshold applies both when it is the
		// never-sample threshold and when it is smaller than the
		// current threshold.
		if priorityThreshold == sampling.NeverSampleThreshold ||
			sampling.ThresholdLessThan(priorityThreshold, threshold) {
			threshold = priorityThreshold
			rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name
		}
	}
	return rnd, threshold
}

func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sampling.Threshold {
	if localPriority, ok := logRec.Attributes().Get(lsp.samplingPriority); ok {
		// Potentially raise the sampling probability to minProb
		minProb := 0.0
		switch localPriority.Type() {
		case pcommon.ValueTypeDouble:
			minProb = localPriority.Double() / 100.0
		case pcommon.ValueTypeInt:
			minProb = float64(localPriority.Int()) / 100.0
		}
		if minProb != 0 {
			if th, err := sampling.ProbabilityToThresholdWithPrecision(minProb, lsp.precision); err == nil {
				// The record has supplied a valid alternative sampling probability
				return th
			}
		}
	}
	return sampling.NeverSampleThreshold
}
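// Illustrative sketch, not part of the original source: how a sampling
// priority attribute maps to a threshold. A priority of 50 requests at least
// 50% sampling probability; the attribute name is invented, and the partial
// logsProcessor literal is safe because logRecordToPriorityThreshold reads
// only samplingPriority and precision.
func examplePriorityThreshold() sampling.Threshold {
	lsp := &logsProcessor{samplingPriority: "sampling.priority", precision: 4}

	lr := plog.NewLogRecord()
	lr.Attributes().PutInt("sampling.priority", 50)

	// Returns the threshold for probability 0.5; a missing or zero priority
	// would yield sampling.NeverSampleThreshold instead.
	return lsp.logRecordToPriorityThreshold(lr)
}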
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" import ( "context" "errors" "fmt" "strconv" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling" ) const ( // These four can happen at runtime and be returned by // randomnessFromXXX() ErrInconsistentArrivingTValue samplerError = "inconsistent arriving threshold: item should not have been sampled" ErrMissingRandomness samplerError = "missing randomness" ErrRandomnessInUse samplerError = "item has sampling randomness, equalizing or proportional mode recommended" ErrThresholdInUse samplerError = "item has sampling threshold, equalizing or proportional mode recommended" ) const ( // Hashing method: The constants below help translate user friendly percentages // to numbers direct used in sampling. numHashBucketsLg2 = 14 numHashBuckets = 0x4000 // Using a power of 2 to avoid division. bitMaskHashBuckets = numHashBuckets - 1 percentageScaleFactor = numHashBuckets / 100.0 ) // samplerErrors are conditions reported by the sampler that are somewhat // ordinary and should log as info-level. type samplerError string var _ error = samplerError("") func (s samplerError) Error() string { return string(s) } // SamplerMode determines which of several modes is used for the // sampling decision. type SamplerMode string const ( // HashSeed applies the hash/fnv hash function originally used in this component. HashSeed SamplerMode = "hash_seed" // Equalizing uses OpenTelemetry consistent probability // sampling information (OTEP 235), applies an absolute // threshold to equalize incoming sampling probabilities. Equalizing SamplerMode = "equalizing" // Proportional uses OpenTelemetry consistent probability // sampling information (OTEP 235), multiplies incoming // sampling probabilities. Proportional SamplerMode = "proportional" // defaultHashSeed is applied when the mode is unset. defaultMode SamplerMode = HashSeed // modeUnset indicates the user has not configured the mode. 
modeUnset SamplerMode = "" ) type randomnessNamer interface { randomness() sampling.Randomness policyName() string } type randomnessMethod sampling.Randomness func (rm randomnessMethod) randomness() sampling.Randomness { return sampling.Randomness(rm) } type ( traceIDHashingMethod struct{ randomnessMethod } traceIDW3CSpecMethod struct{ randomnessMethod } samplingRandomnessMethod struct{ randomnessMethod } samplingPriorityMethod struct{ randomnessMethod } ) type missingRandomnessMethod struct{} func (rm missingRandomnessMethod) randomness() sampling.Randomness { return sampling.AllProbabilitiesRandomness } func (missingRandomnessMethod) policyName() string { return "missing_randomness" } type attributeHashingMethod struct { randomnessMethod attribute string } func (am attributeHashingMethod) policyName() string { return am.attribute } func (traceIDHashingMethod) policyName() string { return "trace_id_hash" } func (samplingRandomnessMethod) policyName() string { return "sampling_randomness" } func (traceIDW3CSpecMethod) policyName() string { return "trace_id_w3c" } func (samplingPriorityMethod) policyName() string { return "sampling_priority" } var ( _ randomnessNamer = missingRandomnessMethod{} _ randomnessNamer = traceIDHashingMethod{} _ randomnessNamer = traceIDW3CSpecMethod{} _ randomnessNamer = samplingRandomnessMethod{} _ randomnessNamer = samplingPriorityMethod{} ) func newMissingRandomnessMethod() randomnessNamer { return missingRandomnessMethod{} } func isMissing(rnd randomnessNamer) bool { _, ok := rnd.(missingRandomnessMethod) return ok } func newSamplingRandomnessMethod(rnd sampling.Randomness) randomnessNamer { return samplingRandomnessMethod{randomnessMethod(rnd)} } func newTraceIDW3CSpecMethod(rnd sampling.Randomness) randomnessNamer { return traceIDW3CSpecMethod{randomnessMethod(rnd)} } func newTraceIDHashingMethod(rnd sampling.Randomness) randomnessNamer { return traceIDHashingMethod{randomnessMethod(rnd)} } func newSamplingPriorityMethod(rnd sampling.Randomness) randomnessNamer { return samplingPriorityMethod{randomnessMethod(rnd)} } func newAttributeHashingMethod(attribute string, rnd sampling.Randomness) randomnessNamer { return attributeHashingMethod{ randomnessMethod: randomnessMethod(rnd), attribute: attribute, } } // samplingCarrier conveys information about the underlying data item // (whether span or log record) through the sampling decision. type samplingCarrier interface { // explicitRandomness returns a randomness value and a boolean // indicating whether the item had sampling randomness // explicitly set. explicitRandomness() (randomnessNamer, bool) // setExplicitRandomness updates the item with the signal-specific // encoding for an explicit randomness value. setExplicitRandomness(randomnessNamer) // clearThreshold unsets a sampling threshold, which is used to // clear information that breaks the expected sampling invariants // described in OTEP 235. clearThreshold() // threshold returns a sampling threshold and a boolean // indicating whether the item had sampling threshold // explicitly set. threshold() (sampling.Threshold, bool) // updateThreshold modifies the sampling threshold. This // returns an error if the updated sampling threshold has a // lower adjusted account; the only permissible updates raise // adjusted count (i.e., reduce sampling probability). updateThreshold(sampling.Threshold) error // reserialize re-encodes the updated sampling information // into the item, if necessary. For Spans, this re-encodes // the tracestate. 
This is a no-op for log records. reserialize() error } // dataSampler implements the logic of a sampling mode. type dataSampler interface { // decide reports the result based on a probabilistic decision. decide(carrier samplingCarrier) sampling.Threshold // randomnessFromSpan extracts randomness and returns a carrier specific to traces data. randomnessFromSpan(s ptrace.Span) (randomness randomnessNamer, carrier samplingCarrier, err error) // randomnessFromLogRecord extracts randomness and returns a carrier specific to logs data. randomnessFromLogRecord(s plog.LogRecord) (randomness randomnessNamer, carrier samplingCarrier, err error) } func (sm *SamplerMode) UnmarshalText(in []byte) error { switch mode := SamplerMode(in); mode { case HashSeed, Equalizing, Proportional, modeUnset: *sm = mode return nil default: return fmt.Errorf("unsupported sampler mode %q", mode) } } // hashingSampler is the original hash-based calculation. It is an // equalizing sampler with randomness calculation that matches the // original implementation. This hash-based implementation is limited // to 14 bits of precision. type hashingSampler struct { hashSeed uint32 tvalueThreshold sampling.Threshold // Logs only: name of attribute to obtain randomness logsRandomnessSourceAttribute string // Logs only: whether the trace ID may be used to obtain randomness logsTraceIDEnabled bool } func (th *hashingSampler) decide(_ samplingCarrier) sampling.Threshold { return th.tvalueThreshold } // consistentTracestateCommon contains the common aspects of the // Proportional and Equalizing sampler modes. These samplers sample // using the TraceID and do not support the use of a logs source attribute. type consistentTracestateCommon struct{} // neverSampler always decides not to sample. type neverSampler struct{} func (*neverSampler) decide(_ samplingCarrier) sampling.Threshold { return sampling.NeverSampleThreshold } // equalizingSampler raises thresholds up to a fixed value. type equalizingSampler struct { // TraceID-randomness-based calculation tvalueThreshold sampling.Threshold consistentTracestateCommon } func (te *equalizingSampler) decide(carrier samplingCarrier) sampling.Threshold { if tv, has := carrier.threshold(); has && sampling.ThresholdLessThan(te.tvalueThreshold, tv) { return tv } return te.tvalueThreshold } // proportionalSampler raises thresholds relative to the incoming value. type proportionalSampler struct { // ratio in the range [2**-56, 1] ratio float64 // precision is the precision in number of hex digits precision int consistentTracestateCommon } func (tp *proportionalSampler) decide(carrier samplingCarrier) sampling.Threshold { incoming := 1.0 if tv, has := carrier.threshold(); has { incoming = tv.Probability() } // There is a potential for the product probability to // underflow, which is checked below. threshold, err := sampling.ProbabilityToThresholdWithPrecision(incoming*tp.ratio, tp.precision) // Check the only known error condition. if errors.Is(err, sampling.ErrProbabilityRange) { // This is considered valid: the sampling probability // has fallen below the minimum supported value and the item simply // becomes unsampled.
return sampling.NeverSampleThreshold } return threshold } func getBytesFromValue(value pcommon.Value) []byte { if value.Type() == pcommon.ValueTypeBytes { return value.Bytes().AsRaw() } return []byte(value.AsString()) } func randomnessFromBytes(b []byte, hashSeed uint32) sampling.Randomness { hashed32 := computeHash(b, hashSeed) hashed := uint64(hashed32 & bitMaskHashBuckets) // Ordinarily, hashed is compared against an acceptance // threshold, i.e., sampled when hashed < scaledSamplerate, // which has the form R < T with T in [1, 2^14] and // R in [0, 2^14-1]. // // Here, modify R to R' and T to T', so that the sampling // equation has identical form to the specification, i.e., T' // <= R', using: // // T' = numHashBuckets-T // R' = numHashBuckets-1-R // // As a result, R' has the correct most-significant 14 bits to // use in an R-value. rprime14 := numHashBuckets - 1 - hashed // There are 18 unused bits from the FNV hash function. unused18 := uint64(hashed32 >> (32 - numHashBucketsLg2)) mixed28 := unused18 ^ (unused18 << 10) // The 56-bit quantity here consists of, most- to least-significant: // - 14 bits: R' = numHashBuckets - 1 - hashed // - 28 bits: mixture of unused 18 bits // - 14 bits: original `hashed`. rnd56 := (rprime14 << 42) | (mixed28 << 14) | hashed // Note: by construction: // - OTel samplers make the same probabilistic decision with this r-value, // - only 14 out of 56 bits are used in the sampling decision, // - there are only 32 actual random bits. rnd, _ := sampling.UnsignedToRandomness(rnd56) return rnd } func consistencyCheck(rnd randomnessNamer, carrier samplingCarrier) error { // Without randomness, do not check the threshold. if isMissing(rnd) { return ErrMissingRandomness } // When the carrier is nil, it means there was trouble parsing the // tracestate or trace-related attributes. In this case, skip the // consistency check. if carrier == nil { return nil } // Consistency check: if the item's randomness does not satisfy the // arriving threshold, the T-value is a lie. If inconsistent, clear it // and return an error. if tv, has := carrier.threshold(); has { if !tv.ShouldSample(rnd.randomness()) { // In case we fail open, the threshold is cleared as // recommended in the OTel spec. carrier.clearThreshold() return ErrInconsistentArrivingTValue } } return nil } // makeSampler constructs a sampler. There are no errors, as the only // potential error, out-of-range probability, is corrected automatically // according to the README, which allows percents >100 to equal 100%. // // Extending this logic, we round very small probabilities up to the // minimum supported value, which varies according to sampler mode. func makeSampler(cfg *Config, isLogs bool) dataSampler { // README allows percents >100 to equal 100%. pct := cfg.SamplingPercentage if pct > 100 { pct = 100 } mode := cfg.Mode if mode == modeUnset { // Reasons to choose the legacy behavior include: // (a) having set the hash seed // (b) logs signal without trace ID source if cfg.HashSeed != 0 || (isLogs && cfg.AttributeSource != traceIDAttributeSource) { mode = HashSeed } else { mode = defaultMode } } if pct == 0 { return &neverSampler{} } // Note: Convert to float64 before dividing by 100 to avoid loss of precision. // If the probability is too small, round it up to the minimum. ratio := float64(pct) / 100 // Like the pct > 100 test above, but for values too small to // express in 14 bits of precision.
if ratio < sampling.MinSamplingProbability { ratio = sampling.MinSamplingProbability } switch mode { case Equalizing: // The error case below is ignored; we have rounded the probability so // that it is in range threshold, _ := sampling.ProbabilityToThresholdWithPrecision(ratio, cfg.SamplingPrecision) return &equalizingSampler{ tvalueThreshold: threshold, } case Proportional: return &proportionalSampler{ ratio: ratio, precision: cfg.SamplingPrecision, } default: // i.e., HashSeed // Note: the original hash function used in this code // is preserved to ensure consistency across updates. // // uint32(pct * percentageScaleFactor) // // (a) carried out the multiplication in 32-bit precision // (b) rounded to zero instead of nearest. scaledSamplerate := uint32(pct * percentageScaleFactor) if scaledSamplerate == 0 { return &neverSampler{} } // Convert the accept threshold to a reject threshold, // then shift it into a 56-bit value. reject := numHashBuckets - scaledSamplerate reject56 := uint64(reject) << 42 threshold, _ := sampling.UnsignedToThreshold(reject56) return &hashingSampler{ tvalueThreshold: threshold, hashSeed: cfg.HashSeed, // Logs specific: logsTraceIDEnabled: cfg.AttributeSource == traceIDAttributeSource, logsRandomnessSourceAttribute: cfg.FromAttribute, } } } // randFunc returns randomness (with a named policy), a carrier, and an error. type randFunc[T any] func(T) (randomnessNamer, samplingCarrier, error) // priorityFunc makes changes resulting from sampling priority. type priorityFunc[T any] func(T, randomnessNamer, sampling.Threshold) (randomnessNamer, sampling.Threshold) // commonShouldSampleLogic implements sampling on a per-item basis // independent of the signal type, as embodied in the functional // parameters: func commonShouldSampleLogic[T any]( ctx context.Context, item T, sampler dataSampler, failClosed bool, randFunc randFunc[T], priorityFunc priorityFunc[T], description string, logger *zap.Logger, counter metric.Int64Counter, ) bool { rnd, carrier, err := randFunc(item) if err == nil { err = consistencyCheck(rnd, carrier) } var threshold sampling.Threshold if err != nil { var se samplerError if errors.As(err, &se) { logger.Debug(description, zap.Error(err)) } else { logger.Info(description, zap.Error(err)) } if failClosed { threshold = sampling.NeverSampleThreshold } else { threshold = sampling.AlwaysSampleThreshold } } else { threshold = sampler.decide(carrier) } rnd, threshold = priorityFunc(item, rnd, threshold) sampled := threshold.ShouldSample(rnd.randomness()) if sampled && carrier != nil { // Note: updateThreshold limits loss of adjusted count by // preventing the threshold from being lowered, allowing // probability only to fall and never to rise. if err := carrier.updateThreshold(threshold); err != nil { if errors.Is(err, sampling.ErrInconsistentSampling) { // This is working as intended: the threshold // cannot be lowered. logger.Debug(description, zap.Error(err)) } else { logger.Info(description, zap.Error(err)) } } if err := carrier.reserialize(); err != nil { logger.Info(description, zap.Error(err)) } } counter.Add(ctx, 1, metric.WithAttributes(attribute.String("policy", rnd.policyName()), attribute.String("sampled", strconv.FormatBool(sampled)))) return sampled }
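// Worked example (illustrative only, not part of the component API):
// tracing the hash-seed arithmetic above for a hypothetical
// SamplingPercentage of 25.
//
//	scaled := uint32(25 * percentageScaleFactor) // 25 * 163.84 -> 4096 accepted buckets
//	reject := numHashBuckets - scaled            // 16384 - 4096 = 12288 rejected buckets
//	reject56 := uint64(reject) << 42             // place the 14 bits at the top of 56
//
// An item passes when its 56-bit randomness is at or above reject56,
// so 4096 of the 16384 hash buckets (i.e., 25%) are sampled.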
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package probabilisticsamplerprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" import ( "context" "strconv" "strings" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor/internal/metadata" ) // samplingPriority has the semantic result of parsing the "sampling.priority" // attribute per OpenTracing semantic conventions. type samplingPriority int const ( // deferDecision means that the decision if a span will be "sampled" (ie.: // forwarded by the collector) is made by hashing the trace ID according // to the configured sampling rate. deferDecision samplingPriority = iota // mustSampleSpan indicates that the span had a "sampling.priority" attribute // greater than zero and it is going to be sampled, ie.: forwarded by the // collector. mustSampleSpan // doNotSampleSpan indicates that the span had a "sampling.priority" attribute // equal zero and it is NOT going to be sampled, ie.: it won't be forwarded // by the collector. doNotSampleSpan ) type traceProcessor struct { sampler dataSampler failClosed bool logger *zap.Logger telemetryBuilder *metadata.TelemetryBuilder } // tracestateCarrier conveys information about sampled spans between // the call to parse incoming randomness/threshold and the call to // decide. type tracestateCarrier struct { span ptrace.Span sampling.W3CTraceState } var _ samplingCarrier = &tracestateCarrier{} func newTracestateCarrier(s ptrace.Span) (samplingCarrier, error) { var err error tsc := &tracestateCarrier{ span: s, } tsc.W3CTraceState, err = sampling.NewW3CTraceState(s.TraceState().AsRaw()) return tsc, err } func (tc *tracestateCarrier) threshold() (sampling.Threshold, bool) { return tc.W3CTraceState.OTelValue().TValueThreshold() } func (tc *tracestateCarrier) explicitRandomness() (randomnessNamer, bool) { rnd, ok := tc.W3CTraceState.OTelValue().RValueRandomness() if !ok { return newMissingRandomnessMethod(), false } return newSamplingRandomnessMethod(rnd), true } func (tc *tracestateCarrier) updateThreshold(th sampling.Threshold) error { return tc.W3CTraceState.OTelValue().UpdateTValueWithSampling(th) } func (tc *tracestateCarrier) setExplicitRandomness(rnd randomnessNamer) { tc.W3CTraceState.OTelValue().SetRValue(rnd.randomness()) } func (tc *tracestateCarrier) clearThreshold() { tc.W3CTraceState.OTelValue().ClearTValue() } func (tc *tracestateCarrier) reserialize() error { var w strings.Builder err := tc.W3CTraceState.Serialize(&w) if err == nil { tc.span.TraceState().FromRaw(w.String()) } return err } // newTracesProcessor returns a processor.TracesProcessor that will // perform intermediate span sampling according to the given // configuration. 
func newTracesProcessor(ctx context.Context, set processor.Settings, cfg *Config, nextConsumer consumer.Traces) (processor.Traces, error) { telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) if err != nil { return nil, err } tp := &traceProcessor{ sampler: makeSampler(cfg, false), failClosed: cfg.FailClosed, logger: set.Logger, telemetryBuilder: telemetryBuilder, } return processorhelper.NewTraces( ctx, set, cfg, nextConsumer, tp.processTraces, processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) } func (th *hashingSampler) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) { tid := s.TraceID() tsc, err := newTracestateCarrier(s) rnd := newMissingRandomnessMethod() if !tid.IsEmpty() { rnd = newTraceIDHashingMethod(randomnessFromBytes(tid[:], th.hashSeed)) } // If the tracestate contains a proper R-value or T-value, we // have to leave it alone. The user should not be using this // sampler mode if they are using the specified forms of consistent // sampling in OTel. if err != nil { return rnd, nil, err } else if _, has := tsc.explicitRandomness(); has { err = ErrRandomnessInUse tsc = nil } else if _, has := tsc.threshold(); has { err = ErrThresholdInUse tsc = nil } else { // When no sampling information is present, add an // explicit randomness value. tsc.setExplicitRandomness(rnd) } return rnd, tsc, err } func (ctc *consistentTracestateCommon) randomnessFromSpan(s ptrace.Span) (randomnessNamer, samplingCarrier, error) { rnd := newMissingRandomnessMethod() tsc, err := newTracestateCarrier(s) if err != nil { tsc = nil } else if rv, has := tsc.explicitRandomness(); has { // When the tracestate is OK and has an r-value, use it. rnd = rv } else if !s.TraceID().IsEmpty() { rnd = newTraceIDW3CSpecMethod(sampling.TraceIDToRandomness(s.TraceID())) } return rnd, tsc, err } func (th *neverSampler) randomnessFromSpan(span ptrace.Span) (randomnessNamer, samplingCarrier, error) { // We return a fake randomness value, since it will not be used. // This avoids a consistency check error for missing randomness. tsc, err := newTracestateCarrier(span) return newSamplingPriorityMethod(sampling.AllProbabilitiesRandomness), tsc, err } func (tp *traceProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool { rs.ScopeSpans().RemoveIf(func(ils ptrace.ScopeSpans) bool { ils.Spans().RemoveIf(func(s ptrace.Span) bool { return !commonShouldSampleLogic( ctx, s, tp.sampler, tp.failClosed, tp.sampler.randomnessFromSpan, tp.priorityFunc, "traces sampler", tp.logger, tp.telemetryBuilder.ProcessorProbabilisticSamplerCountTracesSampled, ) }) // Filter out empty ScopeSpans return ils.Spans().Len() == 0 }) // Filter out empty ResourceSpans return rs.ScopeSpans().Len() == 0 }) if td.ResourceSpans().Len() == 0 { return td, processorhelper.ErrSkipProcessingData } return td, nil } func (tp *traceProcessor) priorityFunc(s ptrace.Span, rnd randomnessNamer, threshold sampling.Threshold) (randomnessNamer, sampling.Threshold) { switch parseSpanSamplingPriority(s) { case doNotSampleSpan: // OpenTracing mentions this as a "hint". We take a stronger // approach and do not sample the span since some may use it to // remove specific spans from traces.
threshold = sampling.NeverSampleThreshold rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name case mustSampleSpan: threshold = sampling.AlwaysSampleThreshold rnd = newSamplingPriorityMethod(rnd.randomness()) // override policy name case deferDecision: // Note that the logs processor has very different logic here; // in tracing, the priority can only force the decision to never or // always. } return rnd, threshold } // parseSpanSamplingPriority checks if the span has the "sampling.priority" tag to // decide if the span should be sampled or not. The usage of the tag follows the // OpenTracing semantic tags: // https://github.com/opentracing/specification/blob/main/semantic_conventions.md#span-tags-table func parseSpanSamplingPriority(span ptrace.Span) samplingPriority { attribMap := span.Attributes() if attribMap.Len() <= 0 { return deferDecision } samplingPriorityAttrib, ok := attribMap.Get("sampling.priority") if !ok { return deferDecision } // By default, defer the decision. decision := deferDecision // Check for different types, since various client libraries // use different conventions for "sampling.priority". Besides the // client libraries, it is also possible that the type was lost in translation // between different formats. switch samplingPriorityAttrib.Type() { case pcommon.ValueTypeInt: value := samplingPriorityAttrib.Int() if value == 0 { decision = doNotSampleSpan } else if value > 0 { decision = mustSampleSpan } case pcommon.ValueTypeDouble: value := samplingPriorityAttrib.Double() if value == 0.0 { decision = doNotSampleSpan } else if value > 0.0 { decision = mustSampleSpan } case pcommon.ValueTypeStr: attribVal := samplingPriorityAttrib.Str() if value, err := strconv.ParseFloat(attribVal, 64); err == nil { if value == 0.0 { decision = doNotSampleSpan } else if value > 0.0 { decision = mustSampleSpan } } } return decision }
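// demoSamplingPriority is an illustrative sketch (not part of the
// component) of how parseSpanSamplingPriority reacts to the
// OpenTracing "sampling.priority" attribute; the values are arbitrary.
func demoSamplingPriority() {
	s := ptrace.NewSpan()
	s.Attributes().PutInt("sampling.priority", 0)
	_ = parseSpanSamplingPriority(s) // doNotSampleSpan: never forwarded

	s.Attributes().PutDouble("sampling.priority", 1)
	_ = parseSpanSamplingPriority(s) // mustSampleSpan: always forwarded
}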
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "fmt" "strings" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) // aggregateAttributesProcessor type aggregateAttributesProcessor struct { aggregations []*aggregation } type aggregation struct { attribute string prefixes []string } func newAggregateAttributesProcessor(config []aggregationPair) *aggregateAttributesProcessor { aggregations := []*aggregation{} for i := 0; i < len(config); i++ { pair := &aggregation{ attribute: config[i].Attribute, prefixes: config[i].Prefixes, } aggregations = append(aggregations, pair) } return &aggregateAttributesProcessor{aggregations: aggregations} } func (proc *aggregateAttributesProcessor) processLogs(logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { resourceLogs := logs.ResourceLogs().At(i) err := proc.processAttributes(resourceLogs.Resource().Attributes()) if err != nil { return err } for j := 0; j < resourceLogs.ScopeLogs().Len(); j++ { scopeLogs := resourceLogs.ScopeLogs().At(j) for k := 0; k < scopeLogs.LogRecords().Len(); k++ { err := proc.processAttributes(scopeLogs.LogRecords().At(k).Attributes()) if err != nil { return err } } } } return nil } func (proc *aggregateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error { for i := 0; i < metrics.ResourceMetrics().Len(); i++ { resourceMetrics := metrics.ResourceMetrics().At(i) err := proc.processAttributes(resourceMetrics.Resource().Attributes()) if err != nil { return err } for j := 0; j < resourceMetrics.ScopeMetrics().Len(); j++ { scopeMetrics := resourceMetrics.ScopeMetrics().At(j) for k := 0; k < scopeMetrics.Metrics().Len(); k++ { err := processMetricLevelAttributes(proc, scopeMetrics.Metrics().At(k)) if err != nil { return err } } } } return nil } func (proc *aggregateAttributesProcessor) processTraces(traces ptrace.Traces) error { for i := 0; i < traces.ResourceSpans().Len(); i++ { resourceSpans := traces.ResourceSpans().At(i) err := proc.processAttributes(resourceSpans.Resource().Attributes()) if err != nil { return err } for j := 0; j < resourceSpans.ScopeSpans().Len(); j++ { scopeSpans := resourceSpans.ScopeSpans().At(j) for k := 0; k < scopeSpans.Spans().Len(); k++ { err := proc.processAttributes(scopeSpans.Spans().At(k).Attributes()) if err != nil { return err } } } } return nil } func (proc *aggregateAttributesProcessor) isEnabled() bool { return len(proc.aggregations) != 0 } func (*aggregateAttributesProcessor) ConfigPropertyName() string { return "aggregate_attributes" } func (proc *aggregateAttributesProcessor) processAttributes(attributes pcommon.Map) error { for i := 0; i < len(proc.aggregations); i++ { curr := proc.aggregations[i] names := []string{} attrs := []pcommon.Value{} for j := 0; j < len(curr.prefixes); j++ { prefix := curr.prefixes[j] // Create a new map. Unused keys will be added here, // so we can check them against other prefixes. 
newMap := pcommon.NewMap() newMap.EnsureCapacity(attributes.Len()) attributes.Range(func(key string, value pcommon.Value) bool { ok, trimmedKey := getNewKey(key, prefix) if ok { // TODO: Potential name conflict to resolve, eg.: // pod_* matches pod_foo // pod2_* matches pod2_foo // both will be renamed to foo // ref: https://github.com/SumoLogic/sumologic-otel-collector/issues/1263 names = append(names, trimmedKey) val := pcommon.NewValueEmpty() value.CopyTo(val) attrs = append(attrs, val) } else { value.CopyTo(newMap.PutEmpty(key)) } return true }) newMap.CopyTo(attributes) } if len(names) != len(attrs) { return fmt.Errorf( "internal error: number of values does not equal the number of keys; len(keys) = %d, len(values) = %d", len(names), len(attrs), ) } // Add a new attribute only if there's anything that should be put under it. if len(names) > 0 { aggregated := attributes.PutEmptyMap(curr.attribute) for j := 0; j < len(names); j++ { attrs[j].CopyTo(aggregated.PutEmpty(names[j])) } } } return nil } // Checks if the key has given prefix and trims it if so. func getNewKey(key string, prefix string) (bool, string) { if strings.HasPrefix(key, prefix) { return true, strings.TrimPrefix(key, prefix) } return false, "" }
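// demoAggregation is an illustrative sketch (not part of the processor):
// with the hypothetical pair {attribute: "pods", prefixes: ["pod_"]},
// matching keys are trimmed and collected under one map-valued attribute.
func demoAggregation() {
	proc := newAggregateAttributesProcessor([]aggregationPair{
		{Attribute: "pods", Prefixes: []string{"pod_"}},
	})
	attrs := pcommon.NewMap()
	attrs.PutStr("pod_name", "api-0")
	attrs.PutStr("host", "h1")
	_ = proc.processAttributes(attrs)
	// attrs is now {"host": "h1", "pods": {"name": "api-0"}}.
}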
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "fmt" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" ) // This file contains some common functionalities for subprocessors that modify attributes (represented by pcommon.Map) type attributesProcessor interface { processAttributes(pcommon.Map) error } func processMetricLevelAttributes(proc attributesProcessor, metric pmetric.Metric) error { switch metric.Type() { case pmetric.MetricTypeEmpty: return nil case pmetric.MetricTypeSum: dp := metric.Sum().DataPoints() for i := 0; i < dp.Len(); i++ { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err } } return nil case pmetric.MetricTypeGauge: dp := metric.Gauge().DataPoints() for i := 0; i < dp.Len(); i++ { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err } } return nil case pmetric.MetricTypeHistogram: dp := metric.Histogram().DataPoints() for i := 0; i < dp.Len(); i++ { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err } } return nil case pmetric.MetricTypeExponentialHistogram: dp := metric.ExponentialHistogram().DataPoints() for i := 0; i < dp.Len(); i++ { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err } } return nil case pmetric.MetricTypeSummary: dp := metric.Summary().DataPoints() for i := 0; i < dp.Len(); i++ { err := proc.processAttributes(dp.At(i).Attributes()) if err != nil { return err } } return nil } return fmt.Errorf("unknown metric type: %s", metric.Type().String()) } func mapToPcommonMap(m map[string]pcommon.Value) pcommon.Map { attrs := pcommon.NewMap() for k, v := range m { v.CopyTo(attrs.PutEmpty(k)) } return attrs } func mapToPcommonValue(m map[string]pcommon.Value) pcommon.Value { attrs := pcommon.NewValueMap() for k, v := range m { v.CopyTo(attrs.Map().PutEmpty(k)) } return attrs }
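// demoMapHelpers is an illustrative sketch (not part of the processor)
// of the two conversion helpers above; keys and values are arbitrary.
func demoMapHelpers() {
	in := map[string]pcommon.Value{"user": pcommon.NewValueStr("alice")}
	m := mapToPcommonMap(in)   // pcommon.Map {"user": "alice"}
	v := mapToPcommonValue(in) // map-typed pcommon.Value with the same content
	_, _ = m, v
}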
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" ) // cloudNamespaceProcessor adds the `cloud.namespace` resource attribute to logs, metrics and traces. type cloudNamespaceProcessor struct { addCloudNamespace bool } const ( cloudNamespaceAttributeName = "cloud.namespace" cloudNamespaceAwsEc2 = "aws/ec2" cloudNamespaceAwsEcs = "ecs" cloudNamespaceAwsBeanstalk = "ElasticBeanstalk" ) func newCloudNamespaceProcessor(addCloudNamespace bool) *cloudNamespaceProcessor { return &cloudNamespaceProcessor{ addCloudNamespace: addCloudNamespace, } } func (*cloudNamespaceProcessor) processLogs(logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { addCloudNamespaceAttribute(logs.ResourceLogs().At(i).Resource().Attributes()) } return nil } func (*cloudNamespaceProcessor) processMetrics(metrics pmetric.Metrics) error { for i := 0; i < metrics.ResourceMetrics().Len(); i++ { addCloudNamespaceAttribute(metrics.ResourceMetrics().At(i).Resource().Attributes()) } return nil } func (*cloudNamespaceProcessor) processTraces(traces ptrace.Traces) error { for i := 0; i < traces.ResourceSpans().Len(); i++ { addCloudNamespaceAttribute(traces.ResourceSpans().At(i).Resource().Attributes()) } return nil } func (proc *cloudNamespaceProcessor) isEnabled() bool { return proc.addCloudNamespace } func (*cloudNamespaceProcessor) ConfigPropertyName() string { return "add_cloud_namespace" } // addCloudNamespaceAttribute adds the `cloud.namespace` attribute // to a collection of attributes that already contains a `cloud.platform` attribute. // It does not add the `cloud.namespace` attribute for all `cloud.platform` values, // but only for a few specific ones - namely AWS EC2, AWS ECS, and AWS Elastic Beanstalk. func addCloudNamespaceAttribute(attributes pcommon.Map) { cloudPlatformAttributeValue, found := attributes.Get(conventions.AttributeCloudPlatform) if !found { return } switch cloudPlatformAttributeValue.Str() { case conventions.AttributeCloudPlatformAWSEC2: attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEc2) case conventions.AttributeCloudPlatformAWSECS: attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsEcs) case conventions.AttributeCloudPlatformAWSElasticBeanstalk: attributes.PutStr(cloudNamespaceAttributeName, cloudNamespaceAwsBeanstalk) } }
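// demoCloudNamespace is an illustrative sketch (not part of the
// processor): for cloud.platform=aws_ec2 the processor adds
// cloud.namespace="aws/ec2", while unknown platforms are left untouched.
func demoCloudNamespace() {
	attrs := pcommon.NewMap()
	attrs.PutStr(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEC2)
	addCloudNamespaceAttribute(attrs)
	// attrs now also contains {"cloud.namespace": "aws/ec2"}.
}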
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "go.opentelemetry.io/collector/component" ) type Config struct { AddCloudNamespace bool `mapstructure:"add_cloud_namespace"` TranslateAttributes bool `mapstructure:"translate_attributes"` TranslateTelegrafAttributes bool `mapstructure:"translate_telegraf_attributes"` NestAttributes *NestingProcessorConfig `mapstructure:"nest_attributes"` AggregateAttributes []aggregationPair `mapstructure:"aggregate_attributes"` LogFieldsAttributes *logFieldAttributesConfig `mapstructure:"field_attributes"` TranslateDockerMetrics bool `mapstructure:"translate_docker_metrics"` } type aggregationPair struct { Attribute string `mapstructure:"attribute"` Prefixes []string `mapstructure:"prefixes"` } const ( defaultAddCloudNamespace = true defaultTranslateAttributes = true defaultTranslateTelegrafAttributes = true defaultTranslateDockerMetrics = false // Nesting processor default config defaultNestingEnabled = false defaultNestingSeparator = "." defaultNestingSquashSingleValues = false defaultAddSeverityNumberAttribute = false defaultAddSeverityTextAttribute = false defaultAddSpanIDAttribute = false defaultAddTraceIDAttribute = false ) var _ component.Config = (*Config)(nil) func defaultNestingInclude() []string { return []string{} } func defaultNestingExclude() []string { return []string{} } func defaultAggregateAttributes() []aggregationPair { return []aggregationPair{} } func createDefaultConfig() component.Config { return &Config{ AddCloudNamespace: defaultAddCloudNamespace, TranslateAttributes: defaultTranslateAttributes, TranslateTelegrafAttributes: defaultTranslateTelegrafAttributes, NestAttributes: &NestingProcessorConfig{ Separator: defaultNestingSeparator, Enabled: defaultNestingEnabled, Include: defaultNestingInclude(), Exclude: defaultNestingExclude(), SquashSingleValues: defaultNestingSquashSingleValues, }, AggregateAttributes: defaultAggregateAttributes(), LogFieldsAttributes: &logFieldAttributesConfig{ SeverityNumberAttribute: &logFieldAttribute{defaultAddSeverityNumberAttribute, SeverityNumberAttributeName}, SeverityTextAttribute: &logFieldAttribute{defaultAddSeverityTextAttribute, SeverityTextAttributeName}, SpanIDAttribute: &logFieldAttribute{defaultAddSpanIDAttribute, SpanIDAttributeName}, TraceIDAttribute: &logFieldAttribute{defaultAddTraceIDAttribute, TraceIDAttributeName}, }, TranslateDockerMetrics: defaultTranslateDockerMetrics, } } // Validate config func (cfg *Config) Validate() error { return nil }
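// An illustrative collector configuration for this processor, assuming
// the component is registered under the "sumologic" name (all values
// shown are examples, not recommendations):
//
//	processors:
//	  sumologic:
//	    add_cloud_namespace: true
//	    nest_attributes:
//	      enabled: true
//	      separator: "."
//	    aggregate_attributes:
//	      - attribute: pods
//	        prefixes: [pod_]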
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // //go:generate mdatagen metadata.yaml package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "context" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor/internal/metadata" ) var processorCapabilities = consumer.Capabilities{MutatesData: true} // NewFactory returns a new factory for the processor. func NewFactory() processor.Factory { return processor.NewFactory( metadata.Type, createDefaultConfig, processor.WithTraces(createTracesProcessor, metadata.TracesStability), processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability), processor.WithLogs(createLogsProcessor, metadata.LogsStability), ) } func createLogsProcessor( ctx context.Context, set processor.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { processor := newsumologicProcessor(set, cfg.(*Config)) return processorhelper.NewLogs( ctx, set, cfg, nextConsumer, processor.processLogs, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithStart(processor.start), processorhelper.WithShutdown(processor.shutdown)) } func createMetricsProcessor( ctx context.Context, set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { processor := newsumologicProcessor(set, cfg.(*Config)) return processorhelper.NewMetrics( ctx, set, cfg, nextConsumer, processor.processMetrics, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithStart(processor.start), processorhelper.WithShutdown(processor.shutdown)) } func createTracesProcessor( ctx context.Context, set processor.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { processor := newsumologicProcessor(set, cfg.(*Config)) return processorhelper.NewTraces( ctx, set, cfg, nextConsumer, processor.processTraces, processorhelper.WithCapabilities(processorCapabilities), processorhelper.WithStart(processor.start), processorhelper.WithShutdown(processor.shutdown)) }
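// demoFactory is an illustrative sketch (not part of the component):
// a collector distribution obtains the default config through the
// factory before unmarshaling user settings over it.
func demoFactory() {
	f := NewFactory()
	cfg := f.CreateDefaultConfig().(*Config)
	_ = cfg.AddCloudNamespace // true by default
}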
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "encoding/hex" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) const ( SeverityNumberAttributeName = "loglevel" SeverityTextAttributeName = "severitytext" SpanIDAttributeName = "spanid" TraceIDAttributeName = "traceid" ) type logFieldAttribute struct { Enabled bool `mapstructure:"enabled"` Name string `mapstructure:"name"` } type logFieldAttributesConfig struct { SeverityNumberAttribute *logFieldAttribute `mapstructure:"severity_number"` SeverityTextAttribute *logFieldAttribute `mapstructure:"severity_text"` SpanIDAttribute *logFieldAttribute `mapstructure:"span_id"` TraceIDAttribute *logFieldAttribute `mapstructure:"trace_id"` } // spanIDToHexOrEmptyString returns a hex string from SpanID. // An empty string is returned, if SpanID is empty. func spanIDToHexOrEmptyString(id pcommon.SpanID) string { if id.IsEmpty() { return "" } return hex.EncodeToString(id[:]) } // traceIDToHexOrEmptyString returns a hex string from TraceID. // An empty string is returned, if TraceID is empty. func traceIDToHexOrEmptyString(id pcommon.TraceID) string { if id.IsEmpty() { return "" } return hex.EncodeToString(id[:]) } var severityNumberToLevel = map[string]string{ plog.SeverityNumberUnspecified.String(): "UNSPECIFIED", plog.SeverityNumberTrace.String(): "TRACE", plog.SeverityNumberTrace2.String(): "TRACE2", plog.SeverityNumberTrace3.String(): "TRACE3", plog.SeverityNumberTrace4.String(): "TRACE4", plog.SeverityNumberDebug.String(): "DEBUG", plog.SeverityNumberDebug2.String(): "DEBUG2", plog.SeverityNumberDebug3.String(): "DEBUG3", plog.SeverityNumberDebug4.String(): "DEBUG4", plog.SeverityNumberInfo.String(): "INFO", plog.SeverityNumberInfo2.String(): "INFO2", plog.SeverityNumberInfo3.String(): "INFO3", plog.SeverityNumberInfo4.String(): "INFO4", plog.SeverityNumberWarn.String(): "WARN", plog.SeverityNumberWarn2.String(): "WARN2", plog.SeverityNumberWarn3.String(): "WARN3", plog.SeverityNumberWarn4.String(): "WARN4", plog.SeverityNumberError.String(): "ERROR", plog.SeverityNumberError2.String(): "ERROR2", plog.SeverityNumberError3.String(): "ERROR3", plog.SeverityNumberError4.String(): "ERROR4", plog.SeverityNumberFatal.String(): "FATAL", plog.SeverityNumberFatal2.String(): "FATAL2", plog.SeverityNumberFatal3.String(): "FATAL3", plog.SeverityNumberFatal4.String(): "FATAL4", } // logFieldsConversionProcessor converts specific log entries to attributes which leads to presenting them as fields // in the backend type logFieldsConversionProcessor struct { LogFieldsAttributes *logFieldAttributesConfig } func newLogFieldConversionProcessor(logFieldsAttributes *logFieldAttributesConfig) *logFieldsConversionProcessor { return &logFieldsConversionProcessor{ logFieldsAttributes, } } func (proc *logFieldsConversionProcessor) addAttributes(log plog.LogRecord) { if log.SeverityNumber() != plog.SeverityNumberUnspecified { if _, found := log.Attributes().Get(SeverityNumberAttributeName); !found && proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled { level := severityNumberToLevel[log.SeverityNumber().String()] log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityNumberAttribute.Name, level) } } if _, found := log.Attributes().Get(SeverityTextAttributeName); !found && 
proc.LogFieldsAttributes.SeverityTextAttribute.Enabled { log.Attributes().PutStr(proc.LogFieldsAttributes.SeverityTextAttribute.Name, log.SeverityText()) } if _, found := log.Attributes().Get(SpanIDAttributeName); !found && proc.LogFieldsAttributes.SpanIDAttribute.Enabled { log.Attributes().PutStr(proc.LogFieldsAttributes.SpanIDAttribute.Name, spanIDToHexOrEmptyString(log.SpanID())) } if _, found := log.Attributes().Get(TraceIDAttributeName); !found && proc.LogFieldsAttributes.TraceIDAttribute.Enabled { log.Attributes().PutStr(proc.LogFieldsAttributes.TraceIDAttribute.Name, traceIDToHexOrEmptyString(log.TraceID())) } } func (proc *logFieldsConversionProcessor) processLogs(logs plog.Logs) error { if !proc.isEnabled() { return nil } rls := logs.ResourceLogs() for i := 0; i < rls.Len(); i++ { ills := rls.At(i).ScopeLogs() for j := 0; j < ills.Len(); j++ { logs := ills.At(j).LogRecords() for k := 0; k < logs.Len(); k++ { proc.addAttributes(logs.At(k)) } } } return nil } func (proc *logFieldsConversionProcessor) processMetrics(_ pmetric.Metrics) error { // No-op. Metrics should not be translated. return nil } func (proc *logFieldsConversionProcessor) processTraces(_ ptrace.Traces) error { // No-op. Traces should not be translated. return nil } func (proc *logFieldsConversionProcessor) isEnabled() bool { return proc.LogFieldsAttributes.SeverityNumberAttribute.Enabled || proc.LogFieldsAttributes.SeverityTextAttribute.Enabled || proc.LogFieldsAttributes.SpanIDAttribute.Enabled || proc.LogFieldsAttributes.TraceIDAttribute.Enabled } func (*logFieldsConversionProcessor) ConfigPropertyName() string { return "field_attributes" }
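// demoLogFieldsConversion is an illustrative sketch (not part of the
// processor): with only span_id enabled, addAttributes copies the
// record's SpanID into the configured attribute as a hex string.
func demoLogFieldsConversion() {
	proc := newLogFieldConversionProcessor(&logFieldAttributesConfig{
		SeverityNumberAttribute: &logFieldAttribute{false, SeverityNumberAttributeName},
		SeverityTextAttribute:   &logFieldAttribute{false, SeverityTextAttributeName},
		SpanIDAttribute:         &logFieldAttribute{true, SpanIDAttributeName},
		TraceIDAttribute:        &logFieldAttribute{false, TraceIDAttributeName},
	})
	lr := plog.NewLogRecord()
	lr.SetSpanID(pcommon.SpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	proc.addAttributes(lr)
	// lr.Attributes() now contains {"spanid": "0102030405060708"}.
}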
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "strings" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) type NestingProcessorConfig struct { Separator string `mapstructure:"separator"` Enabled bool `mapstructure:"enabled"` Include []string `mapstructure:"include"` Exclude []string `mapstructure:"exclude"` SquashSingleValues bool `mapstructure:"squash_single_values"` } type NestingProcessor struct { separator string enabled bool allowlist []string denylist []string squashSingleValues bool } func newNestingProcessor(config *NestingProcessorConfig) *NestingProcessor { proc := &NestingProcessor{ separator: config.Separator, enabled: config.Enabled, allowlist: config.Include, denylist: config.Exclude, squashSingleValues: config.SquashSingleValues, } return proc } func (proc *NestingProcessor) processLogs(logs plog.Logs) error { if !proc.enabled { return nil } for i := 0; i < logs.ResourceLogs().Len(); i++ { rl := logs.ResourceLogs().At(i) if err := proc.processAttributes(rl.Resource().Attributes()); err != nil { return err } for j := 0; j < rl.ScopeLogs().Len(); j++ { logsRecord := rl.ScopeLogs().At(j).LogRecords() for k := 0; k < logsRecord.Len(); k++ { if err := proc.processAttributes(logsRecord.At(k).Attributes()); err != nil { return err } } } } return nil } func (proc *NestingProcessor) processMetrics(metrics pmetric.Metrics) error { if !proc.enabled { return nil } for i := 0; i < metrics.ResourceMetrics().Len(); i++ { rm := metrics.ResourceMetrics().At(i) if err := proc.processAttributes(rm.Resource().Attributes()); err != nil { return err } for j := 0; j < rm.ScopeMetrics().Len(); j++ { metricsSlice := rm.ScopeMetrics().At(j).Metrics() for k := 0; k < metricsSlice.Len(); k++ { if err := processMetricLevelAttributes(proc, metricsSlice.At(k)); err != nil { return err } } } } return nil } func (proc *NestingProcessor) processTraces(traces ptrace.Traces) error { if !proc.enabled { return nil } for i := 0; i < traces.ResourceSpans().Len(); i++ { rs := traces.ResourceSpans().At(i) if err := proc.processAttributes(rs.Resource().Attributes()); err != nil { return err } for j := 0; j < rs.ScopeSpans().Len(); j++ { spans := rs.ScopeSpans().At(j).Spans() for k := 0; k < spans.Len(); k++ { if err := proc.processAttributes(spans.At(k).Attributes()); err != nil { return err } } } } return nil } func (proc *NestingProcessor) processAttributes(attributes pcommon.Map) error { newMap := pcommon.NewMap() attributes.Range(func(k string, v pcommon.Value) bool { // If key is not on allow list or is on deny list, skip translating it. if !proc.shouldTranslateKey(k) { v.CopyTo(newMap.PutEmpty(k)) return true } keys := strings.Split(k, proc.separator) if len(keys) == 0 { // Split returns empty slice only if both string and separator are empty // set map[""] = v and return newVal := newMap.PutEmpty(k) v.CopyTo(newVal) return true } prevValue := pcommon.NewValueMap() nextMap := prevValue.Map() newMap.CopyTo(nextMap) for i := 0; i < len(keys); i++ { if prevValue.Type() != pcommon.ValueTypeMap { // If previous value was not a map, change it into a map. // The former value will be set under the key "". 
tempMap := pcommon.NewValueMap() prevValue.CopyTo(tempMap.Map().PutEmpty("")) tempMap.CopyTo(prevValue) } newValue, ok := prevValue.Map().Get(keys[i]) if ok { prevValue = newValue } else { if i == len(keys)-1 { // If we're checking the last key, insert empty value, to which v will be copied. prevValue = prevValue.Map().PutEmpty(keys[i]) } else { // If we're not checking the last key, put a map. prevValue = prevValue.Map().PutEmpty(keys[i]) prevValue.SetEmptyMap() } } } if prevValue.Type() == pcommon.ValueTypeMap { // Now check the value we want to copy. If it is a map, we should merge both maps. // Else, just place the value under the key "". if v.Type() == pcommon.ValueTypeMap { v.Map().Range(func(k string, val pcommon.Value) bool { val.CopyTo(prevValue.Map().PutEmpty(k)) return true }) } else { v.CopyTo(prevValue.Map().PutEmpty("")) } } else { v.CopyTo(prevValue) } nextMap.CopyTo(newMap) return true }) if proc.squashSingleValues { newMap = proc.squash(newMap) } newMap.CopyTo(attributes) return nil } // shouldTranslateKey checks if the given key fulfills the following conditions: // - has a prefix that exists in the allowlist (if it's not empty) // - does not have a prefix that exists in the denylist func (proc *NestingProcessor) shouldTranslateKey(k string) bool { if len(proc.allowlist) > 0 { isOk := false for i := 0; i < len(proc.allowlist); i++ { if strings.HasPrefix(k, proc.allowlist[i]) { isOk = true break } } if !isOk { return false } } if len(proc.denylist) > 0 { for i := 0; i < len(proc.denylist); i++ { if strings.HasPrefix(k, proc.denylist[i]) { return false } } } return true } // squash squashes maps that have single values, e.g. {"a": {"b": {"c": "C", "d": "D"}}} // gets squashed into {"a.b": {"c": "C", "d": "D"}} func (proc *NestingProcessor) squash(attributes pcommon.Map) pcommon.Map { newMap := pcommon.NewValueMap() attributes.CopyTo(newMap.Map()) key := proc.squashAttribute(newMap) if key != "" { retMap := pcommon.NewMap() newMap.Map().CopyTo(retMap.PutEmptyMap(key)) return retMap } return newMap.Map() } // squashAttribute squashes keys in a value. // If the value contains a map with one element, it gets squashed and its key is returned. // // If the value contains a map with many elements, this function is called on those elements // and their keys are replaced if needed; "" is returned. // // Otherwise, nothing happens and "" is returned. func (proc *NestingProcessor) squashAttribute(value pcommon.Value) string { if value.Type() != pcommon.ValueTypeMap { return "" } m := value.Map() if m.Len() == 1 { // If the map contains only one key-value pair, squash it. key := "" val := pcommon.NewValueEmpty() // This will iterate only over one value (the only one) m.Range(func(k string, v pcommon.Value) bool { keySuffix := proc.squashAttribute(v) key = proc.squashKey(k, keySuffix) val = v return false }) val.CopyTo(value) return key } // This map doesn't get squashed, but its content might have keys replaced. newMap := pcommon.NewMap() m.Range(func(k string, v pcommon.Value) bool { keySuffix := proc.squashAttribute(v) // If "" was returned, the value was not a one-element map and did not get squashed.
if keySuffix == "" { v.CopyTo(newMap.PutEmpty(k)) } else { v.CopyTo(newMap.PutEmpty(proc.squashKey(k, keySuffix))) } return true }) newMap.CopyTo(value.Map()) return "" } func (proc *NestingProcessor) squashKey(key string, keySuffix string) string { if keySuffix == "" { return key } return key + proc.separator + keySuffix } func (proc *NestingProcessor) isEnabled() bool { return proc.enabled } func (*NestingProcessor) ConfigPropertyName() string { return "nest_attributes" }
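// demoNesting is an illustrative sketch (not part of the processor):
// dotted keys become nested maps; with squash_single_values enabled,
// single-child chains would be folded back into one key.
func demoNesting() {
	proc := newNestingProcessor(&NestingProcessorConfig{
		Separator: ".",
		Enabled:   true,
	})
	attrs := pcommon.NewMap()
	attrs.PutStr("kubernetes.pod.name", "api-0")
	attrs.PutStr("kubernetes.pod.uid", "12345")
	_ = proc.processAttributes(attrs)
	// attrs is now {"kubernetes": {"pod": {"name": "api-0", "uid": "12345"}}};
	// with SquashSingleValues it would be {"kubernetes.pod": {...}}.
}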
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "context" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) type sumologicSubprocessor interface { processLogs(plog.Logs) error processMetrics(pmetric.Metrics) error processTraces(ptrace.Traces) error isEnabled() bool ConfigPropertyName() string } type sumologicProcessor struct { logger *zap.Logger subprocessors []sumologicSubprocessor } func newsumologicProcessor(set processor.Settings, config *Config) *sumologicProcessor { cloudNamespaceProcessor := newCloudNamespaceProcessor(config.AddCloudNamespace) translateAttributesProcessor := newTranslateAttributesProcessor(config.TranslateAttributes) translateTelegrafMetricsProcessor := newTranslateTelegrafMetricsProcessor(config.TranslateTelegrafAttributes) nestingProcessor := newNestingProcessor(config.NestAttributes) aggregateAttributesProcessor := newAggregateAttributesProcessor(config.AggregateAttributes) logFieldsConversionProcessor := newLogFieldConversionProcessor(config.LogFieldsAttributes) translateDockerMetricsProcessor := newTranslateDockerMetricsProcessor(config.TranslateDockerMetrics) processors := []sumologicSubprocessor{ cloudNamespaceProcessor, translateAttributesProcessor, translateTelegrafMetricsProcessor, nestingProcessor, aggregateAttributesProcessor, logFieldsConversionProcessor, translateDockerMetricsProcessor, } processor := &sumologicProcessor{ logger: set.Logger, subprocessors: processors, } return processor } func (processor *sumologicProcessor) start(_ context.Context, _ component.Host) error { enabledSubprocessors := []zapcore.Field{} for _, proc := range processor.subprocessors { enabledSubprocessors = append(enabledSubprocessors, zap.Bool(proc.ConfigPropertyName(), proc.isEnabled())) } processor.logger.Info("Sumo Logic Processor has started.", enabledSubprocessors...) return nil } func (processor *sumologicProcessor) shutdown(_ context.Context) error { processor.logger.Info("Sumo Logic Processor has shut down.") return nil } func (processor *sumologicProcessor) processLogs(_ context.Context, logs plog.Logs) (plog.Logs, error) { for _, subprocessor := range processor.subprocessors { if err := subprocessor.processLogs(logs); err != nil { return logs, fmt.Errorf("failed to process logs for property %s: %w", subprocessor.ConfigPropertyName(), err) } } return logs, nil } func (processor *sumologicProcessor) processMetrics(_ context.Context, metrics pmetric.Metrics) (pmetric.Metrics, error) { for _, subprocessor := range processor.subprocessors { if err := subprocessor.processMetrics(metrics); err != nil { return metrics, fmt.Errorf("failed to process metrics for property %s: %w", subprocessor.ConfigPropertyName(), err) } } return metrics, nil } func (processor *sumologicProcessor) processTraces(_ context.Context, traces ptrace.Traces) (ptrace.Traces, error) { for _, subprocessor := range processor.subprocessors { if err := subprocessor.processTraces(traces); err != nil { return traces, fmt.Errorf("failed to process traces for property %s: %w", subprocessor.ConfigPropertyName(), err) } } return traces, nil }
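// demoSubprocessorOrder is an illustrative sketch (not part of the
// component): subprocessors always run in the fixed order assembled in
// newsumologicProcessor, so attribute translation happens before
// nesting and aggregation. The zero-value Settings below only builds
// the instance; start is never called, so the nil logger stays unused.
func demoSubprocessorOrder() {
	p := newsumologicProcessor(processor.Settings{}, createDefaultConfig().(*Config))
	_ = len(p.subprocessors) // 7 subprocessors, in pipeline order
}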
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) // translateAttributesProcessor translates attribute names from the OpenTelemetry to the Sumo Logic convention type translateAttributesProcessor struct { shouldTranslate bool } // attributeTranslations maps OpenTelemetry attribute names to Sumo Logic attribute names var attributeTranslations = map[string]string{ "cloud.account.id": "AccountId", "cloud.availability_zone": "AvailabilityZone", "cloud.platform": "aws_service", "cloud.region": "Region", "host.id": "InstanceId", "host.name": "host", "host.type": "InstanceType", "k8s.cluster.name": "Cluster", "k8s.container.name": "container", "k8s.daemonset.name": "daemonset", "k8s.deployment.name": "deployment", "k8s.namespace.name": "namespace", "k8s.node.name": "node", "k8s.service.name": "service", "k8s.pod.hostname": "host", "k8s.pod.name": "pod", "k8s.pod.uid": "pod_id", "k8s.replicaset.name": "replicaset", "k8s.statefulset.name": "statefulset", "service.name": "service", "log.file.path_resolved": "_sourceName", } func newTranslateAttributesProcessor(shouldTranslate bool) *translateAttributesProcessor { return &translateAttributesProcessor{ shouldTranslate: shouldTranslate, } } func (proc *translateAttributesProcessor) processLogs(logs plog.Logs) error { if !proc.shouldTranslate { return nil } for i := 0; i < logs.ResourceLogs().Len(); i++ { translateAttributes(logs.ResourceLogs().At(i).Resource().Attributes()) } return nil } func (proc *translateAttributesProcessor) processMetrics(metrics pmetric.Metrics) error { if !proc.shouldTranslate { return nil } for i := 0; i < metrics.ResourceMetrics().Len(); i++ { translateAttributes(metrics.ResourceMetrics().At(i).Resource().Attributes()) } return nil } func (proc *translateAttributesProcessor) processTraces(_ ptrace.Traces) error { // No-op. Traces should not be translated. return nil } func (proc *translateAttributesProcessor) isEnabled() bool { return proc.shouldTranslate } func (*translateAttributesProcessor) ConfigPropertyName() string { return "translate_attributes" } func translateAttributes(attributes pcommon.Map) { result := pcommon.NewMap() result.EnsureCapacity(attributes.Len()) attributes.Range(func(otKey string, value pcommon.Value) bool { if sumoKey, ok := attributeTranslations[otKey]; ok { // Insert the translated key only if the Sumo Logic key does not // already exist in the source map; otherwise keep the original key // to avoid overwriting. The result map is checked as well, since // it is built incrementally. if _, exists := attributes.Get(sumoKey); !exists { if _, ok := result.Get(sumoKey); !ok { value.CopyTo(result.PutEmpty(sumoKey)) } } else { if _, ok := result.Get(otKey); !ok { value.CopyTo(result.PutEmpty(otKey)) } } } else { if _, ok := result.Get(otKey); !ok { value.CopyTo(result.PutEmpty(otKey)) } } return true }) result.CopyTo(attributes) }
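// demoTranslateAttributes is an illustrative sketch (not part of the
// processor): known OpenTelemetry keys are renamed unless the Sumo
// Logic key is already present in the source map.
func demoTranslateAttributes() {
	attrs := pcommon.NewMap()
	attrs.PutStr("k8s.pod.name", "api-0")
	attrs.PutStr("cloud.region", "us-east-1")
	translateAttributes(attrs)
	// attrs is now {"pod": "api-0", "Region": "us-east-1"}.
}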
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) // translateDockerMetricsProcessor translates Docker metric names from the OpenTelemetry to the Sumo Logic convention type translateDockerMetricsProcessor struct { shouldTranslate bool } // dockerMetricsTranslations maps Docker metric names in the OpenTelemetry convention to corresponding names in the Sumo Logic convention var dockerMetricsTranslations = map[string]string{ "container.cpu.percent": "cpu_percentage", "container.cpu.usage.system": "system_cpu_usage", "container.cpu.usage.percpu": "cpu_usage.percpu_usage", "container.cpu.usage.total": "cpu_usage.total_usage", "container.cpu.usage.kernelmode": "cpu_usage.usage_in_kernelmode", "container.cpu.usage.usermode": "cpu_usage.usage_in_usermode", "container.cpu.throttling_data.periods": "throttling_data.periods", "container.cpu.throttling_data.throttled_periods": "throttling_data.throttled_periods", "container.cpu.throttling_data.throttled_time": "throttling_data.throttled_time", "container.memory.usage.limit": "limit", "container.memory.usage.max": "max_usage", "container.memory.percent": "memory_percentage", "container.memory.usage.total": "usage", "container.memory.active_anon": "stats.active_anon", "container.memory.active_file": "stats.active_file", "container.memory.cache": "stats.cache", "container.memory.hierarchical_memory_limit": "stats.hierarchical_memory_limit", "container.memory.inactive_anon": "stats.inactive_anon", "container.memory.inactive_file": "stats.inactive_file", "container.memory.mapped_file": "stats.mapped_file", "container.memory.pgfault": "stats.pgfault", "container.memory.pgmajfault": "stats.pgmajfault", "container.memory.pgpgin": "stats.pgpgin", "container.memory.pgpgout": "stats.pgpgout", "container.memory.rss": "stats.rss", "container.memory.rss_huge": "stats.rss_huge", "container.memory.unevictable": "stats.unevictable", "container.memory.writeback": "stats.writeback", "container.memory.total_active_anon": "stats.total_active_anon", "container.memory.total_active_file": "stats.total_active_file", "container.memory.total_cache": "stats.total_cache", "container.memory.total_inactive_anon": "stats.total_inactive_anon", "container.memory.total_mapped_file": "stats.total_mapped_file", "container.memory.total_pgfault": "stats.total_pgfault", "container.memory.total_pgmajfault": "stats.total_pgmajfault", "container.memory.total_pgpgin": "stats.total_pgpgin", "container.memory.total_pgpgout": "stats.total_pgpgout", "container.memory.total_rss": "stats.total_rss", "container.memory.total_rss_huge": "stats.total_rss_huge", "container.memory.total_unevictable": "stats.total_unevictable", "container.memory.total_writeback": "stats.total_writeback", "container.blockio.io_merged_recursive": "io_merged_recursive", "container.blockio.io_queued_recursive": "io_queue_recursive", "container.blockio.io_service_bytes_recursive": "io_service_bytes_recursive", "container.blockio.io_service_time_recursive": "io_service_time_recursive", "container.blockio.io_serviced_recursive": "io_serviced_recursive", "container.blockio.io_time_recursive": "io_time_recursive", "container.blockio.io_wait_time_recursive": "io_wait_time_recursive", "container.blockio.sectors_recursive": "sectors_recursive", }
var dockerResourceAttributeTranslations = map[string]string{ "container.id": "container.FullID", "container.image.name": "container.ImageName", "container.name": "container.Name", } func newTranslateDockerMetricsProcessor(shouldTranslate bool) *translateDockerMetricsProcessor { return &translateDockerMetricsProcessor{ shouldTranslate: shouldTranslate, } } func (proc *translateDockerMetricsProcessor) processLogs(_ plog.Logs) error { // No-op, this subprocessor doesn't process logs. return nil } func (proc *translateDockerMetricsProcessor) processMetrics(metrics pmetric.Metrics) error { if !proc.shouldTranslate { return nil } for i := 0; i < metrics.ResourceMetrics().Len(); i++ { rm := metrics.ResourceMetrics().At(i) translateDockerResourceAttributes(rm.Resource().Attributes()) for j := 0; j < rm.ScopeMetrics().Len(); j++ { metricsSlice := rm.ScopeMetrics().At(j).Metrics() for k := 0; k < metricsSlice.Len(); k++ { translateDockerMetric(metricsSlice.At(k)) } } } return nil } func (proc *translateDockerMetricsProcessor) processTraces(_ ptrace.Traces) error { // No-op, this subprocessor doesn't process traces. return nil } func (proc *translateDockerMetricsProcessor) isEnabled() bool { return proc.shouldTranslate } func (*translateDockerMetricsProcessor) ConfigPropertyName() string { return "translate_docker_metrics" } func translateDockerMetric(m pmetric.Metric) { name, exists := dockerMetricsTranslations[m.Name()] if exists { m.SetName(name) } } func translateDockerResourceAttributes(attributes pcommon.Map) { result := pcommon.NewMap() result.EnsureCapacity(attributes.Len()) attributes.Range(func(otKey string, value pcommon.Value) bool { if sumoKey, ok := dockerResourceAttributeTranslations[otKey]; ok { // Insert the translated key only if the Sumo Logic key does not // already exist in the source map; otherwise keep the original key // to avoid overwriting. The result map is checked as well, since // it is built incrementally. if _, exists := attributes.Get(sumoKey); !exists { if _, ok := result.Get(sumoKey); !ok { value.CopyTo(result.PutEmpty(sumoKey)) } } else { if _, ok := result.Get(otKey); !ok { value.CopyTo(result.PutEmpty(otKey)) } } } else { if _, ok := result.Get(otKey); !ok { value.CopyTo(result.PutEmpty(otKey)) } } return true }) result.CopyTo(attributes) }
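// demoTranslateDockerMetric is an illustrative sketch (not part of the
// processor) of the Docker metric-name translation above.
func demoTranslateDockerMetric() {
	m := pmetric.NewMetric()
	m.SetName("container.cpu.percent")
	translateDockerMetric(m)
	// m.Name() is now "cpu_percentage".
}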
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sumologicprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor" import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) // translateTelegrafMetricsProcessor translates metric names from OpenTelemetry to Sumo Logic convention type translateTelegrafMetricsProcessor struct { shouldTranslate bool } // metricsTranslations maps Telegraf metric names to corresponding names in Sumo Logic convention var metricsTranslations = map[string]string{ // CPU metrics "cpu_usage_active": "CPU_Total", "cpu_usage_idle": "CPU_Idle", "cpu_usage_iowait": "CPU_IOWait", "cpu_usage_irq": "CPU_Irq", "cpu_usage_nice": "CPU_Nice", "cpu_usage_softirq": "CPU_SoftIrq", "cpu_usage_steal": "CPU_Stolen", "cpu_usage_system": "CPU_Sys", "cpu_usage_user": "CPU_User", "system_load1": "CPU_LoadAvg_1min", "system_load5": "CPU_LoadAvg_5min", "system_load15": "CPU_LoadAvg_15min", // Disk metrics "disk_used": "Disk_Used", "disk_used_percent": "Disk_UsedPercent", "disk_inodes_free": "Disk_InodesAvailable", // Disk IO metrics "diskio_reads": "Disk_Reads", "diskio_read_bytes": "Disk_ReadBytes", "diskio_writes": "Disk_Writes", "diskio_write_bytes": "Disk_WriteBytes", // Memory metrics "mem_total": "Mem_Total", "mem_free": "Mem_free", "mem_available": "Mem_ActualFree", "mem_used": "Mem_ActualUsed", "mem_used_percent": "Mem_UsedPercent", "mem_available_percent": "Mem_FreePercent", // Procstat metrics "procstat_num_threads": "Proc_Threads", "procstat_memory_vms": "Proc_VMSize", "procstat_memory_rss": "Proc_RSSize", "procstat_cpu_usage": "Proc_CPU", "procstat_major_faults": "Proc_MajorFaults", "procstat_minor_faults": "Proc_MinorFaults", // Net metrics "net_bytes_sent": "Net_OutBytes", "net_bytes_recv": "Net_InBytes", "net_packets_sent": "Net_OutPackets", "net_packets_recv": "Net_InPackets", // Netstat metrics "netstat_tcp_close": "TCP_Close", "netstat_tcp_close_wait": "TCP_CloseWait", "netstat_tcp_closing": "TCP_Closing", "netstat_tcp_established": "TCP_Established", "netstat_tcp_listen": "TCP_Listen", "netstat_tcp_time_wait": "TCP_TimeWait", } func newTranslateTelegrafMetricsProcessor(shouldTranslate bool) *translateTelegrafMetricsProcessor { return &translateTelegrafMetricsProcessor{ shouldTranslate: shouldTranslate, } } func (proc *translateTelegrafMetricsProcessor) processLogs(_ plog.Logs) error { // No-op, this subprocessor doesn't process logs. return nil } func (proc *translateTelegrafMetricsProcessor) processMetrics(metrics pmetric.Metrics) error { if !proc.shouldTranslate { return nil } for i := 0; i < metrics.ResourceMetrics().Len(); i++ { rm := metrics.ResourceMetrics().At(i) for j := 0; j < rm.ScopeMetrics().Len(); j++ { metricsSlice := rm.ScopeMetrics().At(j).Metrics() for k := 0; k < metricsSlice.Len(); k++ { translateTelegrafMetric(metricsSlice.At(k)) } } } return nil } func (proc *translateTelegrafMetricsProcessor) processTraces(_ ptrace.Traces) error { // No-op, this subprocessor doesn't process traces. return nil } func (proc *translateTelegrafMetricsProcessor) isEnabled() bool { return proc.shouldTranslate } func (*translateTelegrafMetricsProcessor) ConfigPropertyName() string { return "translate_telegraf_attributes" } func translateTelegrafMetric(m pmetric.Metric) { name, exists := metricsTranslations[m.Name()] if exists { m.SetName(name) } }
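// A similar hypothetical in-package sketch for the Telegraf translation: known Telegraf
// metric names are rewritten in place, unknown names pass through unchanged.

package sumologicprocessor

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func Example_translateTelegrafMetric() {
	m := pmetric.NewMetric()
	m.SetName("cpu_usage_idle")
	translateTelegrafMetric(m) // renamed via metricsTranslations
	fmt.Println(m.Name())
	// Output: CPU_Idle
}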
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" import ( "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" ) func getNewAndPolicy(settings component.TelemetrySettings, config *AndCfg) (sampling.PolicyEvaluator, error) { subPolicyEvaluators := make([]sampling.PolicyEvaluator, len(config.SubPolicyCfg)) for i := range config.SubPolicyCfg { policyCfg := &config.SubPolicyCfg[i] policy, err := getAndSubPolicyEvaluator(settings, policyCfg) if err != nil { return nil, err } subPolicyEvaluators[i] = policy } return sampling.NewAnd(settings.Logger, subPolicyEvaluators), nil } // getAndSubPolicyEvaluator returns an evaluator instance for the given 'and' sub-policy func getAndSubPolicyEvaluator(settings component.TelemetrySettings, cfg *AndSubPolicyCfg) (sampling.PolicyEvaluator, error) { return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" import ( "go.opentelemetry.io/collector/component" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" ) func getNewCompositePolicy(settings component.TelemetrySettings, config *CompositeCfg) (sampling.PolicyEvaluator, error) { subPolicyEvalParams := make([]sampling.SubPolicyEvalParams, len(config.SubPolicyCfg)) rateAllocationsMap := getRateAllocationMap(config) for i := range config.SubPolicyCfg { policyCfg := &config.SubPolicyCfg[i] policy, err := getCompositeSubPolicyEvaluator(settings, policyCfg) if err != nil { return nil, err } evalParams := sampling.SubPolicyEvalParams{ Evaluator: policy, MaxSpansPerSecond: int64(rateAllocationsMap[policyCfg.Name]), } subPolicyEvalParams[i] = evalParams } return sampling.NewComposite(settings.Logger, config.MaxTotalSpansPerSecond, subPolicyEvalParams, sampling.MonotonicClock{}), nil } // getRateAllocationMap builds a map of sub-policy name to its allocated spans-per-second rate func getRateAllocationMap(config *CompositeCfg) map[string]float64 { rateAllocationsMap := make(map[string]float64) maxTotalSPS := float64(config.MaxTotalSpansPerSecond) // The default SPS is determined by dividing the maximum total SPS equally among the sub-policies defaultSPS := maxTotalSPS / float64(len(config.SubPolicyCfg)) for _, rAlloc := range config.RateAllocation { if rAlloc.Percent > 0 { rateAllocationsMap[rAlloc.Policy] = (float64(rAlloc.Percent) / 100) * maxTotalSPS } else { rateAllocationsMap[rAlloc.Policy] = defaultSPS } } return rateAllocationsMap } // getCompositeSubPolicyEvaluator returns an evaluator instance for the given composite sub-policy func getCompositeSubPolicyEvaluator(settings component.TelemetrySettings, cfg *CompositeSubPolicyCfg) (sampling.PolicyEvaluator, error) { switch cfg.Type { case And: return getNewAndPolicy(settings, &cfg.AndCfg) default: return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg) } }
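// A worked example (hypothetical values) of the rate allocation math above: with a maximum
// of 1000 total spans per second and two sub-policies, the default share is 1000/2 = 500 SPS,
// while an explicit 25% allocation yields 0.25 * 1000 = 250 SPS. The RateAllocationCfg element
// type name is assumed from the fields used above; only policies listed in RateAllocation get
// an entry in the resulting map.

package tailsamplingprocessor

import "fmt"

func Example_getRateAllocationMap() {
	cfg := &CompositeCfg{
		MaxTotalSpansPerSecond: 1000,
		SubPolicyCfg:           make([]CompositeSubPolicyCfg, 2),
		RateAllocation: []RateAllocationCfg{
			{Policy: "errors", Percent: 25}, // 0.25 * 1000 = 250 SPS
			{Policy: "baseline"},            // zero percent falls back to 1000/2 = 500 SPS
		},
	}
	fmt.Println(getRateAllocationMap(cfg))
	// Output: map[baseline:500 errors:250]
}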
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 //go:generate mdatagen metadata.yaml package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" import ( "context" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/processor" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata" ) // NewFactory returns a new factory for the Tail Sampling processor. func NewFactory() processor.Factory { return processor.NewFactory( metadata.Type, createDefaultConfig, processor.WithTraces(createTracesProcessor, metadata.TracesStability)) } func createDefaultConfig() component.Config { return &Config{ DecisionWait: 30 * time.Second, NumTraces: 50000, } } func createTracesProcessor( ctx context.Context, params processor.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { tCfg := cfg.(*Config) return newTracesProcessor(ctx, params, nextConsumer, *tCfg) }
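// A hypothetical wiring sketch showing how a host could build the processor from this
// factory. The processortest and consumertest helpers stand in for the real collector
// runtime, and their exact signatures vary across collector versions, so treat this as
// an illustration rather than copy-paste code.

package tailsamplingprocessor

import (
	"context"
	"time"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/processor/processortest"
)

func buildExampleProcessor() error {
	factory := NewFactory()
	cfg := factory.CreateDefaultConfig().(*Config)
	cfg.DecisionWait = 10 * time.Second // shorten the default 30s decision window

	p, err := factory.CreateTraces(context.Background(), processortest.NewNopSettings(factory.Type()), cfg, consumertest.NewNop())
	if err != nil {
		return err
	}
	return p.Shutdown(context.Background())
}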
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package tailsamplingprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" import ( "context" "fmt" "math" "runtime" "sync" "sync/atomic" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/cache" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/idbatcher" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/telemetry" ) // policy combines a sampling policy evaluator with the destinations to be // used for that policy. type policy struct { // name used to identify this policy instance. name string // evaluator that decides if a trace is sampled or not by this policy instance. evaluator sampling.PolicyEvaluator // attribute to use in the telemetry to denote the policy. attribute metric.MeasurementOption } // tailSamplingSpanProcessor handles the incoming trace data and uses the given sampling // policy to sample traces. type tailSamplingSpanProcessor struct { ctx context.Context set processor.Settings telemetry *metadata.TelemetryBuilder logger *zap.Logger nextConsumer consumer.Traces maxNumTraces uint64 policies []*policy idToTrace sync.Map policyTicker timeutils.TTicker tickerFrequency time.Duration decisionBatcher idbatcher.Batcher sampledIDCache cache.Cache[bool] nonSampledIDCache cache.Cache[bool] deleteChan chan pcommon.TraceID numTracesOnMap *atomic.Uint64 setPolicyMux sync.Mutex pendingPolicy []PolicyCfg } // spanAndScope is a structure holding a span together with its instrumentation scope, // required for preserving the instrumentation library information while sampling. // Pointers are used so the spans can be located in the map quickly. type spanAndScope struct { span *ptrace.Span instrumentationScope *pcommon.InstrumentationScope } var ( attrSampledTrue = metric.WithAttributes(attribute.String("sampled", "true")) attrSampledFalse = metric.WithAttributes(attribute.String("sampled", "false")) decisionToAttribute = map[sampling.Decision]metric.MeasurementOption{ sampling.Sampled: attrSampledTrue, sampling.NotSampled: attrSampledFalse, sampling.InvertNotSampled: attrSampledFalse, sampling.InvertSampled: attrSampledTrue, } ) type Option func(*tailSamplingSpanProcessor) // newTracesProcessor returns a processor.Traces that will perform tail sampling according to the given // configuration.
func newTracesProcessor(ctx context.Context, set processor.Settings, nextConsumer consumer.Traces, cfg Config, opts ...Option) (processor.Traces, error) { telemetrySettings := set.TelemetrySettings telemetry, err := metadata.NewTelemetryBuilder(telemetrySettings) if err != nil { return nil, err } nopCache := cache.NewNopDecisionCache[bool]() sampledDecisions := nopCache nonSampledDecisions := nopCache if cfg.DecisionCache.SampledCacheSize > 0 { sampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.SampledCacheSize) if err != nil { return nil, err } } if cfg.DecisionCache.NonSampledCacheSize > 0 { nonSampledDecisions, err = cache.NewLRUDecisionCache[bool](cfg.DecisionCache.NonSampledCacheSize) if err != nil { return nil, err } } tsp := &tailSamplingSpanProcessor{ ctx: ctx, set: set, telemetry: telemetry, nextConsumer: nextConsumer, maxNumTraces: cfg.NumTraces, sampledIDCache: sampledDecisions, nonSampledIDCache: nonSampledDecisions, logger: telemetrySettings.Logger, numTracesOnMap: &atomic.Uint64{}, deleteChan: make(chan pcommon.TraceID, cfg.NumTraces), } tsp.policyTicker = &timeutils.PolicyTicker{OnTickFunc: tsp.samplingPolicyOnTick} for _, opt := range opts { opt(tsp) } if tsp.tickerFrequency == 0 { tsp.tickerFrequency = time.Second } if tsp.policies == nil { err := tsp.loadSamplingPolicy(cfg.PolicyCfgs) if err != nil { return nil, err } } if tsp.decisionBatcher == nil { // this will start a goroutine in the background, so we run it only if everything went // well in creating the policies numDecisionBatches := math.Max(1, cfg.DecisionWait.Seconds()) inBatcher, err := idbatcher.New(uint64(numDecisionBatches), cfg.ExpectedNewTracesPerSec, uint64(2*runtime.NumCPU())) if err != nil { return nil, err } tsp.decisionBatcher = inBatcher } return tsp, nil } // withDecisionBatcher sets the batcher used to batch trace IDs for policy evaluation. func withDecisionBatcher(batcher idbatcher.Batcher) Option { return func(tsp *tailSamplingSpanProcessor) { tsp.decisionBatcher = batcher } } // withPolicies sets the sampling policies to be used by the processor. func withPolicies(policies []*policy) Option { return func(tsp *tailSamplingSpanProcessor) { tsp.policies = policies } } // withTickerFrequency sets the frequency at which the processor will evaluate the sampling policies. func withTickerFrequency(frequency time.Duration) Option { return func(tsp *tailSamplingSpanProcessor) { tsp.tickerFrequency = frequency } } // withSampledDecisionCache sets the cache which the processor uses to store recently sampled trace IDs. func withSampledDecisionCache(c cache.Cache[bool]) Option { return func(tsp *tailSamplingSpanProcessor) { tsp.sampledIDCache = c } } // withNonSampledDecisionCache sets the cache which the processor uses to store recently non-sampled trace IDs.
func withNonSampledDecisionCache(c cache.Cache[bool]) Option { return func(tsp *tailSamplingSpanProcessor) { tsp.nonSampledIDCache = c } } func getPolicyEvaluator(settings component.TelemetrySettings, cfg *PolicyCfg) (sampling.PolicyEvaluator, error) { switch cfg.Type { case Composite: return getNewCompositePolicy(settings, &cfg.CompositeCfg) case And: return getNewAndPolicy(settings, &cfg.AndCfg) default: return getSharedPolicyEvaluator(settings, &cfg.sharedPolicyCfg) } } func getSharedPolicyEvaluator(settings component.TelemetrySettings, cfg *sharedPolicyCfg) (sampling.PolicyEvaluator, error) { settings.Logger = settings.Logger.With(zap.Any("policy", cfg.Type)) switch cfg.Type { case AlwaysSample: return sampling.NewAlwaysSample(settings), nil case Latency: lfCfg := cfg.LatencyCfg return sampling.NewLatency(settings, lfCfg.ThresholdMs, lfCfg.UpperThresholdmsMs), nil case NumericAttribute: nafCfg := cfg.NumericAttributeCfg return sampling.NewNumericAttributeFilter(settings, nafCfg.Key, nafCfg.MinValue, nafCfg.MaxValue, nafCfg.InvertMatch), nil case Probabilistic: pCfg := cfg.ProbabilisticCfg return sampling.NewProbabilisticSampler(settings, pCfg.HashSalt, pCfg.SamplingPercentage), nil case StringAttribute: safCfg := cfg.StringAttributeCfg return sampling.NewStringAttributeFilter(settings, safCfg.Key, safCfg.Values, safCfg.EnabledRegexMatching, safCfg.CacheMaxSize, safCfg.InvertMatch), nil case StatusCode: scfCfg := cfg.StatusCodeCfg return sampling.NewStatusCodeFilter(settings, scfCfg.StatusCodes) case RateLimiting: rlfCfg := cfg.RateLimitingCfg return sampling.NewRateLimiting(settings, rlfCfg.SpansPerSecond), nil case SpanCount: spCfg := cfg.SpanCountCfg return sampling.NewSpanCount(settings, spCfg.MinSpans, spCfg.MaxSpans), nil case TraceState: tsfCfg := cfg.TraceStateCfg return sampling.NewTraceStateFilter(settings, tsfCfg.Key, tsfCfg.Values), nil case BooleanAttribute: bafCfg := cfg.BooleanAttributeCfg return sampling.NewBooleanAttributeFilter(settings, bafCfg.Key, bafCfg.Value, bafCfg.InvertMatch), nil case OTTLCondition: ottlfCfg := cfg.OTTLConditionCfg return sampling.NewOTTLConditionFilter(settings, ottlfCfg.SpanConditions, ottlfCfg.SpanEventConditions, ottlfCfg.ErrorMode) default: return nil, fmt.Errorf("unknown sampling policy type %s", cfg.Type) } } type policyMetrics struct { idNotFoundOnMapCount, evaluateErrorCount, decisionSampled, decisionNotSampled int64 } func (tsp *tailSamplingSpanProcessor) loadSamplingPolicy(cfgs []PolicyCfg) error { telemetrySettings := tsp.set.TelemetrySettings componentID := tsp.set.ID.Name() cLen := len(cfgs) policies := make([]*policy, 0, cLen) policyNames := make(map[string]struct{}, cLen) for _, cfg := range cfgs { if cfg.Name == "" { return fmt.Errorf("policy name cannot be empty") } if _, exists := policyNames[cfg.Name]; exists { return fmt.Errorf("duplicate policy name %q", cfg.Name) } policyNames[cfg.Name] = struct{}{} eval, err := getPolicyEvaluator(telemetrySettings, &cfg) if err != nil { return fmt.Errorf("failed to create policy evaluator for %q: %w", cfg.Name, err) } uniquePolicyName := cfg.Name if componentID != "" { uniquePolicyName = fmt.Sprintf("%s.%s", componentID, cfg.Name) } policies = append(policies, &policy{ name: cfg.Name, evaluator: eval, attribute: metric.WithAttributes(attribute.String("policy", uniquePolicyName)), }) } tsp.policies = policies tsp.logger.Debug("Loaded sampling policy", zap.Int("policies.len", len(policies))) return nil } func (tsp *tailSamplingSpanProcessor) SetSamplingPolicy(cfgs []PolicyCfg) { 
tsp.logger.Debug("Setting pending sampling policy", zap.Int("pending.len", len(cfgs))) tsp.setPolicyMux.Lock() defer tsp.setPolicyMux.Unlock() tsp.pendingPolicy = cfgs } func (tsp *tailSamplingSpanProcessor) loadPendingSamplingPolicy() { tsp.setPolicyMux.Lock() defer tsp.setPolicyMux.Unlock() // Nothing pending, do nothing. pLen := len(tsp.pendingPolicy) if pLen == 0 { return } tsp.logger.Debug("Loading pending sampling policy", zap.Int("pending.len", pLen)) err := tsp.loadSamplingPolicy(tsp.pendingPolicy) // Empty pending regardless of error. If policy is invalid, it will fail on // every tick, no need to do extra work and flood the log with errors. tsp.pendingPolicy = nil if err != nil { tsp.logger.Error("Failed to load pending sampling policy", zap.Error(err)) tsp.logger.Debug("Continuing to use the previously loaded sampling policy") } } func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() { tsp.logger.Debug("Sampling Policy Evaluation ticked") tsp.loadPendingSamplingPolicy() ctx := context.Background() metrics := policyMetrics{} startTime := time.Now() batch, _ := tsp.decisionBatcher.CloseCurrentAndTakeFirstBatch() batchLen := len(batch) for _, id := range batch { d, ok := tsp.idToTrace.Load(id) if !ok { metrics.idNotFoundOnMapCount++ continue } trace := d.(*sampling.TraceData) trace.DecisionTime = time.Now() decision := tsp.makeDecision(id, trace, &metrics) tsp.telemetry.ProcessorTailSamplingSamplingDecisionTimerLatency.Record(tsp.ctx, int64(time.Since(startTime)/time.Microsecond)) tsp.telemetry.ProcessorTailSamplingGlobalCountTracesSampled.Add(tsp.ctx, 1, decisionToAttribute[decision]) // Sampled or not, remove the batches trace.Lock() allSpans := trace.ReceivedBatches trace.FinalDecision = decision trace.ReceivedBatches = ptrace.NewTraces() trace.Unlock() switch decision { case sampling.Sampled: tsp.releaseSampledTrace(ctx, id, allSpans) case sampling.NotSampled: tsp.releaseNotSampledTrace(id) } } tsp.telemetry.ProcessorTailSamplingSamplingTracesOnMemory.Record(tsp.ctx, int64(tsp.numTracesOnMap.Load())) tsp.telemetry.ProcessorTailSamplingSamplingTraceDroppedTooEarly.Add(tsp.ctx, metrics.idNotFoundOnMapCount) tsp.telemetry.ProcessorTailSamplingSamplingPolicyEvaluationError.Add(tsp.ctx, metrics.evaluateErrorCount) tsp.logger.Debug("Sampling policy evaluation completed", zap.Int("batch.len", batchLen), zap.Int64("sampled", metrics.decisionSampled), zap.Int64("notSampled", metrics.decisionNotSampled), zap.Int64("droppedPriorToEvaluation", metrics.idNotFoundOnMapCount), zap.Int64("policyEvaluationErrors", metrics.evaluateErrorCount), ) } func (tsp *tailSamplingSpanProcessor) makeDecision(id pcommon.TraceID, trace *sampling.TraceData, metrics *policyMetrics) sampling.Decision { var decisions [8]bool ctx := context.Background() startTime := time.Now() // Check all policies before making a final decision. 
for _, p := range tsp.policies { decision, err := p.evaluator.Evaluate(ctx, id, trace) latency := time.Since(startTime) tsp.telemetry.ProcessorTailSamplingSamplingDecisionLatency.Record(ctx, int64(latency/time.Microsecond), p.attribute) if err != nil { decisions[sampling.Error] = true metrics.evaluateErrorCount++ tsp.logger.Debug("Sampling policy error", zap.Error(err)) continue } tsp.telemetry.ProcessorTailSamplingCountTracesSampled.Add(ctx, 1, p.attribute, decisionToAttribute[decision]) if telemetry.IsMetricStatCountSpansSampledEnabled() { tsp.telemetry.ProcessorTailSamplingCountSpansSampled.Add(ctx, trace.SpanCount.Load(), p.attribute, decisionToAttribute[decision]) } decisions[decision] = true } var finalDecision sampling.Decision switch { case decisions[sampling.InvertNotSampled]: // InvertNotSampled takes precedence finalDecision = sampling.NotSampled case decisions[sampling.Sampled]: finalDecision = sampling.Sampled case decisions[sampling.InvertSampled] && !decisions[sampling.NotSampled]: finalDecision = sampling.Sampled default: finalDecision = sampling.NotSampled } if finalDecision == sampling.Sampled { metrics.decisionSampled++ } else { metrics.decisionNotSampled++ } return finalDecision } // ConsumeTraces is required by the processor.Traces interface. func (tsp *tailSamplingSpanProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error { resourceSpans := td.ResourceSpans() for i := 0; i < resourceSpans.Len(); i++ { tsp.processTraces(resourceSpans.At(i)) } return nil } func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans ptrace.ResourceSpans) map[pcommon.TraceID][]spanAndScope { idToSpans := make(map[pcommon.TraceID][]spanAndScope) ilss := resourceSpans.ScopeSpans() for j := 0; j < ilss.Len(); j++ { scope := ilss.At(j) spans := scope.Spans() is := scope.Scope() spansLen := spans.Len() for k := 0; k < spansLen; k++ { span := spans.At(k) key := span.TraceID() idToSpans[key] = append(idToSpans[key], spanAndScope{ span: &span, instrumentationScope: &is, }) } } return idToSpans } func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans ptrace.ResourceSpans) { currTime := time.Now() // Group spans by their traceId to minimize contention on idToTrace idToSpansAndScope := tsp.groupSpansByTraceKey(resourceSpans) var newTraceIDs int64 for id, spans := range idToSpansAndScope { // If the trace ID is in the sampled cache, short circuit the decision if _, ok := tsp.sampledIDCache.Get(id); ok { tsp.logger.Debug("Trace ID is in the sampled cache", zap.Stringer("id", id)) traceTd := ptrace.NewTraces() appendToTraces(traceTd, resourceSpans, spans) tsp.releaseSampledTrace(tsp.ctx, id, traceTd) tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision. Add(tsp.ctx, int64(len(spans)), attrSampledTrue) continue } // If the trace ID is in the non-sampled cache, short circuit the decision if _, ok := tsp.nonSampledIDCache.Get(id); ok { tsp.logger.Debug("Trace ID is in the non-sampled cache", zap.Stringer("id", id)) tsp.telemetry.ProcessorTailSamplingEarlyReleasesFromCacheDecision.
Add(tsp.ctx, int64(len(spans)), attrSampledFalse) continue } lenSpans := int64(len(spans)) d, loaded := tsp.idToTrace.Load(id) if !loaded { spanCount := &atomic.Int64{} spanCount.Store(lenSpans) td := &sampling.TraceData{ ArrivalTime: currTime, SpanCount: spanCount, ReceivedBatches: ptrace.NewTraces(), } if d, loaded = tsp.idToTrace.LoadOrStore(id, td); !loaded { newTraceIDs++ tsp.decisionBatcher.AddToCurrentBatch(id) tsp.numTracesOnMap.Add(1) postDeletion := false for !postDeletion { select { case tsp.deleteChan <- id: postDeletion = true default: traceKeyToDrop := <-tsp.deleteChan tsp.dropTrace(traceKeyToDrop, currTime) } } } } actualData := d.(*sampling.TraceData) if loaded { actualData.SpanCount.Add(lenSpans) } actualData.Lock() finalDecision := actualData.FinalDecision if finalDecision == sampling.Unspecified { // If the final decision hasn't been made, add the new spans under the lock. appendToTraces(actualData.ReceivedBatches, resourceSpans, spans) actualData.Unlock() continue } actualData.Unlock() switch finalDecision { case sampling.Sampled: traceTd := ptrace.NewTraces() appendToTraces(traceTd, resourceSpans, spans) tsp.releaseSampledTrace(tsp.ctx, id, traceTd) case sampling.NotSampled: tsp.releaseNotSampledTrace(id) default: tsp.logger.Warn("Unexpected sampling decision", zap.Int("decision", int(finalDecision))) } if !actualData.DecisionTime.IsZero() { tsp.telemetry.ProcessorTailSamplingSamplingLateSpanAge.Record(tsp.ctx, int64(time.Since(actualData.DecisionTime)/time.Second)) } } tsp.telemetry.ProcessorTailSamplingNewTraceIDReceived.Add(tsp.ctx, newTraceIDs) } func (tsp *tailSamplingSpanProcessor) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } // Start is invoked during service startup. func (tsp *tailSamplingSpanProcessor) Start(context.Context, component.Host) error { tsp.policyTicker.Start(tsp.tickerFrequency) return nil } // Shutdown is invoked during service shutdown. func (tsp *tailSamplingSpanProcessor) Shutdown(context.Context) error { tsp.decisionBatcher.Stop() tsp.policyTicker.Stop() return nil } func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pcommon.TraceID, deletionTime time.Time) { var trace *sampling.TraceData if d, ok := tsp.idToTrace.Load(traceID); ok { trace = d.(*sampling.TraceData) tsp.idToTrace.Delete(traceID) // Subtract one from numTracesOnMap per https://godoc.org/sync/atomic#AddUint64 tsp.numTracesOnMap.Add(^uint64(0)) } if trace == nil { tsp.logger.Debug("Attempt to delete trace ID not on table", zap.Stringer("id", traceID)) return } tsp.telemetry.ProcessorTailSamplingSamplingTraceRemovalAge.Record(tsp.ctx, int64(deletionTime.Sub(trace.ArrivalTime)/time.Second)) } // releaseSampledTrace sends the trace data to the next consumer. It // additionally adds the trace ID to the cache of sampled trace IDs. If the // trace ID is cached, it deletes the spans from the internal map. func (tsp *tailSamplingSpanProcessor) releaseSampledTrace(ctx context.Context, id pcommon.TraceID, td ptrace.Traces) { tsp.sampledIDCache.Put(id, true) if err := tsp.nextConsumer.ConsumeTraces(ctx, td); err != nil { tsp.logger.Warn( "Error sending spans to destination", zap.Error(err)) } _, ok := tsp.sampledIDCache.Get(id) if ok { tsp.dropTrace(id, time.Now()) } } // releaseNotSampledTrace adds the trace ID to the cache of not sampled trace // IDs. If the trace ID is cached, it deletes the spans from the internal map. 
func (tsp *tailSamplingSpanProcessor) releaseNotSampledTrace(id pcommon.TraceID) { tsp.nonSampledIDCache.Put(id, true) _, ok := tsp.nonSampledIDCache.Get(id) if ok { tsp.dropTrace(id, time.Now()) } } func appendToTraces(dest ptrace.Traces, rss ptrace.ResourceSpans, spanAndScopes []spanAndScope) { rs := dest.ResourceSpans().AppendEmpty() rss.Resource().CopyTo(rs.Resource()) scopePointerToNewScope := make(map[*pcommon.InstrumentationScope]*ptrace.ScopeSpans) for _, spanAndScope := range spanAndScopes { // If the scope of the spanAndScope is not in the map, add it to the map and the destination. if scope, ok := scopePointerToNewScope[spanAndScope.instrumentationScope]; !ok { is := rs.ScopeSpans().AppendEmpty() spanAndScope.instrumentationScope.CopyTo(is.Scope()) scopePointerToNewScope[spanAndScope.instrumentationScope] = &is sp := is.Spans().AppendEmpty() spanAndScope.span.CopyTo(sp) } else { sp := scope.Spans().AppendEmpty() spanAndScope.span.CopyTo(sp) } } }
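// A compact standalone restatement (illustrative only) of the precedence rule that
// makeDecision applies across per-policy decisions: InvertNotSampled always wins, then
// Sampled, then InvertSampled (unless some policy said NotSampled); anything else drops
// the trace. The helper name is hypothetical; the switch mirrors the code above.

package tailsamplingprocessor

import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling"

func resolveFinalDecision(decisions map[sampling.Decision]bool) sampling.Decision {
	switch {
	case decisions[sampling.InvertNotSampled]: // takes precedence over everything else
		return sampling.NotSampled
	case decisions[sampling.Sampled]:
		return sampling.Sampled
	case decisions[sampling.InvertSampled] && !decisions[sampling.NotSampled]:
		return sampling.Sampled
	default:
		return sampling.NotSampled
	}
}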
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver" import ( "errors" "fmt" "net" "go.opentelemetry.io/collector/config/configtls" "go.uber.org/multierr" ) // Config holds all the parameters to start an HTTP server that can receive logs from Cloudflare type Config struct { Logs LogsConfig `mapstructure:"logs"` } type LogsConfig struct { Secret string `mapstructure:"secret"` Endpoint string `mapstructure:"endpoint"` TLS *configtls.ServerConfig `mapstructure:"tls"` Attributes map[string]string `mapstructure:"attributes"` TimestampField string `mapstructure:"timestamp_field"` } var ( errNoEndpoint = errors.New("an endpoint must be specified") errNoCert = errors.New("tls was configured, but no cert file was specified") errNoKey = errors.New("tls was configured, but no key file was specified") defaultTimestampField = "EdgeStartTimestamp" ) func (c *Config) Validate() error { if c.Logs.Endpoint == "" { return errNoEndpoint } var errs error if c.Logs.TLS != nil { // Missing key if c.Logs.TLS.KeyFile == "" { errs = multierr.Append(errs, errNoKey) } // Missing cert if c.Logs.TLS.CertFile == "" { errs = multierr.Append(errs, errNoCert) } } _, _, err := net.SplitHostPort(c.Logs.Endpoint) if err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err)) } return errs }
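// An illustrative use of Config.Validate (placeholder paths and port): the endpoint must
// be a host:port pair, and when TLS is configured both cert and key files must be set or
// the corresponding errors are returned. The embedded configtls field layout is assumed
// from recent collector versions.

package cloudflarereceiver

import "go.opentelemetry.io/collector/config/configtls"

func exampleValidConfig() error {
	cfg := &Config{
		Logs: LogsConfig{
			Endpoint:       "0.0.0.0:12345",
			TimestampField: defaultTimestampField,
			TLS: &configtls.ServerConfig{
				Config: configtls.Config{
					CertFile: "/path/to/server.crt", // placeholder
					KeyFile:  "/path/to/server.key", // placeholder
				},
			},
		},
	}
	return cfg.Validate() // nil for this configuration
}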
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver" import ( "context" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata" ) // NewFactory returns the component factory for the cloudflarereceiver func NewFactory() receiver.Factory { return receiver.NewFactory( metadata.Type, createDefaultConfig, receiver.WithLogs(createLogsReceiver, metadata.LogsStability), ) } func createLogsReceiver( _ context.Context, params receiver.Settings, rConf component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { cfg := rConf.(*Config) return newLogsReceiver(params, cfg, consumer) } func createDefaultConfig() component.Config { return &Config{ Logs: LogsConfig{ TimestampField: defaultTimestampField, }, } }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package cloudflarereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver" import ( "bytes" "compress/gzip" "context" "encoding/json" "errors" "fmt" "io" "net" "net/http" "strconv" "sync" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" rcvr "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver/internal/metadata" ) type logsReceiver struct { logger *zap.Logger cfg *LogsConfig server *http.Server consumer consumer.Logs wg *sync.WaitGroup id component.ID // ID of the receiver component telemetrySettings component.TelemetrySettings } const secretHeaderName = "X-CF-Secret" func newLogsReceiver(params rcvr.Settings, cfg *Config, consumer consumer.Logs) (*logsReceiver, error) { recv := &logsReceiver{ cfg: &cfg.Logs, consumer: consumer, logger: params.Logger, wg: &sync.WaitGroup{}, telemetrySettings: params.TelemetrySettings, id: params.ID, } recv.server = &http.Server{ Handler: http.HandlerFunc(recv.handleRequest), ReadHeaderTimeout: 20 * time.Second, } if recv.cfg.TLS != nil { tlsConfig, err := recv.cfg.TLS.LoadTLSConfig(context.Background()) if err != nil { return nil, err } recv.server.TLSConfig = tlsConfig } return recv, nil } func (l *logsReceiver) Start(ctx context.Context, host component.Host) error { return l.startListening(ctx, host) } func (l *logsReceiver) Shutdown(ctx context.Context) error { l.logger.Debug("Shutting down server") err := l.server.Shutdown(ctx) if err != nil { return err } l.logger.Debug("Waiting for shutdown to complete.") l.wg.Wait() return nil } func (l *logsReceiver) startListening(ctx context.Context, host component.Host) error { l.logger.Debug("Starting receiver HTTP server") // We use l.server.Serve* over l.server.ListenAndServe* so that we can catch // and return errors related to binding to the network interface on start.
var lc net.ListenConfig listener, err := lc.Listen(ctx, "tcp", l.cfg.Endpoint) if err != nil { return err } l.wg.Add(1) go func() { defer l.wg.Done() if l.cfg.TLS != nil { l.logger.Debug("Starting ServeTLS", zap.String("address", l.cfg.Endpoint), zap.String("certfile", l.cfg.TLS.CertFile), zap.String("keyfile", l.cfg.TLS.KeyFile)) err := l.server.ServeTLS(listener, l.cfg.TLS.CertFile, l.cfg.TLS.KeyFile) l.logger.Debug("ServeTLS done") if !errors.Is(err, http.ErrServerClosed) { l.logger.Error("ServeTLS failed", zap.Error(err)) componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } } else { l.logger.Debug("Starting Serve", zap.String("address", l.cfg.Endpoint)) err := l.server.Serve(listener) l.logger.Debug("Serve done") if !errors.Is(err, http.ErrServerClosed) { l.logger.Error("Serve failed", zap.Error(err)) componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } } }() return nil } func (l *logsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) { if l.cfg.Secret != "" { secretHeader := req.Header.Get(secretHeaderName) if secretHeader == "" { rw.WriteHeader(http.StatusUnauthorized) l.logger.Debug("Got payload with no Secret when it was specified in config, dropping...") return } else if secretHeader != l.cfg.Secret { rw.WriteHeader(http.StatusUnauthorized) l.logger.Debug("Got payload with invalid Secret, dropping...") return } } var payload []byte if req.Header.Get("Content-Encoding") == "gzip" { reader, err := gzip.NewReader(req.Body) if err != nil { rw.WriteHeader(http.StatusUnprocessableEntity) l.logger.Debug("Got payload with gzip, but failed to create gzip reader", zap.Error(err)) return } defer reader.Close() // Read the decompressed request body payload, err = io.ReadAll(reader) if err != nil { rw.WriteHeader(http.StatusUnprocessableEntity) l.logger.Debug("Got payload with gzip, but failed to read", zap.Error(err)) return } } else { var err error payload, err = io.ReadAll(req.Body) if err != nil { rw.WriteHeader(http.StatusUnprocessableEntity) l.logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr)) return } } if string(payload) == "test" { l.logger.Info("Received test request from Cloudflare") rw.WriteHeader(http.StatusOK) return } logs, err := parsePayload(payload) if err != nil { rw.WriteHeader(http.StatusUnprocessableEntity) l.logger.Error("Failed to convert cloudflare request payload to maps", zap.Error(err)) return } if err := l.consumer.ConsumeLogs(req.Context(), l.processLogs(pcommon.NewTimestampFromTime(time.Now()), logs)); err != nil { errorutil.HTTPError(rw, err) l.logger.Error("Failed to consume alert as log", zap.Error(err)) return } rw.WriteHeader(http.StatusOK) } func parsePayload(payload []byte) ([]map[string]any, error) { lines := bytes.Split(payload, []byte("\n")) logs := make([]map[string]any, 0, len(lines)) for _, line := range lines { if len(line) == 0 { continue } var log map[string]any err := json.Unmarshal(line, &log) if err != nil { return logs, err } logs = append(logs, log) } return logs, nil } func (l *logsReceiver) processLogs(now pcommon.Timestamp, logs []map[string]any) plog.Logs { pLogs := plog.NewLogs() // Group logs by ZoneName field if it was configured so it can be used as a resource attribute groupedLogs := make(map[string][]map[string]any) for _, log := range logs { zone := "" if v, ok := log["ZoneName"]; ok { if stringV, ok := v.(string); ok { zone = stringV } } groupedLogs[zone] = append(groupedLogs[zone], log) } for zone, logGroup := range groupedLogs
{ resourceLogs := pLogs.ResourceLogs().AppendEmpty() if zone != "" { resource := resourceLogs.Resource() resource.Attributes().PutStr("cloudflare.zone", zone) } scopeLogs := resourceLogs.ScopeLogs().AppendEmpty() scopeLogs.Scope().SetName(metadata.ScopeName) for _, log := range logGroup { logRecord := scopeLogs.LogRecords().AppendEmpty() logRecord.SetObservedTimestamp(now) if v, ok := log[l.cfg.TimestampField]; ok { if stringV, ok := v.(string); ok { ts, err := time.Parse(time.RFC3339, stringV) if err != nil { l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Error(err), zap.String("value", stringV)) } else { logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts)) } } else { l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Any("value", v)) } } if v, ok := log["EdgeResponseStatus"]; ok { sev := plog.SeverityNumberUnspecified switch v := v.(type) { case string: intV, err := strconv.ParseInt(v, 10, 64) if err != nil { l.logger.Warn("unable to parse EdgeResponseStatus", zap.Error(err), zap.String("value", v)) } else { sev = severityFromStatusCode(intV) } case int64: sev = severityFromStatusCode(v) case float64: sev = severityFromStatusCode(int64(v)) } if sev != plog.SeverityNumberUnspecified { logRecord.SetSeverityNumber(sev) logRecord.SetSeverityText(sev.String()) } } attrs := logRecord.Attributes() for field, attribute := range l.cfg.Attributes { if v, ok := log[field]; ok { switch v := v.(type) { case string: attrs.PutStr(attribute, v) case int: attrs.PutInt(attribute, int64(v)) case int64: attrs.PutInt(attribute, v) case float64: attrs.PutDouble(attribute, v) case bool: attrs.PutBool(attribute, v) default: l.logger.Warn("unable to translate field to attribute, unsupported type", zap.String("field", field), zap.Any("value", v), zap.String("type", fmt.Sprintf("%T", v))) } } } err := logRecord.Body().SetEmptyMap().FromRaw(log) if err != nil { l.logger.Warn("unable to set body", zap.Error(err)) } } } return pLogs } // severityFromStatusCode translates HTTP status code to OpenTelemetry severity number. func severityFromStatusCode(statusCode int64) plog.SeverityNumber { switch { case statusCode < 300: return plog.SeverityNumberInfo case statusCode < 400: return plog.SeverityNumberInfo2 case statusCode < 500: return plog.SeverityNumberWarn case statusCode < 600: return plog.SeverityNumberError default: return plog.SeverityNumberUnspecified } }
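// A quick reference (illustrative helper, not upstream code) for the status-code-to-severity
// mapping implemented by severityFromStatusCode above: 2xx maps to Info, 3xx to Info2,
// 4xx to Warn, 5xx to Error, and anything outside those ranges stays Unspecified.

package cloudflarereceiver

import "fmt"

func printSeverityExamples() {
	for _, code := range []int64{200, 302, 404, 503} {
		fmt.Println(code, severityFromStatusCode(code)) // Info, Info2, Warn, Error
	}
}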
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal" import ( "compress/flate" "compress/gzip" "fmt" "io" "math" "mime" "net/http" "github.com/grafana/loki/pkg/push" ) var ( contentType = http.CanonicalHeaderKey("Content-Type") contentEnc = http.CanonicalHeaderKey("Content-Encoding") ) const applicationJSON = "application/json" func ParseRequest(req *http.Request) (*push.PushRequest, error) { var body io.Reader contentEncoding := req.Header.Get(contentEnc) switch contentEncoding { case "", "snappy": body = req.Body case "gzip": gzipReader, err := gzip.NewReader(req.Body) if err != nil { return nil, err } defer gzipReader.Close() body = gzipReader case "deflate": flateReader := flate.NewReader(req.Body) defer flateReader.Close() body = flateReader default: return nil, fmt.Errorf("Content-Encoding %q not supported", contentEncoding) } var pushRequest push.PushRequest reqContentType := req.Header.Get(contentType) reqContentType, _ /* params */, err := mime.ParseMediaType(reqContentType) if err != nil { return nil, err } switch reqContentType { case applicationJSON: if err = decodePushRequest(body, &pushRequest); err != nil { return nil, err } default: // When no content-type header is set or when it is set to // `application/x-protobuf`: expect snappy compression. if err := parseProtoReader(body, int(req.ContentLength), math.MaxInt32, &pushRequest); err != nil { return nil, err } return &pushRequest, nil } return &pushRequest, nil }
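// A hypothetical caller sketch for ParseRequest: a gzip-compressed JSON push body carrying
// one stream, with the Content-Type and Content-Encoding headers a Loki client would send.
// The endpoint path and payload values are placeholders.

package internal

import (
	"bytes"
	"compress/gzip"
	"net/http"
)

func exampleParseJSONPush() error {
	var body bytes.Buffer
	gz := gzip.NewWriter(&body)
	_, _ = gz.Write([]byte(`{"streams":[{"stream":{"job":"demo"},"values":[["1700000000000000000","hello"]]}]}`))
	_ = gz.Close()

	req, err := http.NewRequest(http.MethodPost, "/loki/api/v1/push", &body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Content-Encoding", "gzip")

	pushReq, err := ParseRequest(req)
	if err != nil {
		return err
	}
	_ = pushReq.Streams[0].Labels // `{job="demo"}` after LabelSet.String formatting
	return nil
}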
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal" import ( "io" "sort" "strconv" "strings" "time" "unsafe" "github.com/buger/jsonparser" "github.com/grafana/loki/pkg/push" jsoniter "github.com/json-iterator/go" ) // PushRequest models a log stream push but is unmarshalled to proto push format. type PushRequest struct { Streams []Stream `json:"streams"` } // Stream helps with unmarshalling of each log stream for push request. type Stream push.Stream func (s *Stream) UnmarshalJSON(data []byte) error { err := jsonparser.ObjectEach(data, func(key, val []byte, ty jsonparser.ValueType, _ int) error { switch string(key) { case "stream": var labels LabelSet if err := labels.UnmarshalJSON(val); err != nil { return err } s.Labels = labels.String() case "values": if ty == jsonparser.Null { return nil } entries, err := unmarshalHTTPToLogProtoEntries(val) if err != nil { return err } s.Entries = entries } return nil }) return err } func unmarshalHTTPToLogProtoEntries(data []byte) ([]push.Entry, error) { var ( entries []push.Entry parseError error ) if _, err := jsonparser.ArrayEach(data, func(value []byte, ty jsonparser.ValueType, _ int, err error) { if err != nil || parseError != nil { return } if ty == jsonparser.Null { return } e, err := unmarshalHTTPToLogProtoEntry(value) if err != nil { parseError = err return } entries = append(entries, e) }); err != nil { parseError = err } if parseError != nil { return nil, parseError } return entries, nil } func unmarshalHTTPToLogProtoEntry(data []byte) (push.Entry, error) { var ( i int parseError error e push.Entry ) _, err := jsonparser.ArrayEach(data, func(value []byte, t jsonparser.ValueType, _ int, _ error) { // assert that both items in array are of type string if (i == 0 || i == 1) && t != jsonparser.String { parseError = jsonparser.MalformedStringError return } else if i == 2 && t != jsonparser.Object { parseError = jsonparser.MalformedObjectError return } switch i { case 0: // timestamp ts, err := jsonparser.ParseInt(value) if err != nil { parseError = err return } e.Timestamp = time.Unix(0, ts) case 1: // value v, err := jsonparser.ParseString(value) if err != nil { parseError = err return } e.Line = v case 2: // structuredMetadata var structuredMetadata []push.LabelAdapter err := jsonparser.ObjectEach(value, func(key, val []byte, dataType jsonparser.ValueType, _ int) error { if dataType != jsonparser.String { return jsonparser.MalformedStringError } structuredMetadata = append(structuredMetadata, push.LabelAdapter{ Name: string(key), Value: string(val), }) return nil }) if err != nil { parseError = err return } e.StructuredMetadata = structuredMetadata } i++ }) if parseError != nil { return e, parseError } return e, err } // LabelSet is a key/value pair mapping of labels type LabelSet map[string]string func (l *LabelSet) UnmarshalJSON(data []byte) error { if *l == nil { *l = make(LabelSet) } return jsonparser.ObjectEach(data, func(key, val []byte, _ jsonparser.ValueType, _ int) error { v, err := jsonparser.ParseString(val) if err != nil { return err } k, err := jsonparser.ParseString(key) if err != nil { return err } (*l)[k] = v return nil }) } // String implements the Stringer interface. It returns a formatted/sorted set of label key/value pairs. 
func (l LabelSet) String() string { var b strings.Builder keys := make([]string, 0, len(l)) for k := range l { keys = append(keys, k) } sort.Strings(keys) b.WriteByte('{') for i, k := range keys { if i > 0 { b.WriteByte(',') b.WriteByte(' ') } b.WriteString(k) b.WriteByte('=') b.WriteString(strconv.Quote(l[k])) } b.WriteByte('}') return b.String() } // decodePushRequest directly decodes json to a push.PushRequest func decodePushRequest(b io.Reader, r *push.PushRequest) error { var request PushRequest if err := jsoniter.NewDecoder(b).Decode(&request); err != nil { return err } *r = push.PushRequest{ // Stream is a named type of push.Stream, so the two slice types share an identical // memory layout and the slice header can be reinterpreted in place without copying. Streams: *(*[]push.Stream)(unsafe.Pointer(&request.Streams)), } return nil }
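// A tiny illustration of the formatting contract of LabelSet.String: keys are sorted and
// values quoted, so equal label sets always render to identical strings.

package internal

import "fmt"

func exampleLabelFormatting() {
	ls := LabelSet{"job": "demo", "env": "prod"}
	fmt.Println(ls.String()) // {env="prod", job="demo"}
}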
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver/internal" import ( "bytes" "fmt" "io" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" ) const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)" // parseProtoReader parses a compressed proto from an io.Reader. func parseProtoReader(reader io.Reader, expectedSize, maxSize int, req proto.Message) error { body, err := decompressRequest(reader, expectedSize, maxSize) if err != nil { return err } // We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first, // which we can't override without upsetting golint. req.Reset() if u, ok := req.(proto.Unmarshaler); ok { err = u.Unmarshal(body) } else { err = proto.NewBuffer(body).Unmarshal(req) } if err != nil { return err } return nil } func decompressRequest(reader io.Reader, expectedSize, maxSize int) (body []byte, err error) { defer func() { if err != nil && len(body) > maxSize { err = fmt.Errorf(messageSizeLargerErrFmt, len(body), maxSize) } }() if expectedSize > maxSize { return nil, fmt.Errorf(messageSizeLargerErrFmt, expectedSize, maxSize) } buffer, ok := tryBufferFromReader(reader) if ok { body, err = decompressFromBuffer(buffer, maxSize) return } body, err = decompressFromReader(reader, expectedSize, maxSize) return } func decompressFromReader(reader io.Reader, expectedSize, maxSize int) ([]byte, error) { var ( buf bytes.Buffer body []byte err error ) if expectedSize > 0 { buf.Grow(expectedSize + bytes.MinRead) // extra space guarantees no reallocation } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. reader = io.LimitReader(reader, int64(maxSize)+1) _, err = buf.ReadFrom(reader) if err != nil { return nil, err } body, err = decompressFromBuffer(&buf, maxSize) return body, err } func decompressFromBuffer(buffer *bytes.Buffer, maxSize int) ([]byte, error) { if len(buffer.Bytes()) > maxSize { return nil, fmt.Errorf(messageSizeLargerErrFmt, len(buffer.Bytes()), maxSize) } size, err := snappy.DecodedLen(buffer.Bytes()) if err != nil { return nil, err } if size > maxSize { return nil, fmt.Errorf(messageSizeLargerErrFmt, size, maxSize) } body, err := snappy.Decode(nil, buffer.Bytes()) if err != nil { return nil, err } return body, nil } // tryBufferFromReader attempts to cast the reader to a `*bytes.Buffer` this is possible when using httpgrpc. // If it fails it will return nil and false. func tryBufferFromReader(reader io.Reader) (*bytes.Buffer, bool) { if bufReader, ok := reader.(interface { BytesBuffer() *bytes.Buffer }); ok && bufReader != nil { return bufReader.BytesBuffer(), true } return nil, false }
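// A sketch of the protobuf path parseProtoReader handles: block-format snappy over a
// marshalled push.PushRequest, which is what Loki clients send by default. The empty
// request and function name are illustrative only.

package internal

import (
	"bytes"
	"math"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/grafana/loki/pkg/push"
)

func exampleParseProtoPush() error {
	raw, err := proto.Marshal(&push.PushRequest{})
	if err != nil {
		return err
	}
	compressed := snappy.Encode(nil, raw) // block format, not the framed/stream format

	var out push.PushRequest
	return parseProtoReader(bytes.NewReader(compressed), len(compressed), math.MaxInt32, &out)
}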
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "context" "encoding/json" "fmt" "sync" "time" "go.mongodb.org/atlas/mongodbatlas" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/extension/xextension/storage" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" rcvr "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal" ) const ( accessLogStorageKey = "last_endtime_access_logs_%s" defaultAccessLogsPollInterval = 5 * time.Minute defaultAccessLogsPageSize = 20000 defaultAccessLogsMaxPages = 10 ) type accessLogStorageRecord struct { ClusterName string `json:"cluster_name"` NextPollStartTime time.Time `json:"next_poll_start_time"` } type accessLogClient interface { GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error) GetClusters(ctx context.Context, groupID string) ([]mongodbatlas.Cluster, error) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *internal.GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) } type accessLogsReceiver struct { client accessLogClient logger *zap.Logger storageClient storage.Client cfg *Config consumer consumer.Logs record map[string][]*accessLogStorageRecord authResult *bool wg *sync.WaitGroup cancel context.CancelFunc } func newAccessLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *accessLogsReceiver { r := &accessLogsReceiver{ cancel: func() {}, client: internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger), cfg: cfg, logger: settings.Logger, consumer: consumer, wg: &sync.WaitGroup{}, storageClient: storage.NewNopClient(), record: make(map[string][]*accessLogStorageRecord), } for _, p := range cfg.Logs.Projects { p.populateIncludesAndExcludes() if p.AccessLogs != nil && p.AccessLogs.IsEnabled() { if p.AccessLogs.PageSize <= 0 { p.AccessLogs.PageSize = defaultAccessLogsPageSize } if p.AccessLogs.MaxPages <= 0 { p.AccessLogs.MaxPages = defaultAccessLogsMaxPages } if p.AccessLogs.PollInterval == 0 { p.AccessLogs.PollInterval = defaultAccessLogsPollInterval } } } return r } func (alr *accessLogsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error { alr.logger.Debug("Starting up access log receiver") cancelCtx, cancel := context.WithCancel(ctx) alr.cancel = cancel alr.storageClient = storageClient return alr.startPolling(cancelCtx) } func (alr *accessLogsReceiver) Shutdown(_ context.Context) error { alr.logger.Debug("Shutting down accessLog receiver") alr.cancel() alr.wg.Wait() return nil } func (alr *accessLogsReceiver) startPolling(ctx context.Context) error { for _, pc := range alr.cfg.Logs.Projects { if pc.AccessLogs == nil || !pc.AccessLogs.IsEnabled() { continue } t := time.NewTicker(pc.AccessLogs.PollInterval) alr.wg.Add(1) go func() { defer alr.wg.Done() for { select { case <-t.C: if err := alr.pollAccessLogs(ctx, pc); err != nil { alr.logger.Error("error while polling for accessLog", zap.Error(err)) } case <-ctx.Done(): return } } }() } return nil } func (alr *accessLogsReceiver) pollAccessLogs(ctx context.Context, pc *LogsProjectConfig) error { st := pcommon.NewTimestampFromTime(time.Now().Add(-1 * 
pc.AccessLogs.PollInterval)).AsTime() et := time.Now() project, err := alr.client.GetProject(ctx, pc.Name) if err != nil { alr.logger.Error("error retrieving project information", zap.Error(err), zap.String("project", pc.Name)) return err } alr.loadCheckpoint(ctx, project.ID) clusters, err := alr.client.GetClusters(ctx, project.ID) if err != nil { alr.logger.Error("error retrieving cluster information", zap.Error(err), zap.String("project", pc.Name)) return err } filteredClusters, err := filterClusters(clusters, pc.ProjectConfig) if err != nil { alr.logger.Error("error filtering clusters", zap.Error(err), zap.String("project", pc.Name)) return err } for _, cluster := range filteredClusters { clusterCheckpoint := alr.getClusterCheckpoint(project.ID, cluster.Name) if clusterCheckpoint == nil { clusterCheckpoint = &accessLogStorageRecord{ ClusterName: cluster.Name, NextPollStartTime: st, } alr.setClusterCheckpoint(project.ID, clusterCheckpoint) } clusterCheckpoint.NextPollStartTime = alr.pollCluster(ctx, pc, project, cluster, clusterCheckpoint.NextPollStartTime, et) if err = alr.checkpoint(ctx, project.ID); err != nil { alr.logger.Warn("error checkpointing", zap.Error(err), zap.String("project", pc.Name)) } } return nil } func (alr *accessLogsReceiver) pollCluster(ctx context.Context, pc *LogsProjectConfig, project *mongodbatlas.Project, cluster mongodbatlas.Cluster, startTime, now time.Time) time.Time { nowTimestamp := pcommon.NewTimestampFromTime(now) opts := &internal.GetAccessLogsOptions{ MaxDate: now, MinDate: startTime, AuthResult: alr.authResult, NLogs: int(pc.AccessLogs.PageSize), } pageCount := 0 // Assume failure, in which case we poll starting with the same startTime // unless we successfully make request(s) for access logs and they are successfully sent to the consumer nextPollStartTime := startTime for { accessLogs, err := alr.client.GetAccessLogs(ctx, project.ID, cluster.Name, opts) pageCount++ if err != nil { alr.logger.Error("unable to get access logs", zap.Error(err), zap.String("project", project.Name), zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name)) return nextPollStartTime } // No logs retrieved, try again on next interval with the same start time as the API may not have // all logs for the given time available to be queried yet (undocumented behavior) if len(accessLogs) == 0 { return nextPollStartTime } logs := transformAccessLogs(nowTimestamp, accessLogs, project, cluster, alr.logger) if err = alr.consumer.ConsumeLogs(ctx, logs); err != nil { alr.logger.Error("error consuming project cluster log", zap.Error(err), zap.String("project", project.Name), zap.String("clusterID", cluster.ID), zap.String("clusterName", cluster.Name)) return nextPollStartTime } // The first page of results will have the latest data, so we want to update the nextPollStartTime // There is risk of data loss at this point if we are unable to then process the remaining pages // of data, but that is a limitation of the API that we can't work around. 
if pageCount == 1 { // This slice access is safe as we have previously confirmed that the slice is not empty mostRecentLogTimestamp, tsErr := getTimestamp(accessLogs[0]) if tsErr != nil { alr.logger.Error("error getting latest log timestamp for calculating next poll timestamps", zap.Error(tsErr), zap.String("project", project.Name), zap.String("clusterName", cluster.Name)) // If we are not able to get the latest log timestamp, we have to assume that we are collecting all // data and don't want to risk duplicated data by re-polling the same data again. nextPollStartTime = now } else { nextPollStartTime = mostRecentLogTimestamp.Add(100 * time.Millisecond) } } // If we get back less than the maximum number of logs, we can assume that we've retrieved all of the logs // that are currently available for this time period, though some logs may not be available in the API yet. if len(accessLogs) < int(pc.AccessLogs.PageSize) { return nextPollStartTime } if pageCount >= int(pc.AccessLogs.MaxPages) { alr.logger.Warn(`reached maximum number of pages of access logs, increase 'max_pages' or frequency of 'poll_interval' to ensure all access logs are retrieved`, zap.Int("maxPages", int(pc.AccessLogs.MaxPages))) return nextPollStartTime } // If we get back the maximum number of logs, we need to re-query with a new end time. While undocumented, the API // returns the most recent logs first. If we get the maximum number of logs back, we can assume that // there are more logs to be retrieved. We'll re-query with the same start time, but the end // time set to just before the timestamp of the oldest log entry returned. oldestLogTimestampFromPage, err := getTimestamp(accessLogs[len(accessLogs)-1]) if err != nil { alr.logger.Error("error getting oldest log timestamp for calculating next request timestamps", zap.Error(err), zap.String("project", project.Name), zap.String("clusterName", cluster.Name)) return nextPollStartTime } opts.MaxDate = oldestLogTimestampFromPage.Add(-1 * time.Millisecond) // If the new max date is before the min date, we've retrieved all of the logs for this time period // and receiving the maximum number of logs back is a coincidence. if opts.MaxDate.Before(opts.MinDate) { break } } return now } func getTimestamp(log *mongodbatlas.AccessLogs) (time.Time, error) { body, err := parseLogMessage(log) if err != nil { // If body couldn't be parsed, we'll still use the outer Timestamp field to determine the new max date. body = map[string]any{} } return getTimestampPreparsedBody(log, body) } func getTimestampPreparsedBody(log *mongodbatlas.AccessLogs, body map[string]any) (time.Time, error) { // If the log message has a timestamp, use that. When present, it has more precision than the timestamp from the access log entry. if tMap, ok := body["t"]; ok { if dateMap, ok := tMap.(map[string]any); ok { if v, ok := dateMap["$date"]; ok { if dateStr, ok := v.(string); ok { return time.Parse(time.RFC3339, dateStr) } } } } // If the log message doesn't have a timestamp, use the timestamp from the outer access log entry. 
t, err := time.Parse(time.RFC3339, log.Timestamp) if err != nil { // The documentation claims ISO8601/RFC3339, but the API has been observed returning timestamps in UnixDate format // UnixDate looks like Wed Apr 26 02:38:56 GMT 2023 unixDate, err2 := time.Parse(time.UnixDate, log.Timestamp) if err2 != nil { // Return the original error as the documentation claims ISO8601 return time.Time{}, err } return unixDate, nil } return t, nil } func parseLogMessage(log *mongodbatlas.AccessLogs) (map[string]any, error) { var body map[string]any if err := json.Unmarshal([]byte(log.LogLine), &body); err != nil { return nil, err } return body, nil } func transformAccessLogs(now pcommon.Timestamp, accessLogs []*mongodbatlas.AccessLogs, p *mongodbatlas.Project, c mongodbatlas.Cluster, logger *zap.Logger) plog.Logs { logs := plog.NewLogs() resourceLogs := logs.ResourceLogs().AppendEmpty() ra := resourceLogs.Resource().Attributes() ra.PutStr("mongodbatlas.project.name", p.Name) ra.PutStr("mongodbatlas.project.id", p.ID) ra.PutStr("mongodbatlas.region.name", c.ProviderSettings.RegionName) ra.PutStr("mongodbatlas.provider.name", c.ProviderSettings.ProviderName) ra.PutStr("mongodbatlas.org.id", p.OrgID) ra.PutStr("mongodbatlas.cluster.name", c.Name) // Expected format documented https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Access-Tracking/operation/listAccessLogsByClusterName logRecords := resourceLogs.ScopeLogs().AppendEmpty().LogRecords() for _, accessLog := range accessLogs { logRecord := logRecords.AppendEmpty() logBody, err := parseLogMessage(accessLog) if err != nil { logger.Error("unable to unmarshal access log into body string", zap.Error(err)) continue } err = logRecord.Body().SetEmptyMap().FromRaw(logBody) if err != nil { logger.Error("unable to set log record body as map", zap.Error(err)) logRecord.Body().SetStr(accessLog.LogLine) } ts, err := getTimestampPreparsedBody(accessLog, logBody) if err != nil { logger.Warn("unable to interpret when an access log event was recorded, timestamp not parsed", zap.Error(err), zap.String("timestamp", accessLog.Timestamp)) logRecord.SetTimestamp(now) } else { logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts)) } logRecord.SetObservedTimestamp(now) attrs := logRecord.Attributes() attrs.PutStr("event.domain", "mongodbatlas") logRecord.SetSeverityNumber(plog.SeverityNumberInfo) logRecord.SetSeverityText(plog.SeverityNumberInfo.String()) if accessLog.AuthResult != nil { status := "success" if !*accessLog.AuthResult { logRecord.SetSeverityNumber(plog.SeverityNumberWarn) logRecord.SetSeverityText(plog.SeverityNumberWarn.String()) status = "failure" } attrs.PutStr("auth.result", status) } if accessLog.FailureReason != "" { attrs.PutStr("auth.failure_reason", accessLog.FailureReason) } attrs.PutStr("auth.source", accessLog.AuthSource) attrs.PutStr("username", accessLog.Username) attrs.PutStr("hostname", accessLog.Hostname) attrs.PutStr("remote.ip", accessLog.IPAddress) } return logs } func accessLogsCheckpointKey(groupID string) string { return fmt.Sprintf(accessLogStorageKey, groupID) } func (alr *accessLogsReceiver) checkpoint(ctx context.Context, groupID string) error { marshalBytes, err := json.Marshal(alr.record) if err != nil { return fmt.Errorf("unable to write checkpoint: %w", err) } return alr.storageClient.Set(ctx, accessLogsCheckpointKey(groupID), marshalBytes) } func (alr *accessLogsReceiver) loadCheckpoint(ctx context.Context, groupID string) { cBytes, err := alr.storageClient.Get(ctx, accessLogsCheckpointKey(groupID)) if err != 
nil { alr.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err)) if _, ok := alr.record[groupID]; !ok { alr.record[groupID] = []*accessLogStorageRecord{} } return } if cBytes == nil { if _, ok := alr.record[groupID]; !ok { alr.record[groupID] = []*accessLogStorageRecord{} } return } var record []*accessLogStorageRecord if err = json.Unmarshal(cBytes, &record); err != nil { alr.logger.Error("unable to decode stored record for access logs, continuing without a checkpoint", zap.Error(err)) if _, ok := alr.record[groupID]; !ok { alr.record[groupID] = []*accessLogStorageRecord{} } return } // Keep the decoded checkpoint so polling resumes from where it left off. alr.record[groupID] = record } func (alr *accessLogsReceiver) getClusterCheckpoint(groupID, clusterName string) *accessLogStorageRecord { for key, value := range alr.record { if key == groupID { for _, v := range value { if v.ClusterName == clusterName { return v } } } } return nil } func (alr *accessLogsReceiver) setClusterCheckpoint(groupID string, clusterCheckpoint *accessLogStorageRecord) { groupCheckpoints, ok := alr.record[groupID] if !ok { // First checkpoint for this group; return early so it is not appended a second time below. alr.record[groupID] = []*accessLogStorageRecord{clusterCheckpoint} return } var found bool for idx, v := range groupCheckpoints { if v.ClusterName == clusterCheckpoint.ClusterName { found = true alr.record[groupID][idx] = clusterCheckpoint } } if !found { alr.record[groupID] = append(alr.record[groupID], clusterCheckpoint) } }
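// Illustrative sketch, not part of the receiver: getTimestamp above first tries RFC3339 and then
// falls back to UnixDate, because the Atlas access-log API has been observed returning either
// format even though the documentation claims ISO 8601. The sample values below are made up to
// exercise both branches.
package main

import (
	"fmt"
	"time"
)

// parseAccessLogTime mirrors the fallback order used by getTimestampPreparsedBody.
func parseAccessLogTime(s string) (time.Time, error) {
	if t, err := time.Parse(time.RFC3339, s); err == nil {
		return t, nil
	} else if t, err2 := time.Parse(time.UnixDate, s); err2 == nil {
		// UnixDate looks like "Wed Apr 26 02:38:56 GMT 2023".
		return t, nil
	} else {
		// Return the RFC3339 error, since that is the documented format.
		return time.Time{}, err
	}
}

func main() {
	for _, s := range []string{"2023-04-26T02:38:56Z", "Wed Apr 26 02:38:56 GMT 2023"} {
		t, err := parseAccessLogTime(s)
		fmt.Println(t, err)
	}
}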
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "bytes" "context" "crypto/hmac" "crypto/sha1" // #nosec G505 -- SHA1 is the algorithm mongodbatlas uses, it must be used to calculate the HMAC signature "crypto/tls" "encoding/base64" "encoding/json" "errors" "fmt" "io" "net" "net/http" "strconv" "sync" "time" "go.mongodb.org/atlas/mongodbatlas" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/extension/xextension/storage" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" rcvr "go.opentelemetry.io/collector/receiver" "go.uber.org/multierr" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model" ) // maxContentLength is the maximum payload size we will accept from incoming requests. // Requests are generally ~1000 bytes, so we overshoot that by an order of magnitude. // This is to protect from overly large requests. const ( maxContentLength int64 = 16384 signatureHeaderName string = "X-MMS-Signature" alertModeListen = "listen" alertModePoll = "poll" alertCacheKey = "last_recorded_alert" defaultAlertsPollInterval = 5 * time.Minute // defaults were based off API docs https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/ defaultAlertsPageSize = 100 defaultAlertsMaxPages = 10 ) type alertsClient interface { GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error) GetAlerts(ctx context.Context, groupID string, opts *internal.AlertPollOptions) ([]mongodbatlas.Alert, bool, error) } type alertsReceiver struct { addr string secret string server *http.Server mode string tlsSettings *configtls.ServerConfig consumer consumer.Logs wg *sync.WaitGroup // only relevant in `poll` mode projects []*ProjectConfig client alertsClient privateKey string publicKey string backoffConfig configretry.BackOffConfig pollInterval time.Duration record *alertRecord pageSize int64 maxPages int64 doneChan chan bool storageClient storage.Client telemetrySettings component.TelemetrySettings } func newAlertsReceiver(params rcvr.Settings, baseConfig *Config, consumer consumer.Logs) (*alertsReceiver, error) { cfg := baseConfig.Alerts var tlsConfig *tls.Config if cfg.TLS != nil { var err error tlsConfig, err = cfg.TLS.LoadTLSConfig(context.Background()) if err != nil { return nil, err } } for _, p := range cfg.Projects { p.populateIncludesAndExcludes() } recv := &alertsReceiver{ addr: cfg.Endpoint, secret: string(cfg.Secret), tlsSettings: cfg.TLS, consumer: consumer, mode: cfg.Mode, projects: cfg.Projects, backoffConfig: baseConfig.BackOffConfig, publicKey: baseConfig.PublicKey, privateKey: string(baseConfig.PrivateKey), wg: &sync.WaitGroup{}, pollInterval: baseConfig.Alerts.PollInterval, maxPages: baseConfig.Alerts.MaxPages, pageSize: baseConfig.Alerts.PageSize, doneChan: make(chan bool, 1), telemetrySettings: params.TelemetrySettings, } if recv.mode == alertModePoll { recv.client = internal.NewMongoDBAtlasClient(recv.publicKey, recv.privateKey, recv.backoffConfig, recv.telemetrySettings.Logger) return recv, 
nil } s := &http.Server{ TLSConfig: tlsConfig, Handler: http.HandlerFunc(recv.handleRequest), ReadHeaderTimeout: 20 * time.Second, } recv.server = s return recv, nil } func (a *alertsReceiver) Start(ctx context.Context, host component.Host, storageClient storage.Client) error { if a.mode == alertModePoll { return a.startPolling(ctx, storageClient) } return a.startListening(ctx, host) } func (a *alertsReceiver) startPolling(ctx context.Context, storageClient storage.Client) error { a.telemetrySettings.Logger.Debug("starting alerts receiver in retrieval mode") a.storageClient = storageClient err := a.syncPersistence(ctx) if err != nil { a.telemetrySettings.Logger.Error("there was an error syncing the receiver with checkpoint", zap.Error(err)) } t := time.NewTicker(a.pollInterval) a.wg.Add(1) go func() { defer a.wg.Done() for { select { case <-t.C: if err := a.retrieveAndProcessAlerts(ctx); err != nil { a.telemetrySettings.Logger.Error("unable to retrieve alerts", zap.Error(err)) } case <-a.doneChan: return case <-ctx.Done(): return } } }() return nil } func (a *alertsReceiver) retrieveAndProcessAlerts(ctx context.Context) error { for _, p := range a.projects { project, err := a.client.GetProject(ctx, p.Name) if err != nil { a.telemetrySettings.Logger.Error("error retrieving project "+p.Name+":", zap.Error(err)) continue } a.pollAndProcess(ctx, p, project) } return a.writeCheckpoint(ctx) } func (a *alertsReceiver) pollAndProcess(ctx context.Context, pc *ProjectConfig, project *mongodbatlas.Project) { for pageNum := 1; pageNum <= int(a.maxPages); pageNum++ { projectAlerts, hasNext, err := a.client.GetAlerts(ctx, project.ID, &internal.AlertPollOptions{ PageNum: pageNum, PageSize: int(a.pageSize), }) if err != nil { a.telemetrySettings.Logger.Error("unable to get alerts for project", zap.Error(err)) break } filteredAlerts := a.applyFilters(pc, projectAlerts) now := pcommon.NewTimestampFromTime(time.Now()) logs, err := a.convertAlerts(now, filteredAlerts, project) if err != nil { a.telemetrySettings.Logger.Error("error processing alerts", zap.Error(err)) break } if logs.LogRecordCount() > 0 { if err = a.consumer.ConsumeLogs(ctx, logs); err != nil { a.telemetrySettings.Logger.Error("error consuming alerts", zap.Error(err)) break } } if !hasNext { break } } } func (a *alertsReceiver) startListening(ctx context.Context, host component.Host) error { a.telemetrySettings.Logger.Debug("starting alerts receiver in listening mode") // We use a.server.Serve* over a.server.ListenAndServe* // So that we can catch and return errors relating to binding to network interface on start. 
var lc net.ListenConfig l, err := lc.Listen(ctx, "tcp", a.addr) if err != nil { return err } a.wg.Add(1) if a.tlsSettings != nil { go func() { defer a.wg.Done() a.telemetrySettings.Logger.Debug("Starting ServeTLS", zap.String("address", a.addr), zap.String("certfile", a.tlsSettings.CertFile), zap.String("keyfile", a.tlsSettings.KeyFile)) err := a.server.ServeTLS(l, a.tlsSettings.CertFile, a.tlsSettings.KeyFile) a.telemetrySettings.Logger.Debug("Serve TLS done") if !errors.Is(err, http.ErrServerClosed) { a.telemetrySettings.Logger.Error("ServeTLS failed", zap.Error(err)) componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() } else { go func() { defer a.wg.Done() a.telemetrySettings.Logger.Debug("Starting Serve", zap.String("address", a.addr)) err := a.server.Serve(l) a.telemetrySettings.Logger.Debug("Serve done") if !errors.Is(err, http.ErrServerClosed) { componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() } return nil } func (a *alertsReceiver) handleRequest(rw http.ResponseWriter, req *http.Request) { if req.ContentLength < 0 { rw.WriteHeader(http.StatusLengthRequired) a.telemetrySettings.Logger.Debug("Got request with no Content-Length specified", zap.String("remote", req.RemoteAddr)) return } if req.ContentLength > maxContentLength { rw.WriteHeader(http.StatusRequestEntityTooLarge) a.telemetrySettings.Logger.Debug("Got request with large Content-Length specified", zap.String("remote", req.RemoteAddr), zap.Int64("content-length", req.ContentLength), zap.Int64("max-content-length", maxContentLength)) return } payloadSigHeader := req.Header.Get(signatureHeaderName) if payloadSigHeader == "" { rw.WriteHeader(http.StatusBadRequest) a.telemetrySettings.Logger.Debug("Got payload with no HMAC signature, dropping...") return } payload := make([]byte, req.ContentLength) _, err := io.ReadFull(req.Body, payload) if err != nil { rw.WriteHeader(http.StatusBadRequest) a.telemetrySettings.Logger.Debug("Failed to read alerts payload", zap.Error(err), zap.String("remote", req.RemoteAddr)) return } if err = verifyHMACSignature(a.secret, payload, payloadSigHeader); err != nil { rw.WriteHeader(http.StatusBadRequest) a.telemetrySettings.Logger.Debug("Got payload with invalid HMAC signature, dropping...", zap.Error(err), zap.String("remote", req.RemoteAddr)) return } logs, err := payloadToLogs(time.Now(), payload) if err != nil { rw.WriteHeader(http.StatusBadRequest) a.telemetrySettings.Logger.Error("Failed to convert log payload to log record", zap.Error(err)) return } if err := a.consumer.ConsumeLogs(req.Context(), logs); err != nil { rw.WriteHeader(http.StatusInternalServerError) a.telemetrySettings.Logger.Error("Failed to consume alert as log", zap.Error(err)) return } rw.WriteHeader(http.StatusOK) } func (a *alertsReceiver) Shutdown(ctx context.Context) error { if a.mode == alertModePoll { return a.shutdownPoller(ctx) } return a.shutdownListener(ctx) } func (a *alertsReceiver) shutdownListener(ctx context.Context) error { a.telemetrySettings.Logger.Debug("Shutting down server") err := a.server.Shutdown(ctx) if err != nil { return err } a.telemetrySettings.Logger.Debug("Waiting for shutdown to complete.") a.wg.Wait() return nil } func (a *alertsReceiver) shutdownPoller(ctx context.Context) error { a.telemetrySettings.Logger.Debug("Shutting down client") close(a.doneChan) a.wg.Wait() return a.writeCheckpoint(ctx) } func (a *alertsReceiver) convertAlerts(now pcommon.Timestamp, alerts []mongodbatlas.Alert, project *mongodbatlas.Project) (plog.Logs, error) { logs := 
plog.NewLogs() var errs error for i := range alerts { alert := alerts[i] resourceLogs := logs.ResourceLogs().AppendEmpty() resourceAttrs := resourceLogs.Resource().Attributes() resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID) resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID) resourceAttrs.PutStr("mongodbatlas.org.id", project.OrgID) resourceAttrs.PutStr("mongodbatlas.project.name", project.Name) putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", &alert.ClusterName) putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", &alert.ReplicaSetName) logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() logRecord.SetObservedTimestamp(now) ts, err := time.Parse(time.RFC3339, alert.Updated) if err != nil { a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated)) continue } logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts)) logRecord.SetSeverityNumber(severityFromAPIAlert(alert.Status)) logRecord.SetSeverityText(alert.Status) // this could be fairly expensive to do, expecting not too many issues unless there are a ton // of unrecognized alerts to process. bodyBytes, err := json.Marshal(alert) if err != nil { a.telemetrySettings.Logger.Warn("unable to marshal alert into a body string") continue } logRecord.Body().SetStr(string(bodyBytes)) attrs := logRecord.Attributes() // These attributes are always present attrs.PutStr("event.domain", "mongodbatlas") attrs.PutStr("event.name", alert.EventTypeName) attrs.PutStr("status", alert.Status) attrs.PutStr("created", alert.Created) attrs.PutStr("updated", alert.Updated) attrs.PutStr("id", alert.ID) // These attributes are optional and may not be present, depending on the alert type. 
putStringToMapNotNil(attrs, "metric.name", &alert.MetricName) putStringToMapNotNil(attrs, "type_name", &alert.EventTypeName) putStringToMapNotNil(attrs, "last_notified", &alert.LastNotified) putStringToMapNotNil(attrs, "resolved", &alert.Resolved) putStringToMapNotNil(attrs, "acknowledgement.comment", &alert.AcknowledgementComment) putStringToMapNotNil(attrs, "acknowledgement.username", &alert.AcknowledgingUsername) putStringToMapNotNil(attrs, "acknowledgement.until", &alert.AcknowledgedUntil) if alert.CurrentValue != nil { attrs.PutDouble("metric.value", *alert.CurrentValue.Number) attrs.PutStr("metric.units", alert.CurrentValue.Units) } // Only present for HOST, HOST_METRIC, and REPLICA_SET alerts if alert.HostnameAndPort == "" { continue } host, portStr, err := net.SplitHostPort(alert.HostnameAndPort) if err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to split host:port %s: %w", alert.HostnameAndPort, err)) continue } port, err := strconv.ParseInt(portStr, 10, 64) if err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to parse port %s: %w", portStr, err)) continue } attrs.PutStr("net.peer.name", host) attrs.PutInt("net.peer.port", port) } return logs, errs } func verifyHMACSignature(secret string, payload []byte, signatureHeader string) error { b64Decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(signatureHeader)) payloadSig, err := io.ReadAll(b64Decoder) if err != nil { return err } h := hmac.New(sha1.New, []byte(secret)) h.Write(payload) calculatedSig := h.Sum(nil) if !hmac.Equal(calculatedSig, payloadSig) { return errors.New("calculated signature does not equal header signature") } return nil } func payloadToLogs(now time.Time, payload []byte) (plog.Logs, error) { var alert model.Alert err := json.Unmarshal(payload, &alert) if err != nil { return plog.Logs{}, err } logs := plog.NewLogs() resourceLogs := logs.ResourceLogs().AppendEmpty() logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(now)) logRecord.SetTimestamp(timestampFromAlert(alert)) logRecord.SetSeverityNumber(severityFromAlert(alert)) logRecord.Body().SetStr(string(payload)) resourceAttrs := resourceLogs.Resource().Attributes() resourceAttrs.PutStr("mongodbatlas.group.id", alert.GroupID) resourceAttrs.PutStr("mongodbatlas.alert.config.id", alert.AlertConfigID) putStringToMapNotNil(resourceAttrs, "mongodbatlas.cluster.name", alert.ClusterName) putStringToMapNotNil(resourceAttrs, "mongodbatlas.replica_set.name", alert.ReplicaSetName) attrs := logRecord.Attributes() // These attributes are always present attrs.PutStr("event.domain", "mongodbatlas") attrs.PutStr("event.name", alert.EventType) attrs.PutStr("message", alert.HumanReadable) attrs.PutStr("status", alert.Status) attrs.PutStr("created", alert.Created) attrs.PutStr("updated", alert.Updated) attrs.PutStr("id", alert.ID) // These attributes are optional and may not be present, depending on the alert type. 
putStringToMapNotNil(attrs, "metric.name", alert.MetricName) putStringToMapNotNil(attrs, "type_name", alert.TypeName) putStringToMapNotNil(attrs, "user_alias", alert.UserAlias) putStringToMapNotNil(attrs, "last_notified", alert.LastNotified) putStringToMapNotNil(attrs, "resolved", alert.Resolved) putStringToMapNotNil(attrs, "acknowledgement.comment", alert.AcknowledgementComment) putStringToMapNotNil(attrs, "acknowledgement.username", alert.AcknowledgementUsername) putStringToMapNotNil(attrs, "acknowledgement.until", alert.AcknowledgedUntil) if alert.CurrentValue != nil { attrs.PutDouble("metric.value", alert.CurrentValue.Number) attrs.PutStr("metric.units", alert.CurrentValue.Units) } if alert.HostNameAndPort != nil { host, portStr, err := net.SplitHostPort(*alert.HostNameAndPort) if err != nil { return plog.Logs{}, fmt.Errorf("failed to split host:port %s: %w", *alert.HostNameAndPort, err) } port, err := strconv.ParseInt(portStr, 10, 64) if err != nil { return plog.Logs{}, fmt.Errorf("failed to parse port %s: %w", portStr, err) } attrs.PutStr("net.peer.name", host) attrs.PutInt("net.peer.port", port) } return logs, nil } // alertRecord embeds a sync.Mutex so it is goroutine-safe and // supports custom marshaling type alertRecord struct { sync.Mutex LastRecordedTime *time.Time `mapstructure:"last_recorded"` } func (a *alertRecord) SetLastRecorded(lastUpdated *time.Time) { a.Lock() a.LastRecordedTime = lastUpdated a.Unlock() } func (a *alertsReceiver) syncPersistence(ctx context.Context) error { if a.storageClient == nil { return nil } cBytes, err := a.storageClient.Get(ctx, alertCacheKey) if err != nil || cBytes == nil { a.record = &alertRecord{} return nil } var cache alertRecord if err = json.Unmarshal(cBytes, &cache); err != nil { return fmt.Errorf("unable to decode stored cache: %w", err) } a.record = &cache return nil } func (a *alertsReceiver) writeCheckpoint(ctx context.Context) error { if a.storageClient == nil { a.telemetrySettings.Logger.Error("unable to write checkpoint since no storage client was found") return errors.New("missing non-nil storage client") } marshalBytes, err := json.Marshal(&a.record) if err != nil { return fmt.Errorf("unable to write checkpoint: %w", err) } return a.storageClient.Set(ctx, alertCacheKey, marshalBytes) } func (a *alertsReceiver) applyFilters(pConf *ProjectConfig, alerts []mongodbatlas.Alert) []mongodbatlas.Alert { filtered := []mongodbatlas.Alert{} lastRecordedTime := pcommon.Timestamp(0).AsTime() if a.record.LastRecordedTime != nil { lastRecordedTime = *a.record.LastRecordedTime } // we need to maintain two timestamps in order to not conflict while iterating latestInPayload := pcommon.Timestamp(0).AsTime() for _, alert := range alerts { updatedTime, err := time.Parse(time.RFC3339, alert.Updated) if err != nil { a.telemetrySettings.Logger.Warn("unable to interpret updated time for alert, expecting a RFC3339 timestamp", zap.String("timestamp", alert.Updated)) continue } if updatedTime.Before(lastRecordedTime) || updatedTime.Equal(lastRecordedTime) { // already processed if the updated time was before or equal to the last recorded continue } if len(pConf.excludesByClusterName) > 0 { if _, ok := pConf.excludesByClusterName[alert.ClusterName]; ok { continue } } if len(pConf.IncludeClusters) > 0 { if _, ok := pConf.includesByClusterName[alert.ClusterName]; !ok { continue } } filtered = append(filtered, alert) if updatedTime.After(latestInPayload) { latestInPayload = updatedTime } } if latestInPayload.After(lastRecordedTime) { 
a.record.SetLastRecorded(&latestInPayload) } return filtered } func timestampFromAlert(a model.Alert) pcommon.Timestamp { if ts, err := time.Parse(time.RFC3339, a.Updated); err == nil { return pcommon.NewTimestampFromTime(ts) } return pcommon.Timestamp(0) } // severityFromAlert maps the alert to a severity number. // Currently, it just maps "OPEN" alerts to WARN, and everything else to INFO. func severityFromAlert(a model.Alert) plog.SeverityNumber { // Status is defined here: https://www.mongodb.com/docs/atlas/reference/api/alerts-get-alert/#response-elements // It may also be "INFORMATIONAL" for single-fire alerts (events) switch a.Status { case "OPEN": return plog.SeverityNumberWarn default: return plog.SeverityNumberInfo } } // severityFromAPIAlert is a workaround for shared types between the API and the model func severityFromAPIAlert(a string) plog.SeverityNumber { switch a { case "OPEN": return plog.SeverityNumberWarn default: return plog.SeverityNumberInfo } } func putStringToMapNotNil(m pcommon.Map, k string, v *string) { if v != nil { m.PutStr(k, *v) } }
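// Illustrative sketch, not part of the receiver: how a webhook sender would produce the
// X-MMS-Signature value that verifyHMACSignature above checks — a base64-encoded HMAC-SHA1 of the
// payload keyed with the shared secret. The secret and payload here are made up.
package main

import (
	"crypto/hmac"
	"crypto/sha1" // #nosec G505 -- SHA1 matches the algorithm mongodbatlas uses
	"encoding/base64"
	"fmt"
)

// sign computes the signature the listener expects in the signature header.
func sign(secret string, payload []byte) string {
	h := hmac.New(sha1.New, []byte(secret))
	h.Write(payload)
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

func main() {
	payload := []byte(`{"eventTypeName":"OUTSIDE_METRIC_THRESHOLD"}`)
	fmt.Println(sign("example-secret", payload)) // value to send in the signature header
}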
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "context" "fmt" "go.opentelemetry.io/collector/component" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter" ) // combinedLogsReceiver wraps alerts and log receivers in a single log receiver to be consumed by the factory type combinedLogsReceiver struct { alerts *alertsReceiver logs *logsReceiver events *eventsReceiver accessLogs *accessLogsReceiver storageID *component.ID id component.ID } // Starts up the combined MongoDB Atlas Logs and Alert Receiver func (c *combinedLogsReceiver) Start(ctx context.Context, host component.Host) error { var errs error storageClient, err := adapter.GetStorageClient(ctx, host, c.storageID, c.id) if err != nil { return fmt.Errorf("failed to get storage client: %w", err) } if c.alerts != nil { if err := c.alerts.Start(ctx, host, storageClient); err != nil { errs = multierr.Append(errs, err) } } if c.logs != nil { if err := c.logs.Start(ctx, host); err != nil { errs = multierr.Append(errs, err) } } if c.events != nil { if err := c.events.Start(ctx, host, storageClient); err != nil { errs = multierr.Append(errs, err) } } if c.accessLogs != nil { if err := c.accessLogs.Start(ctx, host, storageClient); err != nil { errs = multierr.Append(errs, err) } } return errs } // Shuts down the combined MongoDB Atlas Logs and Alert Receiver func (c *combinedLogsReceiver) Shutdown(ctx context.Context) error { var errs error if c.alerts != nil { if err := c.alerts.Shutdown(ctx); err != nil { errs = multierr.Append(errs, err) } } if c.logs != nil { if err := c.logs.Shutdown(ctx); err != nil { errs = multierr.Append(errs, err) } } if c.events != nil { if err := c.events.Shutdown(ctx); err != nil { errs = multierr.Append(errs, err) } } if c.accessLogs != nil { if err := c.accessLogs.Shutdown(ctx); err != nil { errs = multierr.Append(errs, err) } } return errs }
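// Illustrative sketch, not part of the receiver: the combined receiver's Start and Shutdown above
// fan out to each sub-receiver and aggregate failures with multierr rather than stopping at the
// first error, so one failing sub-receiver does not hide problems in the others. A minimal version
// of that pattern:
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

// shutdownAll calls every function and reports all failures together.
func shutdownAll(fns ...func() error) error {
	var errs error
	for _, fn := range fns {
		if err := fn(); err != nil {
			errs = multierr.Append(errs, err)
		}
	}
	return errs
}

func main() {
	err := shutdownAll(
		func() error { return nil },
		func() error { return errors.New("alerts: close failed") },
		func() error { return errors.New("events: flush failed") },
	)
	fmt.Println(err) // both failures are reported
}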
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "errors" "fmt" "net" "strings" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/scraper/scraperhelper" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata" ) var _ component.Config = (*Config)(nil) type Config struct { scraperhelper.ControllerConfig `mapstructure:",squash"` PublicKey string `mapstructure:"public_key"` PrivateKey configopaque.String `mapstructure:"private_key"` Granularity string `mapstructure:"granularity"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` Projects []*ProjectConfig `mapstructure:"projects"` Alerts AlertConfig `mapstructure:"alerts"` Events *EventsConfig `mapstructure:"events"` Logs LogConfig `mapstructure:"logs"` BackOffConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` StorageID *component.ID `mapstructure:"storage"` } type AlertConfig struct { Enabled bool `mapstructure:"enabled"` Endpoint string `mapstructure:"endpoint"` Secret configopaque.String `mapstructure:"secret"` TLS *configtls.ServerConfig `mapstructure:"tls"` Mode string `mapstructure:"mode"` // these parameters are only relevant in retrieval mode Projects []*ProjectConfig `mapstructure:"projects"` PollInterval time.Duration `mapstructure:"poll_interval"` PageSize int64 `mapstructure:"page_size"` MaxPages int64 `mapstructure:"max_pages"` } type LogConfig struct { Enabled bool `mapstructure:"enabled"` Projects []*LogsProjectConfig `mapstructure:"projects"` } // EventsConfig is the configuration options for events collection type EventsConfig struct { Projects []*ProjectConfig `mapstructure:"projects"` Organizations []*OrgConfig `mapstructure:"organizations"` PollInterval time.Duration `mapstructure:"poll_interval"` Types []string `mapstructure:"types"` PageSize int64 `mapstructure:"page_size"` MaxPages int64 `mapstructure:"max_pages"` } type LogsProjectConfig struct { ProjectConfig `mapstructure:",squash"` EnableAuditLogs bool `mapstructure:"collect_audit_logs"` EnableHostLogs *bool `mapstructure:"collect_host_logs"` AccessLogs *AccessLogsConfig `mapstructure:"access_logs"` } type AccessLogsConfig struct { Enabled *bool `mapstructure:"enabled"` PollInterval time.Duration `mapstructure:"poll_interval"` PageSize int64 `mapstructure:"page_size"` MaxPages int64 `mapstructure:"max_pages"` AuthResult *bool `mapstructure:"auth_result"` } func (alc *AccessLogsConfig) IsEnabled() bool { return alc.Enabled == nil || *alc.Enabled } type ProjectConfig struct { Name string `mapstructure:"name"` ExcludeClusters []string `mapstructure:"exclude_clusters"` IncludeClusters []string `mapstructure:"include_clusters"` includesByClusterName map[string]struct{} excludesByClusterName map[string]struct{} } type OrgConfig struct { ID string `mapstructure:"id"` } func (pc *ProjectConfig) populateIncludesAndExcludes() { pc.includesByClusterName = map[string]struct{}{} for _, inclusion := range pc.IncludeClusters { pc.includesByClusterName[inclusion] = struct{}{} } pc.excludesByClusterName = map[string]struct{}{} for _, exclusion := range pc.ExcludeClusters { pc.excludesByClusterName[exclusion] = struct{}{} } } 
var ( // Alerts Receiver Errors errNoEndpoint = errors.New("an endpoint must be specified") errNoSecret = errors.New("a webhook secret must be specified") errNoCert = errors.New("tls was configured, but no cert file was specified") errNoKey = errors.New("tls was configured, but no key file was specified") errNoModeRecognized = fmt.Errorf("alert mode not recognized. Known alert modes are: %s", strings.Join([]string{ alertModeListen, alertModePoll, }, ",")) errPageSizeIncorrect = errors.New("page size must be a value between 1 and 500") // Logs Receiver Errors errNoProjects = errors.New("at least one 'project' must be specified") errNoEvents = errors.New("at least one 'projects' or 'organizations' entry must be specified for events") errClusterConfig = errors.New("only one of 'include_clusters' or 'exclude_clusters' may be specified") // Access Logs Errors errMaxPageSize = errors.New("the maximum value for 'page_size' is 20000") ) func (c *Config) Validate() error { var errs error for _, project := range c.Projects { if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 { errs = multierr.Append(errs, errClusterConfig) } } errs = multierr.Append(errs, c.Alerts.validate()) errs = multierr.Append(errs, c.Logs.validate()) if c.Events != nil { errs = multierr.Append(errs, c.Events.validate()) } return errs } func (l *LogConfig) validate() error { if !l.Enabled { return nil } var errs error if len(l.Projects) == 0 { errs = multierr.Append(errs, errNoProjects) } for _, project := range l.Projects { if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 { errs = multierr.Append(errs, errClusterConfig) } if project.AccessLogs != nil && project.AccessLogs.IsEnabled() { if project.AccessLogs.PageSize > 20000 { errs = multierr.Append(errs, errMaxPageSize) } } } return errs } func (a *AlertConfig) validate() error { if !a.Enabled { // No need to further validate, receiving alerts is disabled. return nil } switch a.Mode { case alertModePoll: return a.validatePollConfig() case alertModeListen: return a.validateListenConfig() default: return errNoModeRecognized } } func (a AlertConfig) validatePollConfig() error { if len(a.Projects) == 0 { return errNoProjects } // based off API limits https://www.mongodb.com/docs/atlas/reference/api/alerts-get-all-alerts/ if a.PageSize < 1 || a.PageSize > 500 { return errPageSizeIncorrect } var errs error for _, project := range a.Projects { if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 { errs = multierr.Append(errs, errClusterConfig) } } return errs } func (a AlertConfig) validateListenConfig() error { if a.Endpoint == "" { return errNoEndpoint } var errs error _, _, err := net.SplitHostPort(a.Endpoint) if err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to split endpoint into 'host:port' pair: %w", err)) } if a.Secret == "" { errs = multierr.Append(errs, errNoSecret) } if a.TLS != nil { if a.TLS.CertFile == "" { errs = multierr.Append(errs, errNoCert) } if a.TLS.KeyFile == "" { errs = multierr.Append(errs, errNoKey) } } return errs } func (e EventsConfig) validate() error { if len(e.Projects) == 0 && len(e.Organizations) == 0 { return errNoEvents } return nil }
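// Illustrative sketch, not part of the receiver: populateIncludesAndExcludes above turns the
// include/exclude slices into set-typed maps so later cluster filtering is an O(1) lookup, while
// Validate rejects configs that set both lists. A minimal version of the same idea, using
// hypothetical cluster names:
package main

import (
	"errors"
	"fmt"
)

// toSet builds a membership set from a list of names.
func toSet(names []string) map[string]struct{} {
	set := make(map[string]struct{}, len(names))
	for _, n := range names {
		set[n] = struct{}{}
	}
	return set
}

// validateClusters mirrors the "only one of include/exclude" rule.
func validateClusters(include, exclude []string) error {
	if len(include) != 0 && len(exclude) != 0 {
		return errors.New("only one of 'include_clusters' or 'exclude_clusters' may be specified")
	}
	return nil
}

func main() {
	include := []string{"cluster-a", "cluster-b"}
	fmt.Println(validateClusters(include, nil)) // <nil>
	_, ok := toSet(include)["cluster-a"]
	fmt.Println(ok) // true
}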
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "context" "encoding/json" "errors" "fmt" "sync" "time" "go.mongodb.org/atlas/mongodbatlas" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/extension/xextension/storage" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" rcvr "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal" ) const ( eventStorageKey = "last_recorded_event" defaultEventsMaxPages = 25 defaultEventsPageSize = 100 defaultPollInterval = time.Minute ) type eventsClient interface { GetProject(ctx context.Context, groupID string) (*mongodbatlas.Project, error) GetProjectEvents(ctx context.Context, groupID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) GetOrganization(ctx context.Context, orgID string) (*mongodbatlas.Organization, error) GetOrganizationEvents(ctx context.Context, orgID string, opts *internal.GetEventsOptions) (ret []*mongodbatlas.Event, nextPage bool, err error) Shutdown() error } type eventsReceiver struct { client eventsClient logger *zap.Logger storageClient storage.Client cfg *Config consumer consumer.Logs maxPages int pageSize int pollInterval time.Duration wg *sync.WaitGroup record *eventRecord // this record is used for checkpointing last processed events cancel context.CancelFunc } type eventRecord struct { NextStartTime *time.Time `mapstructure:"next_start_time"` } func newEventsReceiver(settings rcvr.Settings, c *Config, consumer consumer.Logs) *eventsReceiver { r := &eventsReceiver{ client: internal.NewMongoDBAtlasClient(c.PublicKey, string(c.PrivateKey), c.BackOffConfig, settings.Logger), cfg: c, logger: settings.Logger, consumer: consumer, pollInterval: c.Events.PollInterval, wg: &sync.WaitGroup{}, maxPages: int(c.Events.MaxPages), pageSize: int(c.Events.PageSize), storageClient: storage.NewNopClient(), } if r.maxPages == 0 { r.maxPages = defaultEventsMaxPages } if r.pageSize == 0 { r.pageSize = defaultEventsPageSize } if r.pollInterval == 0 { r.pollInterval = defaultPollInterval } return r } func (er *eventsReceiver) Start(ctx context.Context, _ component.Host, storageClient storage.Client) error { er.logger.Debug("Starting up events receiver") cancelCtx, cancel := context.WithCancel(ctx) er.cancel = cancel er.storageClient = storageClient er.loadCheckpoint(cancelCtx) return er.startPolling(cancelCtx) } func (er *eventsReceiver) Shutdown(ctx context.Context) error { er.logger.Debug("Shutting down events receiver") er.cancel() er.wg.Wait() var errs []error errs = append(errs, er.client.Shutdown()) errs = append(errs, er.checkpoint(ctx)) return errors.Join(errs...) 
} func (er *eventsReceiver) startPolling(ctx context.Context) error { t := time.NewTicker(er.pollInterval) er.wg.Add(1) go func() { defer er.wg.Done() for { select { case <-t.C: if err := er.pollEvents(ctx); err != nil { er.logger.Error("error while polling for events", zap.Error(err)) } case <-ctx.Done(): return } } }() return nil } func (er *eventsReceiver) pollEvents(ctx context.Context) error { st := pcommon.NewTimestampFromTime(time.Now().Add(-er.pollInterval)).AsTime() if er.record.NextStartTime != nil { st = *er.record.NextStartTime } et := time.Now() for _, pc := range er.cfg.Events.Projects { project, err := er.client.GetProject(ctx, pc.Name) if err != nil { er.logger.Error("error retrieving project information for "+pc.Name+":", zap.Error(err)) return err } er.pollProject(ctx, project, pc, st, et) } for _, pc := range er.cfg.Events.Organizations { org, err := er.client.GetOrganization(ctx, pc.ID) if err != nil { er.logger.Error("error retrieving org information for "+pc.ID+":", zap.Error(err)) return err } er.pollOrg(ctx, org, pc, st, et) } er.record.NextStartTime = &et return er.checkpoint(ctx) } func (er *eventsReceiver) pollProject(ctx context.Context, project *mongodbatlas.Project, p *ProjectConfig, startTime, now time.Time) { for pageN := 1; pageN <= er.maxPages; pageN++ { opts := &internal.GetEventsOptions{ PageNum: pageN, EventTypes: er.cfg.Events.Types, MaxDate: now, MinDate: startTime, } projectEvents, hasNext, err := er.client.GetProjectEvents(ctx, project.ID, opts) if err != nil { er.logger.Error("unable to get events for project", zap.Error(err), zap.String("project", p.Name)) break } now := pcommon.NewTimestampFromTime(now) logs := er.transformProjectEvents(now, projectEvents, project) if logs.LogRecordCount() > 0 { if err = er.consumer.ConsumeLogs(ctx, logs); err != nil { er.logger.Error("error consuming project events", zap.Error(err)) break } } if !hasNext { break } } } func (er *eventsReceiver) pollOrg(ctx context.Context, org *mongodbatlas.Organization, p *OrgConfig, startTime, now time.Time) { for pageN := 1; pageN <= er.maxPages; pageN++ { opts := &internal.GetEventsOptions{ PageNum: pageN, EventTypes: er.cfg.Events.Types, MaxDate: now, MinDate: startTime, } organizationEvents, hasNext, err := er.client.GetOrganizationEvents(ctx, org.ID, opts) if err != nil { er.logger.Error("unable to get events for organization", zap.Error(err), zap.String("organization", p.ID)) break } now := pcommon.NewTimestampFromTime(now) logs := er.transformOrgEvents(now, organizationEvents, org) if logs.LogRecordCount() > 0 { if err = er.consumer.ConsumeLogs(ctx, logs); err != nil { er.logger.Error("error consuming organization events", zap.Error(err)) break } } if !hasNext { break } } } func (er *eventsReceiver) transformProjectEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, p *mongodbatlas.Project) plog.Logs { logs := plog.NewLogs() resourceLogs := logs.ResourceLogs().AppendEmpty() ra := resourceLogs.Resource().Attributes() ra.PutStr("mongodbatlas.project.name", p.Name) ra.PutStr("mongodbatlas.org.id", p.OrgID) er.transformEvents(now, events, &resourceLogs) return logs } func (er *eventsReceiver) transformOrgEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, o *mongodbatlas.Organization) plog.Logs { logs := plog.NewLogs() resourceLogs := logs.ResourceLogs().AppendEmpty() ra := resourceLogs.Resource().Attributes() ra.PutStr("mongodbatlas.org.id", o.ID) er.transformEvents(now, events, &resourceLogs) return logs } func (er *eventsReceiver) transformEvents(now 
pcommon.Timestamp, events []*mongodbatlas.Event, resourceLogs *plog.ResourceLogs) { for _, event := range events { logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() bodyBytes, err := json.Marshal(event) if err != nil { er.logger.Error("unable to unmarshal event into body string", zap.Error(err)) continue } logRecord.Body().SetStr(string(bodyBytes)) // ISO-8601 formatted ts, err := time.Parse(time.RFC3339, event.Created) if err != nil { er.logger.Warn("unable to interpret when an event was created, expecting a RFC3339 timestamp", zap.String("timestamp", event.Created), zap.String("event", event.ID)) logRecord.SetTimestamp(now) } else { logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts)) } logRecord.SetObservedTimestamp(now) attrs := logRecord.Attributes() // always present attributes attrs.PutStr("event.domain", "mongodbatlas") attrs.PutStr("type", event.EventTypeName) attrs.PutStr("id", event.ID) attrs.PutStr("group.id", event.GroupID) parseOptionalAttributes(&attrs, event) } } func (er *eventsReceiver) checkpoint(ctx context.Context) error { marshalBytes, err := json.Marshal(er.record) if err != nil { return fmt.Errorf("unable to write checkpoint: %w", err) } return er.storageClient.Set(ctx, eventStorageKey, marshalBytes) } func (er *eventsReceiver) loadCheckpoint(ctx context.Context) { cBytes, err := er.storageClient.Get(ctx, eventStorageKey) if err != nil { er.logger.Info("unable to load checkpoint from storage client, continuing without a previous checkpoint", zap.Error(err)) er.record = &eventRecord{} return } if cBytes == nil { er.record = &eventRecord{} return } var record eventRecord if err = json.Unmarshal(cBytes, &record); err != nil { er.logger.Error("unable to decode stored record for events, continuing without a checkpoint", zap.Error(err)) er.record = &eventRecord{} return } er.record = &record } func parseOptionalAttributes(m *pcommon.Map, event *mongodbatlas.Event) { if event.AlertID != "" { m.PutStr("alert.id", event.AlertID) } if event.AlertConfigID != "" { m.PutStr("alert.config.id", event.AlertConfigID) } if event.Collection != "" { m.PutStr("collection", event.Collection) } if event.Database != "" { m.PutStr("database", event.Database) } if event.Hostname != "" { m.PutStr("net.peer.name", event.Hostname) } if event.Port != 0 { m.PutInt("net.peer.port", int64(event.Port)) } if event.InvoiceID != "" { m.PutStr("invoice.id", event.InvoiceID) } if event.Username != "" { m.PutStr("user.name", event.Username) } if event.TargetUsername != "" { m.PutStr("target.user.name", event.TargetUsername) } if event.UserID != "" { m.PutStr("user.id", event.UserID) } if event.TeamID != "" { m.PutStr("team.id", event.TeamID) } if event.RemoteAddress != "" { m.PutStr("remote.ip", event.RemoteAddress) } if event.MetricName != "" { m.PutStr("metric.name", event.MetricName) } if event.OpType != "" { m.PutStr("event.op_type", event.OpType) } if event.PaymentID != "" { m.PutStr("payment.id", event.PaymentID) } if event.ReplicaSetName != "" { m.PutStr("replica_set.name", event.ReplicaSetName) } if event.CurrentValue != nil { m.PutDouble("metric.value", *event.CurrentValue.Number) m.PutStr("metric.units", event.CurrentValue.Units) } if event.ShardName != "" { m.PutStr("shard.name", event.ShardName) } }
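// Illustrative sketch, not part of the receiver: the events checkpoint above is just a small JSON
// document holding next_start_time, written through the storage extension and reloaded on start so
// polling resumes where it left off. A minimal round trip of a record of that shape (the struct
// tags and timestamp here are simplified stand-ins, not the receiver's exact serialization):
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type checkpointRecord struct {
	NextStartTime *time.Time `json:"next_start_time"`
}

func main() {
	ts := time.Date(2023, 4, 26, 2, 38, 56, 0, time.UTC)
	buf, err := json.Marshal(checkpointRecord{NextStartTime: &ts})
	if err != nil {
		panic(err)
	}

	var restored checkpointRecord
	if err := json.Unmarshal(buf, &restored); err != nil {
		panic(err)
	}
	fmt.Println(restored.NextStartTime.Equal(ts)) // true: polling would resume from ts
}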
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "context" "errors" "fmt" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/scraper/scraperhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata" ) const ( defaultGranularity = "PT1M" // 1-minute, as per https://docs.atlas.mongodb.com/reference/api/process-measurements/ defaultAlertsEnabled = false defaultLogsEnabled = false ) // NewFactory creates a factory for MongoDB Atlas receiver func NewFactory() receiver.Factory { return receiver.NewFactory( metadata.Type, createDefaultConfig, receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), receiver.WithLogs(createCombinedLogReceiver, metadata.LogsStability)) } func createMetricsReceiver( _ context.Context, params receiver.Settings, rConf component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { cfg := rConf.(*Config) recv := newMongoDBAtlasReceiver(params, cfg) ms, err := newMongoDBAtlasScraper(recv) if err != nil { return nil, fmt.Errorf("unable to create a MongoDB Atlas Scraper instance: %w", err) } return scraperhelper.NewMetricsController(&cfg.ControllerConfig, params, consumer, scraperhelper.AddScraper(metadata.Type, ms)) } func createCombinedLogReceiver( _ context.Context, params receiver.Settings, rConf component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { cfg := rConf.(*Config) if !cfg.Alerts.Enabled && !cfg.Logs.Enabled && cfg.Events == nil { return nil, errors.New("one of 'alerts', 'events', or 'logs' must be enabled") } var err error recv := &combinedLogsReceiver{ id: params.ID, storageID: cfg.StorageID, } if cfg.Alerts.Enabled { recv.alerts, err = newAlertsReceiver(params, cfg, consumer) if err != nil { return nil, fmt.Errorf("unable to create a MongoDB Atlas Alerts Receiver instance: %w", err) } } if cfg.Logs.Enabled { recv.logs = newMongoDBAtlasLogsReceiver(params, cfg, consumer) // Confirm at least one project is enabled for access logs before adding for _, project := range cfg.Logs.Projects { if project.AccessLogs != nil && project.AccessLogs.IsEnabled() { recv.accessLogs = newAccessLogsReceiver(params, cfg, consumer) break } } } if cfg.Events != nil { recv.events = newEventsReceiver(params, cfg, consumer) } return recv, nil } func createDefaultConfig() component.Config { c := &Config{ ControllerConfig: scraperhelper.NewDefaultControllerConfig(), Granularity: defaultGranularity, BackOffConfig: configretry.NewDefaultBackOffConfig(), MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), Alerts: AlertConfig{ Enabled: defaultAlertsEnabled, Mode: alertModeListen, PollInterval: defaultAlertsPollInterval, PageSize: defaultAlertsPageSize, MaxPages: defaultAlertsMaxPages, }, Logs: LogConfig{ Enabled: defaultLogsEnabled, Projects: []*LogsProjectConfig{}, }, } // reset default of 1 minute to be 3 minutes in order to avoid null values for some metrics that do not publish // more frequently c.ControllerConfig.CollectionInterval = 3 * time.Minute return c }
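// Illustrative sketch, not part of the receiver: createCombinedLogReceiver above refuses to build
// a logs pipeline unless at least one of alerts, logs, or events is configured. The same guard in
// isolation:
package main

import (
	"errors"
	"fmt"
)

// validateLogsEnabled mirrors the factory's gating check.
func validateLogsEnabled(alertsEnabled, logsEnabled, eventsConfigured bool) error {
	if !alertsEnabled && !logsEnabled && !eventsConfigured {
		return errors.New("one of 'alerts', 'events', or 'logs' must be enabled")
	}
	return nil
}

func main() {
	fmt.Println(validateLogsEnabled(false, false, false)) // error
	fmt.Println(validateLogsEnabled(true, false, false))  // <nil>
}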
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "bufio" "compress/gzip" "encoding/json" "io" "regexp" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model" ) func decodeLogs(logger *zap.Logger, clusterMajorVersion string, r io.Reader) ([]model.LogEntry, error) { switch clusterMajorVersion { case mongoDBMajorVersion4_2: // 4.2 clusters use a console log format return decode4_2(logger.Named("console_decoder"), r) default: // All other versions use JSON logging return decodeJSON(logger.Named("json_decoder"), r) } } func decodeJSON(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) { // Pass this into a gzip reader for decoding gzipReader, err := gzip.NewReader(r) if err != nil { return nil, err } scanner := bufio.NewScanner(gzipReader) var entries []model.LogEntry for { if !scanner.Scan() { // Scan failed; This might just be EOF, in which case Err will be nil, or it could be some other IO error. return entries, scanner.Err() } var entry model.LogEntry if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil { logger.Error("Failed to parse log entry as JSON", zap.String("entry", scanner.Text())) continue } entry.Raw = scanner.Text() entries = append(entries, entry) } } var mongo4_2LogRegex = regexp.MustCompile(`^(?P<timestamp>\S+)\s+(?P<severity>\w+)\s+(?P<component>[\w-]+)\s+\[(?P<context>\S+)\]\s+(?P<message>.*)$`) func decode4_2(logger *zap.Logger, r io.Reader) ([]model.LogEntry, error) { // Pass this into a gzip reader for decoding gzipReader, err := gzip.NewReader(r) if err != nil { return nil, err } scanner := bufio.NewScanner(gzipReader) var entries []model.LogEntry for { if !scanner.Scan() { // Scan failed; This might just be EOF, in which case Err will be nil, or it could be some other IO error. return entries, scanner.Err() } submatches := mongo4_2LogRegex.FindStringSubmatch(scanner.Text()) if len(submatches) != 6 { // Match failed for line; We will skip this line and continue processing others. logger.Error("Entry did not match regex", zap.String("entry", scanner.Text())) continue } entry := model.LogEntry{ Timestamp: model.LogTimestamp{ Date: submatches[1], }, Severity: submatches[2], Component: submatches[3], Context: submatches[4], Message: submatches[5], Raw: submatches[0], } entries = append(entries, entry) } } func decodeAuditJSON(logger *zap.Logger, r io.Reader) ([]model.AuditLog, error) { // Pass this into a gzip reader for decoding gzipReader, err := gzip.NewReader(r) if err != nil { return nil, err } scanner := bufio.NewScanner(gzipReader) var entries []model.AuditLog for { if !scanner.Scan() { // Scan failed; This might just be EOF, in which case Err will be nil, or it could be some other IO error. return entries, scanner.Err() } var entry model.AuditLog if err := json.Unmarshal(scanner.Bytes(), &entry); err != nil { logger.Error("Failed to parse audit log entry as JSON", zap.String("entry", scanner.Text())) continue } entry.Raw = scanner.Text() entries = append(entries, entry) } }
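// Illustrative sketch, not part of the receiver: decode4_2 above expects mongo4_2LogRegex to
// produce exactly six submatches per console-format line (the full match plus five named groups).
// The log line below is fabricated but follows the 4.2 console layout.
package main

import (
	"fmt"
	"regexp"
)

var consoleLogRegex = regexp.MustCompile(`^(?P<timestamp>\S+)\s+(?P<severity>\w+)\s+(?P<component>[\w-]+)\s+\[(?P<context>\S+)\]\s+(?P<message>.*)$`)

func main() {
	line := `2023-04-26T02:38:56.000+0000 I NETWORK [conn42] end connection 10.0.0.1:27017`
	m := consoleLogRegex.FindStringSubmatch(line)
	if len(m) != 6 {
		fmt.Println("line did not match")
		return
	}
	fmt.Println("timestamp:", m[1], "severity:", m[2], "component:", m[3])
	fmt.Println("context:", m[4], "message:", m[5])
}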
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "context" "errors" "io" "net" "strings" "sync" "time" "go.mongodb.org/atlas/mongodbatlas" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" rcvr "go.opentelemetry.io/collector/receiver" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model" ) const mongoDBMajorVersion4_2 = "4.2" type logsReceiver struct { log *zap.Logger cfg *Config client *internal.MongoDBAtlasClient consumer consumer.Logs stopperChan chan struct{} wg sync.WaitGroup start time.Time end time.Time } type ProjectContext struct { Project mongodbatlas.Project orgName string } // MongoDB Atlas Documentation recommends a polling interval of 5 minutes: https://www.mongodb.com/docs/atlas/reference/api/logs/#logs const collectionInterval = time.Minute * 5 func newMongoDBAtlasLogsReceiver(settings rcvr.Settings, cfg *Config, consumer consumer.Logs) *logsReceiver { client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger) for _, p := range cfg.Logs.Projects { p.populateIncludesAndExcludes() } return &logsReceiver{ log: settings.Logger, cfg: cfg, client: client, stopperChan: make(chan struct{}), consumer: consumer, } } // Log receiver logic func (s *logsReceiver) Start(ctx context.Context, _ component.Host) error { s.wg.Add(1) go func() { defer s.wg.Done() s.start = time.Now().Add(-collectionInterval) s.end = time.Now() for { s.collect(ctx) // collection interval loop select { case <-ctx.Done(): return case <-s.stopperChan: return case <-time.After(collectionInterval): s.start = s.end s.end = time.Now() } } }() return nil } func (s *logsReceiver) Shutdown(_ context.Context) error { close(s.stopperChan) s.wg.Wait() return s.client.Shutdown() } // parseHostNames parses out the hostnames from the specified cluster connection string func parseHostNames(s string, logger *zap.Logger) []string { var hostnames []string if s == "" { return []string{} } for _, t := range strings.Split(s, ",") { // separate hostname from scheme and port host, _, err := net.SplitHostPort(strings.TrimPrefix(t, "mongodb://")) if err != nil { logger.Error("Could not parse out hostname from "+t, zap.Error(err)) continue } hostnames = append(hostnames, host) } return hostnames } // collect spins off functionality of the receiver from the Start function func (s *logsReceiver) collect(ctx context.Context) { for _, projectCfg := range s.cfg.Logs.Projects { project, err := s.client.GetProject(ctx, projectCfg.Name) if err != nil { s.log.Error("Error retrieving project "+projectCfg.Name+":", zap.Error(err)) continue } pc := ProjectContext{Project: *project} org, err := s.client.GetOrganization(ctx, project.OrgID) if err != nil { s.log.Error("Error retrieving organization", zap.Error(err)) pc.orgName = "unknown" } else { pc.orgName = org.Name } // get clusters for each of the projects clusters, err := s.processClusters(ctx, *projectCfg, project.ID) if err != nil { s.log.Error("Failure to process Clusters", zap.Error(err)) } s.collectClusterLogs(clusters, *projectCfg, pc) } } func (s *logsReceiver) processClusters(ctx context.Context, projectCfg LogsProjectConfig, projectID string) ([]mongodbatlas.Cluster, error) { clusters, err := 
s.client.GetClusters(ctx, projectID) if err != nil { s.log.Error("Failure to collect clusters from project", zap.Error(err)) return nil, err } return filterClusters(clusters, projectCfg.ProjectConfig) } type ClusterInfo struct { ClusterName string RegionName string ProviderName string MongoDBMajorVersion string } func (s *logsReceiver) collectClusterLogs(clusters []mongodbatlas.Cluster, projectCfg LogsProjectConfig, pc ProjectContext) { for _, cluster := range clusters { clusterInfo := ClusterInfo{ ClusterName: cluster.Name, RegionName: cluster.ProviderSettings.RegionName, ProviderName: cluster.ProviderSettings.ProviderName, MongoDBMajorVersion: cluster.MongoDBMajorVersion, } hostnames := parseHostNames(cluster.ConnectionStrings.Standard, s.log) for _, hostname := range hostnames { // Defaults to true if not specified if projectCfg.EnableHostLogs == nil || *projectCfg.EnableHostLogs { s.log.Debug("Collecting logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name)) s.collectLogs(pc, hostname, "mongodb.gz", clusterInfo) s.collectLogs(pc, hostname, "mongos.gz", clusterInfo) } // Defaults to false if not specified if projectCfg.EnableAuditLogs { s.log.Debug("Collecting audit logs for host", zap.String("hostname", hostname), zap.String("cluster", cluster.Name)) s.collectAuditLogs(pc, hostname, "mongodb-audit-log.gz", clusterInfo) s.collectAuditLogs(pc, hostname, "mongos-audit-log.gz", clusterInfo) } } } } func filterClusters(clusters []mongodbatlas.Cluster, projectCfg ProjectConfig) ([]mongodbatlas.Cluster, error) { include, exclude := projectCfg.IncludeClusters, projectCfg.ExcludeClusters var allowed bool var clusterNameSet map[string]struct{} // check to include or exclude clusters switch { // keep all clusters if include and exclude are not specified case len(include) == 0 && len(exclude) == 0: return clusters, nil // include is initialized case len(include) > 0 && len(exclude) == 0: allowed = true clusterNameSet = projectCfg.includesByClusterName // exclude is initialized case len(exclude) > 0 && len(include) == 0: allowed = false clusterNameSet = projectCfg.excludesByClusterName // both are initialized default: return nil, errors.New("both Include and Exclude clusters configured") } var filtered []mongodbatlas.Cluster for _, cluster := range clusters { if _, ok := clusterNameSet[cluster.Name]; (!ok && !allowed) || (ok && allowed) { filtered = append(filtered, cluster) } } return filtered, nil } func (s *logsReceiver) getHostLogs(groupID, hostname, logName string, clusterMajorVersion string) ([]model.LogEntry, error) { // Get gzip bytes buffer from API buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end) if err != nil { return nil, err } return decodeLogs(s.log, clusterMajorVersion, buf) } func (s *logsReceiver) getHostAuditLogs(groupID, hostname, logName string) ([]model.AuditLog, error) { // Get gzip bytes buffer from API buf, err := s.client.GetLogs(context.Background(), groupID, hostname, logName, s.start, s.end) if err != nil { return nil, err } return decodeAuditJSON(s.log, buf) } func (s *logsReceiver) collectLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) { logs, err := s.getHostLogs(pc.Project.ID, hostname, logName, clusterInfo.MongoDBMajorVersion) if err != nil && !errors.Is(err, io.EOF) { s.log.Warn("Failed to retrieve host logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end)) return } if 
len(logs) == 0 { s.log.Warn("Attempted to retrieve host logs but received 0 logs", zap.Error(err), zap.String("log", logName), zap.String("hostname", hostname), zap.Time("startTime", s.start), zap.Time("endTime", s.end)) return } plog := mongodbEventToLogData(s.log, logs, pc, hostname, logName, clusterInfo) err = s.consumer.ConsumeLogs(context.Background(), plog) if err != nil { s.log.Error("Failed to consume logs", zap.Error(err)) } } func (s *logsReceiver) collectAuditLogs(pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) { logs, err := s.getHostAuditLogs( pc.Project.ID, hostname, logName, ) if err != nil && !errors.Is(err, io.EOF) { s.log.Warn("Failed to retrieve audit logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end)) return } if len(logs) == 0 { s.log.Warn("Attempted to retrieve audit logs but received 0 logs", zap.Error(err), zap.String("hostname", hostname), zap.String("log", logName), zap.Time("startTime", s.start), zap.Time("endTime", s.end)) return } plog, err := mongodbAuditEventToLogData(s.log, logs, pc, hostname, logName, clusterInfo) if err != nil { s.log.Warn("Failed to translate audit logs: "+logName, zap.Error(err)) return } err = s.consumer.ConsumeLogs(context.Background(), plog) if err != nil { s.log.Error("Failed to consume logs", zap.Error(err)) } }
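// Illustrative sketch, not part of the receiver: parseHostNames above takes a standard connection
// string such as "mongodb://host-00:27017,host-01:27017", splits it on commas, strips the scheme,
// and drops the port from each entry. The hostnames below are made up.
package main

import (
	"fmt"
	"net"
	"strings"
)

// hostNames mirrors the split/trim/SplitHostPort steps of parseHostNames.
func hostNames(connString string) []string {
	var hosts []string
	for _, part := range strings.Split(connString, ",") {
		host, _, err := net.SplitHostPort(strings.TrimPrefix(part, "mongodb://"))
		if err != nil {
			continue // skip entries that are not host:port pairs
		}
		hosts = append(hosts, host)
	}
	return hosts
}

func main() {
	fmt.Println(hostNames("mongodb://host-00.example.net:27017,host-01.example.net:27017"))
	// [host-00.example.net host-01.example.net]
}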
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" import ( "time" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/model" ) const ( // Number of log attributes to add to the plog.LogRecordSlice for host logs. totalLogAttributes = 10 // Number of log attributes to add to the plog.LogRecordSlice for audit logs. totalAuditLogAttributes = 16 // Number of resource attributes to add to the plog.ResourceLogs. totalResourceAttributes = 6 ) // Timestamp layouts for MongoDB log entries: JSON logs use a UTC offset with a colon, 4.2 console logs omit it. const ( jsonTimestampLayout = "2006-01-02T15:04:05.000-07:00" consoleTimestampLayout = "2006-01-02T15:04:05.000-0700" ) // Severity mapping of the mongodb atlas logs var severityMap = map[string]plog.SeverityNumber{ "F": plog.SeverityNumberFatal, "E": plog.SeverityNumberError, "W": plog.SeverityNumberWarn, "I": plog.SeverityNumberInfo, "D": plog.SeverityNumberDebug, "D1": plog.SeverityNumberDebug, "D2": plog.SeverityNumberDebug2, "D3": plog.SeverityNumberDebug3, "D4": plog.SeverityNumberDebug4, "D5": plog.SeverityNumberDebug4, } // mongodbAuditEventToLogData converts model.AuditLog events to plog.Logs and adds the resource attributes. func mongodbAuditEventToLogData(logger *zap.Logger, logs []model.AuditLog, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) (plog.Logs, error) { ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() resourceAttrs := rl.Resource().Attributes() resourceAttrs.EnsureCapacity(totalResourceAttributes) // Attributes related to the object causing the event. 
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName) resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name) resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName) resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName) resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName) resourceAttrs.PutStr("mongodb_atlas.host.name", hostname) var errs []error for _, log := range logs { lr := sl.LogRecords().AppendEmpty() logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion) t, err := time.Parse(logTsFormat, log.Timestamp.Date) if err != nil { logger.Warn("Time failed to parse correctly", zap.Error(err)) } lr.SetTimestamp(pcommon.NewTimestampFromTime(t)) lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now())) // Insert Raw Log message into Body of LogRecord lr.Body().SetStr(log.Raw) // Since audit logs don't have a severity/level, // set the "SeverityNumber" and "SeverityText" to INFO lr.SetSeverityNumber(plog.SeverityNumberInfo) lr.SetSeverityText("INFO") attrs := lr.Attributes() attrs.EnsureCapacity(totalAuditLogAttributes) attrs.PutStr("atype", log.Type) if log.Local.IP != nil { attrs.PutStr("local.ip", *log.Local.IP) } if log.Local.Port != nil { attrs.PutInt("local.port", int64(*log.Local.Port)) } if log.Local.SystemUser != nil { attrs.PutBool("local.isSystemUser", *log.Local.SystemUser) } if log.Local.UnixSocket != nil { attrs.PutStr("local.unix", *log.Local.UnixSocket) } if log.Remote.IP != nil { attrs.PutStr("remote.ip", *log.Remote.IP) } if log.Remote.Port != nil { attrs.PutInt("remote.port", int64(*log.Remote.Port)) } if log.Remote.SystemUser != nil { attrs.PutBool("remote.isSystemUser", *log.Remote.SystemUser) } if log.Remote.UnixSocket != nil { attrs.PutStr("remote.unix", *log.Remote.UnixSocket) } if log.ID != nil { attrs.PutStr("uuid.binary", log.ID.Binary) attrs.PutStr("uuid.type", log.ID.Type) } attrs.PutInt("result", int64(log.Result)) if err = attrs.PutEmptyMap("param").FromRaw(log.Param); err != nil { errs = append(errs, err) } usersSlice := attrs.PutEmptySlice("users") usersSlice.EnsureCapacity(len(log.Users)) for _, user := range log.Users { user.Pdata().CopyTo(usersSlice.AppendEmpty().SetEmptyMap()) } rolesSlice := attrs.PutEmptySlice("roles") rolesSlice.EnsureCapacity(len(log.Roles)) for _, roles := range log.Roles { roles.Pdata().CopyTo(rolesSlice.AppendEmpty().SetEmptyMap()) } attrs.PutStr("log_name", logName) } return ld, multierr.Combine(errs...) } // mongodbEventToLogData converts model.LogEntry events to plog.Logs and adds the resource attributes. func mongodbEventToLogData(logger *zap.Logger, logs []model.LogEntry, pc ProjectContext, hostname, logName string, clusterInfo ClusterInfo) plog.Logs { ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() resourceAttrs := rl.Resource().Attributes() resourceAttrs.EnsureCapacity(totalResourceAttributes) // Attributes related to the object causing the event. 
resourceAttrs.PutStr("mongodb_atlas.org", pc.orgName) resourceAttrs.PutStr("mongodb_atlas.project", pc.Project.Name) resourceAttrs.PutStr("mongodb_atlas.cluster", clusterInfo.ClusterName) resourceAttrs.PutStr("mongodb_atlas.region.name", clusterInfo.RegionName) resourceAttrs.PutStr("mongodb_atlas.provider.name", clusterInfo.ProviderName) resourceAttrs.PutStr("mongodb_atlas.host.name", hostname) logTsFormat := tsLayout(clusterInfo.MongoDBMajorVersion) for _, log := range logs { lr := sl.LogRecords().AppendEmpty() t, err := time.Parse(logTsFormat, log.Timestamp.Date) if err != nil { logger.Warn("Time failed to parse correctly", zap.Error(err)) } lr.SetTimestamp(pcommon.NewTimestampFromTime(t)) lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now())) // Insert Raw Log message into Body of LogRecord lr.Body().SetStr(log.Raw) // Set the "SeverityNumber" and "SeverityText" if a known type of // severity is found. if severityNumber, ok := severityMap[log.Severity]; ok { lr.SetSeverityNumber(severityNumber) lr.SetSeverityText(log.Severity) } else { logger.Debug("unknown severity type", zap.String("type", log.Severity)) } attrs := lr.Attributes() attrs.EnsureCapacity(totalLogAttributes) //nolint:errcheck attrs.FromRaw(log.Attributes) attrs.PutStr("message", log.Message) attrs.PutStr("component", log.Component) attrs.PutStr("context", log.Context) // log ID is not present on MongoDB 4.2 systems if clusterInfo.MongoDBMajorVersion != mongoDBMajorVersion4_2 { attrs.PutInt("id", log.ID) } attrs.PutStr("log_name", logName) } return ld } func tsLayout(clusterVersion string) string { switch clusterVersion { case mongoDBMajorVersion4_2: return consoleTimestampLayout default: return jsonTimestampLayout } }
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"go.mongodb.org/atlas/mongodbatlas"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/scraper"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)

type mongodbatlasreceiver struct {
	log         *zap.Logger
	cfg         *Config
	client      *internal.MongoDBAtlasClient
	lastRun     time.Time
	mb          *metadata.MetricsBuilder
	stopperChan chan struct{}
}

type timeconstraints struct {
	start      string
	end        string
	resolution string
}

func newMongoDBAtlasReceiver(settings receiver.Settings, cfg *Config) *mongodbatlasreceiver {
	client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.BackOffConfig, settings.Logger)
	for _, p := range cfg.Projects {
		p.populateIncludesAndExcludes()
	}
	return &mongodbatlasreceiver{
		log:         settings.Logger,
		cfg:         cfg,
		client:      client,
		mb:          metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings),
		stopperChan: make(chan struct{}),
	}
}

func newMongoDBAtlasScraper(recv *mongodbatlasreceiver) (scraper.Metrics, error) {
	return scraper.NewMetrics(recv.scrape, scraper.WithShutdown(recv.shutdown))
}

func (s *mongodbatlasreceiver) scrape(ctx context.Context) (pmetric.Metrics, error) {
	now := time.Now()
	if err := s.poll(ctx, s.timeConstraints(now)); err != nil {
		return pmetric.Metrics{}, err
	}
	s.lastRun = now
	return s.mb.Emit(), nil
}

func (s *mongodbatlasreceiver) timeConstraints(now time.Time) timeconstraints {
	var start time.Time
	if s.lastRun.IsZero() {
		start = now.Add(-s.cfg.CollectionInterval)
	} else {
		start = s.lastRun
	}
	return timeconstraints{
		start.UTC().Format(time.RFC3339),
		now.UTC().Format(time.RFC3339),
		s.cfg.Granularity,
	}
}

func (s *mongodbatlasreceiver) shutdown(context.Context) error {
	return s.client.Shutdown()
}

// poll decides whether to poll all projects or a specific project based on the configuration.
func (s *mongodbatlasreceiver) poll(ctx context.Context, time timeconstraints) error {
	if len(s.cfg.Projects) == 0 {
		return s.pollAllProjects(ctx, time)
	}
	return s.pollProjects(ctx, time)
}

// pollAllProjects handles polling across all projects within the organizations.
func (s *mongodbatlasreceiver) pollAllProjects(ctx context.Context, time timeconstraints) error {
	orgs, err := s.client.Organizations(ctx)
	if err != nil {
		return fmt.Errorf("error retrieving organizations: %w", err)
	}
	for _, org := range orgs {
		proj, err := s.client.Projects(ctx, org.ID)
		if err != nil {
			s.log.Error("error retrieving projects", zap.String("orgID", org.ID), zap.Error(err))
			continue
		}
		for _, project := range proj {
			// Since there is no specific ProjectConfig for these projects, pass nil.
			if err := s.processProject(ctx, time, org.Name, project, nil); err != nil {
				s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
			}
		}
	}
	return nil
}

// pollProjects handles polling for the specific projects named in the configuration.
func (s *mongodbatlasreceiver) pollProjects(ctx context.Context, time timeconstraints) error { for _, projectCfg := range s.cfg.Projects { project, err := s.client.GetProject(ctx, projectCfg.Name) if err != nil { s.log.Error("error retrieving project", zap.String("projectName", projectCfg.Name), zap.Error(err)) continue } org, err := s.client.GetOrganization(ctx, project.OrgID) if err != nil { s.log.Error("error retrieving organization from project", zap.String("projectName", projectCfg.Name), zap.Error(err)) continue } if err := s.processProject(ctx, time, org.Name, project, projectCfg); err != nil { s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err)) } } return nil } func (s *mongodbatlasreceiver) processProject(ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, projectCfg *ProjectConfig) error { nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID) if err != nil { return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err) } processes, err := s.client.Processes(ctx, project.ID) if err != nil { return fmt.Errorf("error retrieving MongoDB Atlas processes for project %s: %w", project.ID, err) } for _, process := range processes { clusterName := nodeClusterMap[process.UserAlias] providerValues := providerMap[clusterName] if !shouldProcessCluster(projectCfg, clusterName) { // Skip processing for this cluster continue } if err := s.extractProcessMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err) } if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err) } if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil { return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err) } } return nil } // shouldProcessCluster checks whether a given cluster should be processed based on the project configuration. func shouldProcessCluster(projectCfg *ProjectConfig, clusterName string) bool { if projectCfg == nil { // If there is no project config, process all clusters. return true } _, isIncluded := projectCfg.includesByClusterName[clusterName] _, isExcluded := projectCfg.excludesByClusterName[clusterName] // Return false immediately if the cluster is excluded. if isExcluded { return false } // If IncludeClusters is empty, or the cluster is explicitly included, return true. 
return len(projectCfg.IncludeClusters) == 0 || isIncluded } type providerValues struct { RegionName string ProviderName string } func (s *mongodbatlasreceiver) getNodeClusterNameMap( ctx context.Context, projectID string, ) (map[string]string, map[string]providerValues, error) { providerMap := make(map[string]providerValues) clusterMap := make(map[string]string) clusters, err := s.client.GetClusters(ctx, projectID) if err != nil { return nil, nil, err } for _, cluster := range clusters { // URI in the form mongodb://host1.mongodb.net:27017,host2.mongodb.net:27017,host3.mongodb.net:27017 nodes := strings.Split(strings.TrimPrefix(cluster.MongoURI, "mongodb://"), ",") for _, node := range nodes { // Remove the port from the node n, _, _ := strings.Cut(node, ":") clusterMap[n] = cluster.Name } providerMap[cluster.Name] = providerValues{ RegionName: cluster.ProviderSettings.RegionName, ProviderName: cluster.ProviderSettings.ProviderName, } } return clusterMap, providerMap, nil } func (s *mongodbatlasreceiver) extractProcessMetrics( ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, clusterName string, providerValues providerValues, ) error { if err := s.client.ProcessMetrics( ctx, s.mb, project.ID, process.Hostname, process.Port, time.start, time.end, time.resolution, ); err != nil { return fmt.Errorf("error when polling process metrics from MongoDB Atlas: %w", err) } rb := s.mb.NewResourceBuilder() rb.SetMongodbAtlasOrgName(orgName) rb.SetMongodbAtlasProjectName(project.Name) rb.SetMongodbAtlasProjectID(project.ID) rb.SetMongodbAtlasHostName(process.Hostname) rb.SetMongodbAtlasUserAlias(process.UserAlias) rb.SetMongodbAtlasClusterName(clusterName) rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port)) rb.SetMongodbAtlasProcessTypeName(process.TypeName) rb.SetMongodbAtlasProcessID(process.ID) rb.SetMongodbAtlasRegionName(providerValues.RegionName) rb.SetMongodbAtlasProviderName(providerValues.ProviderName) s.mb.EmitForResource(metadata.WithResource(rb.Emit())) return nil } func (s *mongodbatlasreceiver) extractProcessDatabaseMetrics( ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, clusterName string, providerValues providerValues, ) error { processDatabases, err := s.client.ProcessDatabases( ctx, project.ID, process.Hostname, process.Port, ) if err != nil { return fmt.Errorf("error retrieving process databases: %w", err) } for _, db := range processDatabases { if err := s.client.ProcessDatabaseMetrics( ctx, s.mb, project.ID, process.Hostname, process.Port, db.DatabaseName, time.start, time.end, time.resolution, ); err != nil { return fmt.Errorf("error when polling database metrics from MongoDB Atlas: %w", err) } rb := s.mb.NewResourceBuilder() rb.SetMongodbAtlasOrgName(orgName) rb.SetMongodbAtlasProjectName(project.Name) rb.SetMongodbAtlasProjectID(project.ID) rb.SetMongodbAtlasHostName(process.Hostname) rb.SetMongodbAtlasUserAlias(process.UserAlias) rb.SetMongodbAtlasClusterName(clusterName) rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port)) rb.SetMongodbAtlasProcessTypeName(process.TypeName) rb.SetMongodbAtlasProcessID(process.ID) rb.SetMongodbAtlasDbName(db.DatabaseName) rb.SetMongodbAtlasRegionName(providerValues.RegionName) rb.SetMongodbAtlasProviderName(providerValues.ProviderName) s.mb.EmitForResource(metadata.WithResource(rb.Emit())) } return nil } func (s *mongodbatlasreceiver) extractProcessDiskMetrics( ctx context.Context, time 
timeconstraints, orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, clusterName string, providerValues providerValues, ) error { for _, disk := range s.client.ProcessDisks(ctx, project.ID, process.Hostname, process.Port) { if err := s.client.ProcessDiskMetrics( ctx, s.mb, project.ID, process.Hostname, process.Port, disk.PartitionName, time.start, time.end, time.resolution, ); err != nil { return fmt.Errorf("error when polling disk metrics from MongoDB Atlas: %w", err) } rb := s.mb.NewResourceBuilder() rb.SetMongodbAtlasOrgName(orgName) rb.SetMongodbAtlasProjectName(project.Name) rb.SetMongodbAtlasProjectID(project.ID) rb.SetMongodbAtlasHostName(process.Hostname) rb.SetMongodbAtlasUserAlias(process.UserAlias) rb.SetMongodbAtlasClusterName(clusterName) rb.SetMongodbAtlasProcessPort(strconv.Itoa(process.Port)) rb.SetMongodbAtlasProcessTypeName(process.TypeName) rb.SetMongodbAtlasProcessID(process.ID) rb.SetMongodbAtlasDiskPartition(disk.PartitionName) rb.SetMongodbAtlasRegionName(providerValues.RegionName) rb.SetMongodbAtlasProviderName(providerValues.ProviderName) s.mb.EmitForResource(metadata.WithResource(rb.Emit())) } return nil }
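The scrape window logic above is easy to get wrong, so here is a minimal standalone sketch of it: on the first scrape the window reaches back one collection interval; on subsequent scrapes it starts at the previous run. The windowFor name is illustrative, not the receiver's API.

package main

import (
	"fmt"
	"time"
)

// windowFor mirrors the receiver's timeConstraints computation.
func windowFor(lastRun, now time.Time, interval time.Duration) (start, end string) {
	s := lastRun
	if lastRun.IsZero() {
		// First run: look back one collection interval.
		s = now.Add(-interval)
	}
	return s.UTC().Format(time.RFC3339), now.UTC().Format(time.RFC3339)
}

func main() {
	now := time.Date(2024, 1, 1, 0, 1, 0, 0, time.UTC)
	start, end := windowFor(time.Time{}, now, time.Minute)
	fmt.Println(start, end) // 2024-01-01T00:00:00Z 2024-01-01T00:01:00Z
}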
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver"

// This file implements factory for SAPM receiver.

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/receiver"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver/internal/metadata"
)

const (
	// Default endpoint to bind to.
	defaultEndpoint = "localhost:7276"
)

// NewFactory creates a factory for SAPM receiver.
func NewFactory() receiver.Factory {
	return receiver.NewFactory(
		metadata.Type,
		createDefaultConfig,
		receiver.WithTraces(createTracesReceiver, metadata.TracesStability))
}

func createDefaultConfig() component.Config {
	return &Config{
		ServerConfig: confighttp.ServerConfig{
			Endpoint: defaultEndpoint,
		},
	}
}

// extractPortFromEndpoint extracts the port number from a string in "address:port" format.
// If the port number cannot be extracted, it returns an error.
// TODO make this a utility function
func extractPortFromEndpoint(endpoint string) (int, error) {
	_, portStr, err := net.SplitHostPort(endpoint)
	if err != nil {
		return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
	}
	port, err := strconv.ParseInt(portStr, 10, 0)
	if err != nil {
		return 0, fmt.Errorf("endpoint port is not a number: %w", err)
	}
	if port < 1 || port > 65535 {
		return 0, errors.New("port number must be between 1 and 65535")
	}
	return int(port), nil
}

// validate verifies that the endpoint is well formed and carries a port in the valid range.
func (rCfg *Config) validate() error {
	_, err := extractPortFromEndpoint(rCfg.Endpoint)
	if err != nil {
		return err
	}
	return nil
}

// createTracesReceiver creates a traces receiver based on the provided config.
func createTracesReceiver(
	_ context.Context,
	params receiver.Settings,
	cfg component.Config,
	nextConsumer consumer.Traces,
) (receiver.Traces, error) {
	// assert config is SAPM config
	rCfg := cfg.(*Config)

	err := rCfg.validate()
	if err != nil {
		return nil, err
	}

	// Create the receiver.
	return newReceiver(params, rCfg, nextConsumer)
}
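For reference, a standalone sketch of the endpoint validation above; parsePort is a hypothetical stand-in for extractPortFromEndpoint so the snippet compiles on its own:

package main

import (
	"errors"
	"fmt"
	"net"
	"strconv"
)

// parsePort mirrors extractPortFromEndpoint: split host:port, parse the
// port, and reject values outside the valid TCP range.
func parsePort(endpoint string) (int, error) {
	_, portStr, err := net.SplitHostPort(endpoint)
	if err != nil {
		return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err)
	}
	port, err := strconv.ParseInt(portStr, 10, 0)
	if err != nil {
		return 0, fmt.Errorf("endpoint port is not a number: %w", err)
	}
	if port < 1 || port > 65535 {
		return 0, errors.New("port number must be between 1 and 65535")
	}
	return int(port), nil
}

func main() {
	fmt.Println(parsePort("localhost:7276")) // 7276 <nil>
	fmt.Println(parsePort("localhost"))      // 0 and a "missing port" error
}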
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver" import ( "bytes" "compress/gzip" "context" "errors" "fmt" "io" "net/http" "sync" "github.com/gorilla/mux" splunksapm "github.com/signalfx/sapm-proto/gen" "github.com/signalfx/sapm-proto/sapmprotocol" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) var gzipWriterPool = &sync.Pool{ New: func() any { return gzip.NewWriter(io.Discard) }, } // sapmReceiver receives spans in the Splunk SAPM format over HTTP type sapmReceiver struct { settings component.TelemetrySettings config *Config server *http.Server shutdownWG sync.WaitGroup nextConsumer consumer.Traces // defaultResponse is a placeholder. For now this receiver returns an empty sapm response. // This defaultResponse is an optimization so we don't have to proto.Marshal the response // for every request. At some point this may be removed when there is actual content to return. defaultResponse []byte obsrecv *receiverhelper.ObsReport } // handleRequest parses an http request containing sapm and passes the trace data to the next consumer func (sr *sapmReceiver) handleRequest(req *http.Request) error { sapm, err := sapmprotocol.ParseTraceV2Request(req) // errors processing the request should return http.StatusBadRequest if err != nil { return err } ctx := sr.obsrecv.StartTracesOp(req.Context()) td, err := jaeger.ProtoToTraces(sapm.Batches) if err != nil { return err } // pass the trace data to the next consumer err = sr.nextConsumer.ConsumeTraces(ctx, td) if err != nil { err = fmt.Errorf("error passing trace data to next consumer: %w", err) } sr.obsrecv.EndTracesOp(ctx, "protobuf", td.SpanCount(), err) return err } // HTTPHandlerFunc returns an http.HandlerFunc that handles SAPM requests func (sr *sapmReceiver) HTTPHandlerFunc(rw http.ResponseWriter, req *http.Request) { // handle the request payload err := sr.handleRequest(req) if err != nil { errorutil.HTTPError(rw, err) return } // respBytes are bytes to write to the http.Response // build the response message // NOTE currently the response is an empty struct. As an optimization this receiver will pass a // byte array that was generated in the receiver's constructor. If this receiver needs to return // more than an empty struct, then the sapm.PostSpansResponse{} struct will need to be marshaled // and on error a http.StatusInternalServerError should be written to the http.ResponseWriter and // this function should immediately return. 
	respBytes := sr.defaultResponse
	rw.Header().Set(sapmprotocol.ContentTypeHeaderName, sapmprotocol.ContentTypeHeaderValue)

	// write the response if client does not accept gzip encoding
	if req.Header.Get(sapmprotocol.AcceptEncodingHeaderName) != sapmprotocol.GZipEncodingHeaderValue {
		// write the response bytes
		_, err = rw.Write(respBytes)
		if err != nil {
			rw.WriteHeader(http.StatusBadRequest)
		}
		return
	}

	// gzip the response

	// get the gzip writer
	writer := gzipWriterPool.Get().(*gzip.Writer)
	defer gzipWriterPool.Put(writer)

	var gzipBuffer bytes.Buffer

	// reset the writer with the gzip buffer
	writer.Reset(&gzipBuffer)

	// gzip the responseBytes
	_, err = writer.Write(respBytes)
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	// close the gzip writer and write gzip footer
	err = writer.Close()
	if err != nil {
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	// write the successfully gzipped payload
	rw.Header().Set(sapmprotocol.ContentEncodingHeaderName, sapmprotocol.GZipEncodingHeaderValue)
	_, err = rw.Write(gzipBuffer.Bytes())
	if err != nil {
		rw.WriteHeader(http.StatusBadRequest)
	}
}

// Start starts the sapmReceiver's server.
func (sr *sapmReceiver) Start(ctx context.Context, host component.Host) error {
	// server.Handler will be nil on initial call, otherwise noop.
	if sr.server != nil && sr.server.Handler != nil {
		return nil
	}

	// set up the listener
	ln, err := sr.config.ServerConfig.ToListener(ctx)
	if err != nil {
		return fmt.Errorf("failed to bind to address %s: %w", sr.config.Endpoint, err)
	}

	// use gorilla mux to create a router/handler
	nr := mux.NewRouter()
	nr.HandleFunc(sapmprotocol.TraceEndpointV2, sr.HTTPHandlerFunc)

	// create a server with the handler
	sr.server, err = sr.config.ServerConfig.ToServer(ctx, host, sr.settings, nr)
	if err != nil {
		return err
	}

	sr.shutdownWG.Add(1)
	// run the server on a routine
	go func() {
		defer sr.shutdownWG.Done()
		if errHTTP := sr.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
		}
	}()
	return nil
}

// Shutdown stops the sapmReceiver's server.
func (sr *sapmReceiver) Shutdown(context.Context) error {
	if sr.server == nil {
		return nil
	}
	err := sr.server.Close()
	sr.shutdownWG.Wait()
	return err
}

// this validates at compile time that sapmReceiver implements the receiver.Traces interface
var _ receiver.Traces = (*sapmReceiver)(nil)

// newReceiver creates a sapmReceiver that receives SAPM over http
func newReceiver(
	params receiver.Settings,
	config *Config,
	nextConsumer consumer.Traces,
) (receiver.Traces, error) {
	// build the response message
	defaultResponse := &splunksapm.PostSpansResponse{}
	defaultResponseBytes, err := defaultResponse.Marshal()
	if err != nil {
		return nil, fmt.Errorf("failed to marshal default response body for %v receiver: %w", params.ID, err)
	}
	transport := "http"
	if config.TLSSetting != nil {
		transport = "https"
	}
	obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             params.ID,
		Transport:              transport,
		ReceiverCreateSettings: params,
	})
	if err != nil {
		return nil, err
	}
	return &sapmReceiver{
		settings:        params.TelemetrySettings,
		config:          config,
		nextConsumer:    nextConsumer,
		defaultResponse: defaultResponseBytes,
		obsrecv:         obsrecv,
	}, nil
}
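The gzip.Writer pooling above avoids reallocating compressor state on every response. A minimal standalone sketch of the same pattern, under the assumption that the pooled writer is always Reset onto a fresh buffer before use; gzipBytes is a hypothetical helper name:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

// Writers are created pointing at io.Discard and retargeted with Reset.
var gzipWriterPool = &sync.Pool{
	New: func() any { return gzip.NewWriter(io.Discard) },
}

func gzipBytes(payload []byte) ([]byte, error) {
	w := gzipWriterPool.Get().(*gzip.Writer)
	defer gzipWriterPool.Put(w)

	var buf bytes.Buffer
	w.Reset(&buf) // point the pooled writer at this response's buffer
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // Close flushes and writes the gzip footer
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := gzipBytes([]byte("hello"))
	fmt.Println(len(out) > 0, err) // true <nil>
}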
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver" import ( "errors" "go.opentelemetry.io/collector/config/confighttp" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" ) var errEmptyEndpoint = errors.New("empty endpoint") // Config defines configuration for the SignalFx receiver. type Config struct { confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct // Deprecated: `access_token_passthrough` is deprecated. // Please enable include_metadata in the receiver and add the following config to the batch processor: // batch: // metadata_keys: [X-Sf-Token] splunk.AccessTokenPassthroughConfig `mapstructure:",squash"` } // Validate verifies that the endpoint is valid and the configured port is not 0 func (rCfg *Config) Validate() error { if rCfg.ServerConfig.Endpoint == "" { return errEmptyEndpoint } _, err := extractPortFromEndpoint(rCfg.ServerConfig.Endpoint) if err != nil { return err } return nil }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver" import ( "context" "errors" "fmt" "net" "strconv" "sync" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata" ) // This file implements factory for SignalFx receiver. const ( // Default endpoint to bind to. defaultEndpoint = "localhost:9943" ) // NewFactory creates a factory for SignalFx receiver. func NewFactory() receiver.Factory { return receiver.NewFactory( metadata.Type, createDefaultConfig, receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), receiver.WithLogs(createLogsReceiver, metadata.LogsStability)) } func createDefaultConfig() component.Config { return &Config{ ServerConfig: confighttp.ServerConfig{ Endpoint: defaultEndpoint, }, } } // extract the port number from string in "address:port" format. If the // port number cannot be extracted returns an error. func extractPortFromEndpoint(endpoint string) (int, error) { _, portStr, err := net.SplitHostPort(endpoint) if err != nil { return 0, fmt.Errorf("endpoint is not formatted correctly: %w", err) } port, err := strconv.ParseInt(portStr, 10, 0) if err != nil { return 0, fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { return 0, errors.New("port number must be between 1 and 65535") } return int(port), nil } // createMetricsReceiver creates a metrics receiver based on provided config. func createMetricsReceiver( _ context.Context, params receiver.Settings, cfg component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { rCfg := cfg.(*Config) if rCfg.AccessTokenPassthrough { params.Logger.Warn( "access_token_passthrough is deprecated. " + "Please enable include_metadata in the receiver and add " + "`metadata_keys: [X-Sf-Token]` to the batch processor", ) } receiverLock.Lock() r := receivers[rCfg] if r == nil { var err error r, err = newReceiver(params, *rCfg) if err != nil { return nil, err } receivers[rCfg] = r } receiverLock.Unlock() r.RegisterMetricsConsumer(consumer) return r, nil } // createLogsReceiver creates a logs receiver based on provided config. func createLogsReceiver( _ context.Context, params receiver.Settings, cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { rCfg := cfg.(*Config) if rCfg.AccessTokenPassthrough { params.Logger.Warn( "access_token_passthrough is deprecated. " + "Please enable include_metadata in the receiver and add " + "`metadata_keys: [X-Sf-Token]` to the batch processor", ) } receiverLock.Lock() r := receivers[rCfg] if r == nil { var err error r, err = newReceiver(params, *rCfg) if err != nil { return nil, err } receivers[rCfg] = r } receiverLock.Unlock() r.RegisterLogsConsumer(consumer) return r, nil } var ( receiverLock sync.Mutex receivers = map[*Config]*sfxReceiver{} )
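Both factory functions above share one receiver instance per *Config pointer, so a metrics pipeline and a logs pipeline configured with the same receiver ID register consumers on the same server. A simplified standalone sketch of that sharing scheme; the config and rcvr types are stand-ins, not the receiver's real types:

package main

import (
	"fmt"
	"sync"
)

type config struct{ endpoint string }
type rcvr struct{ cfg *config }

var (
	lock  sync.Mutex
	cache = map[*config]*rcvr{}
)

// getOrCreate returns the cached receiver for this exact config pointer,
// creating it on first use, mirroring the factory's receivers map.
func getOrCreate(cfg *config) *rcvr {
	lock.Lock()
	defer lock.Unlock()
	if r, ok := cache[cfg]; ok {
		return r
	}
	r := &rcvr{cfg: cfg}
	cache[cfg] = r
	return r
}

func main() {
	cfg := &config{endpoint: "localhost:9943"}
	fmt.Println(getOrCreate(cfg) == getOrCreate(cfg)) // true: one shared instance
}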
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"
	"unsafe"

	"github.com/gorilla/mux"
	sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componentstatus"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/errorutil"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver/internal/metadata"
)

const (
	defaultServerTimeout = 20 * time.Second

	responseOK                       = "OK"
	responseInvalidMethod            = "Only \"POST\" method is supported"
	responseEventsInvalidContentType = "\"Content-Type\" must be \"application/x-protobuf\""
	responseInvalidContentType       = "\"Content-Type\" must be either \"application/x-protobuf\" or \"application/x-protobuf;format=otlp\""
	responseInvalidEncoding          = "\"Content-Encoding\" must be \"gzip\" or empty"
	responseErrGzipReader            = "Error on gzip body"
	responseErrReadBody              = "Failed to read message body"
	responseErrUnmarshalBody         = "Failed to unmarshal message body"
	responseErrNextConsumer          = "Internal Server Error"
	responseErrLogsNotConfigured     = "Log pipeline has not been configured to handle events"
	responseErrMetricsNotConfigured  = "Metric pipeline has not been configured to handle datapoints"

	// Centralizing some HTTP and related string constants.
	protobufContentType       = "application/x-protobuf"
	otlpProtobufContentType   = "application/x-protobuf;format=otlp"
	gzipEncoding              = "gzip"
	httpContentTypeHeader     = "Content-Type"
	httpContentEncodingHeader = "Content-Encoding"
)

var (
	okRespBody                   = initJSONResponse(responseOK)
	invalidMethodRespBody        = initJSONResponse(responseInvalidMethod)
	invalidContentRespBody       = initJSONResponse(responseInvalidContentType)
	invalidEventsContentRespBody = initJSONResponse(responseEventsInvalidContentType)
	invalidEncodingRespBody      = initJSONResponse(responseInvalidEncoding)
	errGzipReaderRespBody        = initJSONResponse(responseErrGzipReader)
	errReadBodyRespBody          = initJSONResponse(responseErrReadBody)
	errUnmarshalBodyRespBody     = initJSONResponse(responseErrUnmarshalBody)
	errNextConsumerRespBody      = initJSONResponse(responseErrNextConsumer)
	errLogsNotConfigured         = initJSONResponse(responseErrLogsNotConfigured)
	errMetricsNotConfigured      = initJSONResponse(responseErrMetricsNotConfigured)

	translator = &signalfx.ToTranslator{}
)

// sfxReceiver implements receiver.Metrics and receiver.Logs for the SignalFx protocol.
type sfxReceiver struct {
	settings        receiver.Settings
	config          *Config
	metricsConsumer consumer.Metrics
	logsConsumer    consumer.Logs
	server          *http.Server
	shutdownWG      sync.WaitGroup
	obsrecv         *receiverhelper.ObsReport
}

var _ receiver.Metrics = (*sfxReceiver)(nil)

// newReceiver creates the SignalFx receiver with the given configuration.
func newReceiver(
	settings receiver.Settings,
	config Config,
) (*sfxReceiver, error) {
	transport := "http"
	if config.TLSSetting != nil {
		transport = "https"
	}
	obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             settings.ID,
		Transport:              transport,
		ReceiverCreateSettings: settings,
	})
	if err != nil {
		return nil, err
	}
	r := &sfxReceiver{
		settings: settings,
		config:   &config,
		obsrecv:  obsrecv,
	}

	return r, nil
}

func (r *sfxReceiver) RegisterMetricsConsumer(mc consumer.Metrics) {
	r.metricsConsumer = mc
}

func (r *sfxReceiver) RegisterLogsConsumer(lc consumer.Logs) {
	r.logsConsumer = lc
}

// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *sfxReceiver) Start(ctx context.Context, host component.Host) error {
	if r.server != nil {
		return nil
	}

	// set up the listener
	ln, err := r.config.ServerConfig.ToListener(ctx)
	if err != nil {
		return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
	}

	mx := mux.NewRouter()
	mx.HandleFunc("/v2/datapoint", r.handleDatapointReq)
	mx.HandleFunc("/v2/event", r.handleEventReq)

	r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
	if err != nil {
		return err
	}

	// TODO: Evaluate what properties should be configurable, for now
	// set some hard-coded values.
	r.server.ReadHeaderTimeout = defaultServerTimeout
	r.server.WriteTimeout = defaultServerTimeout

	r.shutdownWG.Add(1)
	go func() {
		defer r.shutdownWG.Done()

		if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
		}
	}()
	return nil
}

// Shutdown tells the receiver that it should stop reception,
// giving it a chance to perform any necessary clean-up.
func (r *sfxReceiver) Shutdown(context.Context) error { if r.server == nil { return nil } err := r.server.Close() r.shutdownWG.Wait() return err } func (r *sfxReceiver) readBody(ctx context.Context, resp http.ResponseWriter, req *http.Request) ([]byte, bool) { encoding := req.Header.Get(httpContentEncodingHeader) if encoding != "" && encoding != gzipEncoding { r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, nil) return nil, false } bodyReader := req.Body if encoding == gzipEncoding { var err error bodyReader, err = gzip.NewReader(bodyReader) if err != nil { r.failRequest(ctx, resp, http.StatusBadRequest, errGzipReaderRespBody, err) return nil, false } } body, err := io.ReadAll(bodyReader) if err != nil { r.failRequest(ctx, resp, http.StatusBadRequest, errReadBodyRespBody, err) return nil, false } return body, true } func (r *sfxReceiver) writeResponse(ctx context.Context, resp http.ResponseWriter, err error) { if err != nil { r.failRequest(ctx, resp, errorutil.GetHTTPStatusCodeFromError(err), errNextConsumerRespBody, err) return } resp.WriteHeader(http.StatusOK) _, err = resp.Write(okRespBody) if err != nil { r.failRequest(ctx, resp, http.StatusInternalServerError, errNextConsumerRespBody, err) } } func (r *sfxReceiver) handleDatapointReq(resp http.ResponseWriter, req *http.Request) { ctx := r.obsrecv.StartMetricsOp(req.Context()) if r.metricsConsumer == nil { r.failRequest(ctx, resp, http.StatusBadRequest, errMetricsNotConfigured, nil) return } if req.Method != http.MethodPost { r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil) return } otlpFormat := false switch req.Header.Get(httpContentTypeHeader) { case protobufContentType: case otlpProtobufContentType: otlpFormat = true default: r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidContentRespBody, nil) return } body, ok := r.readBody(ctx, resp, req) if !ok { return } r.settings.Logger.Debug("Handling metrics data") var md pmetric.Metrics if otlpFormat { r.settings.Logger.Debug("Received request is in OTLP format") otlpreq := pmetricotlp.NewExportRequest() if err := otlpreq.UnmarshalProto(body); err != nil { r.settings.Logger.Debug("OTLP data unmarshalling failed", zap.Error(err)) r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err) return } md = otlpreq.Metrics() } else { msg := &sfxpb.DataPointUploadMessage{} err := msg.Unmarshal(body) if err != nil { r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err) return } md, err = translator.ToMetrics(msg.Datapoints) if err != nil { r.settings.Logger.Debug("SignalFx conversion error", zap.Error(err)) } } dataPointCount := md.DataPointCount() if dataPointCount == 0 { r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil) _, _ = resp.Write(okRespBody) return } r.addAccessTokenLabel(md, req) err := r.metricsConsumer.ConsumeMetrics(ctx, md) r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), dataPointCount, err) r.writeResponse(ctx, resp, err) } func (r *sfxReceiver) handleEventReq(resp http.ResponseWriter, req *http.Request) { ctx := r.obsrecv.StartMetricsOp(req.Context()) if r.logsConsumer == nil { r.failRequest(ctx, resp, http.StatusBadRequest, errLogsNotConfigured, nil) return } if req.Method != http.MethodPost { r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil) return } if req.Header.Get(httpContentTypeHeader) != protobufContentType { r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEventsContentRespBody, nil) return } 
body, ok := r.readBody(ctx, resp, req) if !ok { return } msg := &sfxpb.EventUploadMessage{} if err := msg.Unmarshal(body); err != nil { r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err) return } if len(msg.Events) == 0 { r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, nil) _, _ = resp.Write(okRespBody) return } ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() signalFxV2EventsToLogRecords(msg.Events, sl.LogRecords()) if r.config.AccessTokenPassthrough { if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" { rl.Resource().Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken) } } err := r.logsConsumer.ConsumeLogs(ctx, ld) r.obsrecv.EndMetricsOp( ctx, metadata.Type.String(), len(msg.Events), err) r.writeResponse(ctx, resp, err) } func (r *sfxReceiver) failRequest( ctx context.Context, resp http.ResponseWriter, httpStatusCode int, jsonResponse []byte, err error, ) { resp.WriteHeader(httpStatusCode) if len(jsonResponse) > 0 { // The response needs to be written as a JSON string. _, writeErr := resp.Write(jsonResponse) if writeErr != nil { r.settings.Logger.Warn( "Error writing HTTP response message", zap.Error(writeErr), zap.String("receiver", r.settings.ID.String())) } } // Use the same pattern as strings.Builder String(). msg := *(*string)(unsafe.Pointer(&jsonResponse)) r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, err) r.settings.Logger.Debug( "SignalFx receiver request failed", zap.Int("http_status_code", httpStatusCode), zap.String("msg", msg), zap.Error(err), // It handles nil error ) } func (r *sfxReceiver) addAccessTokenLabel(md pmetric.Metrics, req *http.Request) { if r.config.AccessTokenPassthrough { if accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != "" { for i := 0; i < md.ResourceMetrics().Len(); i++ { rm := md.ResourceMetrics().At(i) res := rm.Resource() res.Attributes().PutStr(splunk.SFxAccessTokenLabel, accessToken) } } } } func initJSONResponse(s string) []byte { respBody, err := json.Marshal(s) if err != nil { // This is to be used in initialization so panic here is fine. panic(err) } return respBody }
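The readBody path above conditionally wraps the request body in a gzip.Reader based on Content-Encoding. A simplified standalone sketch of that decoding step; decodeBody is a hypothetical helper, and unlike the receiver it returns errors instead of writing JSON failure responses:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// decodeBody reads a request body, transparently decompressing it when the
// client declared Content-Encoding: gzip.
func decodeBody(req *http.Request) ([]byte, error) {
	var r io.Reader = req.Body
	if req.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(req.Body)
		if err != nil {
			return nil, err
		}
		defer gz.Close()
		r = gz
	}
	return io.ReadAll(r)
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://localhost/v2/datapoint", strings.NewReader("plain body"))
	body, err := decodeBody(req)
	fmt.Println(string(body), err) // plain body <nil>
}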
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver"

import (
	sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)

// signalFxV2EventsToLogRecords converts SignalFx event protos into log
// records appended to the given plog.LogRecordSlice.
func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs plog.LogRecordSlice) {
	lrs.EnsureCapacity(len(events))

	for _, event := range events {
		lr := lrs.AppendEmpty()

		attrs := lr.Attributes()
		attrs.EnsureCapacity(2 + len(event.Dimensions) + len(event.Properties))

		for _, dim := range event.Dimensions {
			attrs.PutStr(dim.Key, dim.Value)
		}

		// The EventType field is stored as an attribute.
		eventType := event.EventType
		if eventType == "" {
			eventType = "unknown"
		}
		attrs.PutStr(splunk.SFxEventType, eventType)

		// SignalFx timestamps are in millis so convert to nanos by multiplying
		// by 1 million.
		lr.SetTimestamp(pcommon.Timestamp(event.Timestamp * 1e6))

		if event.Category != nil {
			attrs.PutInt(splunk.SFxEventCategoryKey, int64(*event.Category))
		} else {
			// This gives us an unambiguous way of determining that a log record
			// represents a SignalFx event, even if category is missing from the
			// event.
			attrs.PutEmpty(splunk.SFxEventCategoryKey)
		}

		if len(event.Properties) > 0 {
			propMap := attrs.PutEmptyMap(splunk.SFxEventPropertiesKey)
			propMap.EnsureCapacity(len(event.Properties))
			for _, prop := range event.Properties {
				// No way to tell what the value type is without testing each
				// individually.
				switch {
				case prop.Value.StrValue != nil:
					propMap.PutStr(prop.Key, prop.Value.GetStrValue())
				case prop.Value.IntValue != nil:
					propMap.PutInt(prop.Key, prop.Value.GetIntValue())
				case prop.Value.DoubleValue != nil:
					propMap.PutDouble(prop.Key, prop.Value.GetDoubleValue())
				case prop.Value.BoolValue != nil:
					propMap.PutBool(prop.Key, prop.Value.GetBoolValue())
				default:
					// If there is no property value, just insert a null to
					// record that the key was present.
					propMap.PutEmpty(prop.Key)
				}
			}
		}
	}
}
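The millisecond-to-nanosecond conversion above is the one subtle unit change in this converter. A tiny standalone sketch checking it round-trips through the standard library:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// SignalFx event timestamps are milliseconds since the epoch;
	// pcommon.Timestamp is nanoseconds, hence the 1e6 multiplier.
	eventMillis := int64(1700000000123)
	ts := pcommon.Timestamp(eventMillis * 1e6)
	fmt.Println(ts.AsTime().Equal(time.UnixMilli(eventMillis))) // true
}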
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/receiver"
	conventions "go.opentelemetry.io/collector/semconv/v1.27.0"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata"
)

// This file implements factory for Splunk HEC receiver.

const (
	// Default endpoint to bind to.
	defaultEndpoint = "localhost:8088"
)

// NewFactory creates a factory for Splunk HEC receiver.
func NewFactory() receiver.Factory {
	return receiver.NewFactory(
		metadata.Type,
		createDefaultConfig,
		receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),
		receiver.WithLogs(createLogsReceiver, metadata.LogsStability))
}

// createDefaultConfig creates the default configuration for Splunk HEC receiver.
func createDefaultConfig() component.Config {
	return &Config{
		ServerConfig: confighttp.ServerConfig{
			Endpoint: defaultEndpoint,
		},
		AccessTokenPassthroughConfig: splunk.AccessTokenPassthroughConfig{},
		HecToOtelAttrs: splunk.HecToOtelAttrs{
			Source:     splunk.DefaultSourceLabel,
			SourceType: splunk.DefaultSourceTypeLabel,
			Index:      splunk.DefaultIndexLabel,
			Host:       conventions.AttributeHostName,
		},
		RawPath:    splunk.DefaultRawPath,
		HealthPath: splunk.DefaultHealthPath,
		Ack: Ack{
			Extension: nil,
			Path:      splunk.DefaultAckPath,
		},
		Splitting: SplittingStrategyLine,
	}
}

// createMetricsReceiver creates a metrics receiver based on provided config.
func createMetricsReceiver(
	_ context.Context,
	params receiver.Settings,
	cfg component.Config,
	consumer consumer.Metrics,
) (receiver.Metrics, error) {
	var err error
	var recv receiver.Metrics
	rCfg := cfg.(*Config)
	r := receivers.GetOrAdd(cfg, func() component.Component {
		recv, err = newReceiver(params, *rCfg)
		return recv
	})
	if err != nil {
		return nil, err
	}
	r.Unwrap().(*splunkReceiver).metricsConsumer = consumer
	return r, nil
}

// createLogsReceiver creates a logs receiver based on provided config.
func createLogsReceiver(
	_ context.Context,
	params receiver.Settings,
	cfg component.Config,
	consumer consumer.Logs,
) (receiver.Logs, error) {
	var err error
	var recv receiver.Logs
	rCfg := cfg.(*Config)
	r := receivers.GetOrAdd(cfg, func() component.Component {
		recv, err = newReceiver(params, *rCfg)
		return recv
	})
	if err != nil {
		return nil, err
	}
	r.Unwrap().(*splunkReceiver).logsConsumer = consumer
	return r, nil
}

var receivers = sharedcomponent.NewSharedComponents()
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" import ( "compress/gzip" "context" "encoding/json" "errors" "fmt" "io" "net/http" "strconv" "strings" "sync" "time" "github.com/google/uuid" "github.com/gorilla/mux" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver/internal/metadata" ) const ( defaultServerTimeout = 20 * time.Second ackResponse = `{"acks": %s}` responseOK = `{"text": "Success", "code": 0}` responseOKWithAckID = `{"text": "Success", "code": 0, "ackId": %d}` responseHecHealthy = `{"text": "HEC is healthy", "code": 17}` responseInvalidMethodPostOnly = `"Only \"POST\" method is supported"` responseInvalidEncoding = `"\"Content-Encoding\" must be \"gzip\" or empty"` responseInvalidDataFormat = `{"text":"Invalid data format","code":6}` responseErrEventRequired = `{"text":"Event field is required","code":12}` responseErrEventBlank = `{"text":"Event field cannot be blank","code":13}` responseErrGzipReader = `"Error on gzip body"` responseErrUnmarshalBody = `"Failed to unmarshal message body"` responseErrInternalServerError = `"Internal Server Error"` responseErrUnsupportedMetricEvent = `"Unsupported metric event"` responseErrUnsupportedLogEvent = `"Unsupported log event"` responseErrHandlingIndexedFields = `{"text":"Error in handling indexed fields","code":15,"invalid-event-number":%d}` responseErrDataChannelMissing = `{"text": "Data channel is missing","code":10}` responseErrInvalidDataChannel = `{"text": "Invalid data channel", "code": 11}` responseNoData = `{"text":"No data","code":5}` // Centralizing some HTTP and related string constants. gzipEncoding = "gzip" httpContentEncodingHeader = "Content-Encoding" httpContentTypeHeader = "Content-Type" httpJSONTypeHeader = "application/json" ) var ( errEmptyEndpoint = errors.New("empty endpoint") errInvalidMethod = errors.New("invalid http method") errInvalidEncoding = errors.New("invalid encoding") errExtensionMissing = errors.New("ack extension not found") okRespBody = []byte(responseOK) eventRequiredRespBody = []byte(responseErrEventRequired) eventBlankRespBody = []byte(responseErrEventBlank) requiredDataChannelHeader = []byte(responseErrDataChannelMissing) invalidEncodingRespBody = []byte(responseInvalidEncoding) invalidFormatRespBody = []byte(responseInvalidDataFormat) invalidMethodRespBodyPostOnly = []byte(responseInvalidMethodPostOnly) errGzipReaderRespBody = []byte(responseErrGzipReader) errUnmarshalBodyRespBody = []byte(responseErrUnmarshalBody) errInternalServerError = []byte(responseErrInternalServerError) errUnsupportedMetricEvent = []byte(responseErrUnsupportedMetricEvent) errUnsupportedLogEvent = []byte(responseErrUnsupportedLogEvent) noDataRespBody = []byte(responseNoData) ) // splunkReceiver implements the receiver.Metrics for Splunk HEC metric protocol. 
type splunkReceiver struct {
	settings        receiver.Settings
	config          *Config
	logsConsumer    consumer.Logs
	metricsConsumer consumer.Metrics
	server          *http.Server
	shutdownWG      sync.WaitGroup
	obsrecv         *receiverhelper.ObsReport
	gzipReaderPool  *sync.Pool
	ackExt          ackextension.AckExtension
}

var (
	_ receiver.Metrics = (*splunkReceiver)(nil)
	_ receiver.Logs    = (*splunkReceiver)(nil)
)

// newReceiver creates the Splunk HEC receiver with the given configuration.
func newReceiver(settings receiver.Settings, config Config) (*splunkReceiver, error) {
	if config.Endpoint == "" {
		return nil, errEmptyEndpoint
	}

	transport := "http"
	if config.TLSSetting != nil {
		transport = "https"
	}
	obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             settings.ID,
		Transport:              transport,
		ReceiverCreateSettings: settings,
	})
	if err != nil {
		return nil, err
	}
	r := &splunkReceiver{
		settings: settings,
		config:   &config,
		server: &http.Server{
			Addr: config.Endpoint,
			// TODO: Evaluate what properties should be configurable, for now
			// set some hard-coded values.
			ReadHeaderTimeout: defaultServerTimeout,
			WriteTimeout:      defaultServerTimeout,
		},
		obsrecv:        obsrecv,
		gzipReaderPool: &sync.Pool{New: func() any { return new(gzip.Reader) }},
	}

	return r, nil
}

// Start tells the receiver to start its processing.
// By convention the consumer of the received data is set when the receiver
// instance is created.
func (r *splunkReceiver) Start(ctx context.Context, host component.Host) error {
	// server.Handler will be nil on initial call, otherwise noop.
	if r.server != nil && r.server.Handler != nil {
		return nil
	}

	mx := mux.NewRouter()
	// set up the ack API handler if the ack extension is present
	if r.config.Ack.Extension != nil {
		if ext, found := host.GetExtensions()[*r.config.Ack.Extension]; found {
			r.ackExt = ext.(ackextension.AckExtension)
			mx.NewRoute().Path(r.config.Ack.Path).HandlerFunc(r.handleAck)
		} else {
			return fmt.Errorf("specified ack extension with id %q could not be found", *r.config.Ack.Extension)
		}
	}

	mx.NewRoute().Path(r.config.HealthPath).HandlerFunc(r.handleHealthReq)
	mx.NewRoute().Path(r.config.HealthPath + "/1.0").HandlerFunc(r.handleHealthReq).Methods(http.MethodGet)
	if r.logsConsumer != nil {
		mx.NewRoute().Path(r.config.RawPath).HandlerFunc(r.handleRawReq)
	}
	mx.NewRoute().HandlerFunc(r.handleReq)

	// set up the listener
	ln, err := r.config.ServerConfig.ToListener(ctx)
	if err != nil {
		return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err)
	}

	r.server, err = r.config.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, mx)
	if err != nil {
		return err
	}

	// TODO: Evaluate what properties should be configurable, for now
	// set some hard-coded values.
	r.server.ReadHeaderTimeout = defaultServerTimeout
	r.server.WriteTimeout = defaultServerTimeout

	r.shutdownWG.Add(1)
	go func() {
		defer r.shutdownWG.Done()
		if errHTTP := r.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
		}
	}()

	return err
}

// Shutdown tells the receiver that it should stop reception,
// giving it a chance to perform any necessary clean-up.
func (r *splunkReceiver) Shutdown(context.Context) error { err := r.server.Close() r.shutdownWG.Wait() return err } func (r *splunkReceiver) processSuccessResponseWithAck(resp http.ResponseWriter, channelID string) error { if r.ackExt == nil { panic("writing response with ack when ack extension is not configured") } ackID := r.ackExt.ProcessEvent(channelID) r.ackExt.Ack(channelID, ackID) return r.processSuccessResponse(resp, []byte(fmt.Sprintf(responseOKWithAckID, ackID))) } func (r *splunkReceiver) processSuccessResponse(resp http.ResponseWriter, bodyContent []byte) error { resp.Header().Set(httpContentTypeHeader, httpJSONTypeHeader) resp.WriteHeader(http.StatusOK) _, err := resp.Write(bodyContent) return err } func (r *splunkReceiver) handleAck(resp http.ResponseWriter, req *http.Request) { if req.Method != http.MethodPost { r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod) return } // shouldn't run into this case since we only enable this handler IF ackExt exists. But we have this check just in case if r.ackExt == nil { r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, errExtensionMissing) return } var channelID string var extracted bool if channelID, extracted = r.extractChannel(req); extracted { if channelErr := r.validateChannelHeader(channelID); channelErr != nil { r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr) return } } else { r.failRequest(resp, http.StatusBadRequest, requiredDataChannelHeader, nil) return } dec := json.NewDecoder(req.Body) var ackRequest splunk.AckRequest err := dec.Decode(&ackRequest) if err != nil { r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err) return } if len(ackRequest.Acks) == 0 { r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, errors.New("request body must include at least one ackID to be queried")) return } queriedAcks := r.ackExt.QueryAcks(channelID, ackRequest.Acks) ackString, _ := json.Marshal(queriedAcks) if err := r.processSuccessResponse(resp, []byte(fmt.Sprintf(ackResponse, ackString))); err != nil { r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err) } } func (r *splunkReceiver) handleRawReq(resp http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx = r.obsrecv.StartLogsOp(ctx) if req.Method != http.MethodPost { r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod) return } encoding := req.Header.Get(httpContentEncodingHeader) if encoding != "" && encoding != gzipEncoding { r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding) return } var channelID string var extracted bool if channelID, extracted = r.extractChannel(req); extracted { if channelErr := r.validateChannelHeader(channelID); channelErr != nil { r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr) return } } if req.ContentLength == 0 { r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil) r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil) return } bodyReader := req.Body if encoding == gzipEncoding { reader := r.gzipReaderPool.Get().(*gzip.Reader) err := reader.Reset(bodyReader) if err != nil { r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err) _, _ = io.ReadAll(req.Body) _ = req.Body.Close() return } bodyReader = reader defer r.gzipReaderPool.Put(reader) } resourceCustomizer := r.createResourceCustomizer(req) query := req.URL.Query() var timestamp 
pcommon.Timestamp
	if query.Has(queryTime) {
		t, err := strconv.ParseInt(query.Get(queryTime), 10, 64)
		if t < 0 {
			err = errors.New("time cannot be less than 0")
		}
		if err != nil {
			r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
			return
		}
		timestamp = pcommon.NewTimestampFromTime(time.Unix(t, 0))
	}

	ld, slLen, err := splunkHecRawToLogData(bodyReader, query, resourceCustomizer, r.config, timestamp)
	if err != nil {
		r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, err)
		return
	}
	consumerErr := r.logsConsumer.ConsumeLogs(ctx, ld)

	_ = bodyReader.Close()

	if consumerErr != nil {
		r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, consumerErr)
	} else {
		var ackErr error
		if len(channelID) > 0 && r.ackExt != nil {
			ackErr = r.processSuccessResponseWithAck(resp, channelID)
		} else {
			ackErr = r.processSuccessResponse(resp, okRespBody)
		}
		if ackErr != nil {
			// Report the ack/response failure, not the (nil) translation error.
			r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
		} else {
			r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), slLen, nil)
		}
	}
}

func (r *splunkReceiver) extractChannel(req *http.Request) (string, bool) {
	// check header
	for k, v := range req.Header {
		if strings.EqualFold(k, splunk.HTTPSplunkChannelHeader) {
			return strings.ToUpper(v[0]), true
		}
	}
	// check query param
	for k, v := range req.URL.Query() {
		if strings.EqualFold(k, "channel") {
			return strings.ToUpper(v[0]), true
		}
	}

	return "", false
}

func (r *splunkReceiver) validateChannelHeader(channelID string) error {
	if len(channelID) == 0 {
		return errors.New(responseErrDataChannelMissing)
	}

	// channel id must be a valid uuid
	// https://docs.splunk.com/Documentation/Splunk/9.2.1/Data/AboutHECIDXAck#:~:text=close%20the%20file.-,About%20channels%20and%20sending%20data,-Sending%20events%20to
	_, err := uuid.Parse(channelID)
	if err != nil {
		return errors.New(responseErrInvalidDataChannel)
	}

	return nil
}

func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request) {
	ctx := req.Context()
	if req.Method != http.MethodPost {
		r.failRequest(resp, http.StatusBadRequest, invalidMethodRespBodyPostOnly, errInvalidMethod)
		return
	}

	encoding := req.Header.Get(httpContentEncodingHeader)
	if encoding != "" && encoding != gzipEncoding {
		r.failRequest(resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, errInvalidEncoding)
		return
	}

	channelID, extracted := r.extractChannel(req)
	if extracted {
		if channelErr := r.validateChannelHeader(channelID); channelErr != nil {
			r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr)
			return
		}
	}

	bodyReader := req.Body
	if encoding == gzipEncoding {
		reader := r.gzipReaderPool.Get().(*gzip.Reader)
		err := reader.Reset(bodyReader)
		if err != nil {
			r.failRequest(resp, http.StatusBadRequest, errGzipReaderRespBody, err)
			return
		}
		bodyReader = reader
		defer r.gzipReaderPool.Put(reader)
	}

	if req.ContentLength == 0 {
		r.failRequest(resp, http.StatusBadRequest, noDataRespBody, nil)
		return
	}

	dec := jsoniter.NewDecoder(bodyReader)

	var events []*splunk.Event
	var metricEvents []*splunk.Event

	for dec.More() {
		var msg splunk.Event
		err := dec.Decode(&msg)
		if err != nil {
			r.failRequest(resp, http.StatusBadRequest, invalidFormatRespBody, err)
			return
		}

		if msg.Event == nil {
			r.failRequest(resp, http.StatusBadRequest, eventRequiredRespBody, nil)
			return
		}

		if msg.Event == "" {
			r.failRequest(resp, http.StatusBadRequest, eventBlankRespBody, nil)
			return
		}

		for _, v := range msg.Fields {
			if !isFlatJSONField(v) {
				r.failRequest(resp, http.StatusBadRequest, []byte(fmt.Sprintf(responseErrHandlingIndexedFields, len(events)+len(metricEvents))), nil)
				return
			}
		}
		if msg.IsMetric() {
			if r.metricsConsumer == nil {
				r.failRequest(resp, http.StatusBadRequest, errUnsupportedMetricEvent, err)
				return
			}
			metricEvents = append(metricEvents, &msg)
		} else {
			if r.logsConsumer == nil {
				r.failRequest(resp, http.StatusBadRequest, errUnsupportedLogEvent, err)
				return
			}
			events = append(events, &msg)
		}
	}

	resourceCustomizer := r.createResourceCustomizer(req)
	if r.logsConsumer != nil && len(events) > 0 {
		ld, err := splunkHecToLogData(r.settings.Logger, events, resourceCustomizer, r.config)
		if err != nil {
			r.failRequest(resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err)
			return
		}
		ctx = r.obsrecv.StartLogsOp(ctx)
		decodeErr := r.logsConsumer.ConsumeLogs(ctx, ld)
		// Record the consumer outcome on the obsreport operation.
		r.obsrecv.EndLogsOp(ctx, metadata.Type.String(), len(events), decodeErr)
		if decodeErr != nil {
			r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
			return
		}
	}
	if r.metricsConsumer != nil && len(metricEvents) > 0 {
		md, _ := splunkHecToMetricsData(r.settings.Logger, metricEvents, resourceCustomizer, r.config)

		ctx = r.obsrecv.StartMetricsOp(ctx)
		decodeErr := r.metricsConsumer.ConsumeMetrics(ctx, md)
		// Record the consumer outcome on the obsreport operation.
		r.obsrecv.EndMetricsOp(ctx, metadata.Type.String(), len(metricEvents), decodeErr)
		if decodeErr != nil {
			r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, decodeErr)
			return
		}
	}

	var ackErr error
	if len(channelID) > 0 && r.ackExt != nil {
		ackErr = r.processSuccessResponseWithAck(resp, channelID)
	} else {
		ackErr = r.processSuccessResponse(resp, okRespBody)
	}
	if ackErr != nil {
		r.failRequest(resp, http.StatusInternalServerError, errInternalServerError, ackErr)
	}
}

func (r *splunkReceiver) createResourceCustomizer(req *http.Request) func(resource pcommon.Resource) {
	if r.config.AccessTokenPassthrough {
		accessToken := req.Header.Get("Authorization")
		if strings.HasPrefix(accessToken, splunk.HECTokenHeader+" ") {
			accessTokenValue := accessToken[len(splunk.HECTokenHeader)+1:]
			return func(resource pcommon.Resource) {
				resource.Attributes().PutStr(splunk.HecTokenLabel, accessTokenValue)
			}
		}
	}
	return nil
}

func (r *splunkReceiver) failRequest(
	resp http.ResponseWriter,
	httpStatusCode int,
	jsonResponse []byte,
	err error,
) {
	resp.WriteHeader(httpStatusCode)
	if len(jsonResponse) > 0 {
		// The response needs to be written as a JSON string.
		resp.Header().Add("Content-Type", "application/json")
		_, writeErr := resp.Write(jsonResponse)
		if writeErr != nil {
			r.settings.Logger.Warn("Error writing HTTP response message", zap.Error(writeErr))
		}
	}

	if r.settings.Logger.Core().Enabled(zap.DebugLevel) {
		msg := string(jsonResponse)
		r.settings.Logger.Debug(
			"Splunk HEC receiver request failed",
			zap.Int("http_status_code", httpStatusCode),
			zap.String("msg", msg),
			zap.Error(err), // It handles nil error
		)
	}
}

func (r *splunkReceiver) handleHealthReq(writer http.ResponseWriter, _ *http.Request) {
	writer.Header().Add("Content-Type", "application/json")
	writer.WriteHeader(http.StatusOK)
	_, _ = writer.Write([]byte(responseHecHealthy))
}

func isFlatJSONField(field any) bool {
	switch value := field.(type) {
	case map[string]any:
		return false
	case []any:
		for _, v := range value {
			switch v.(type) {
			case map[string]any, []any:
				return false
			}
		}
	}
	return true
}
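The isFlatJSONField check enforces the HEC rule that indexed fields may be scalars or arrays of scalars, but never objects or nested arrays. A standalone sketch that copies the same logic so its behavior can be seen directly:

package main

import "fmt"

// isFlatJSONField mirrors the receiver's flatness check above.
func isFlatJSONField(field any) bool {
	switch value := field.(type) {
	case map[string]any:
		return false // objects are never flat
	case []any:
		for _, v := range value {
			switch v.(type) {
			case map[string]any, []any:
				return false // nested containers disqualify the array
			}
		}
	}
	return true
}

func main() {
	fmt.Println(isFlatJSONField("value"))                  // true: scalar
	fmt.Println(isFlatJSONField([]any{"a", 1.0}))          // true: array of scalars
	fmt.Println(isFlatJSONField(map[string]any{"k": "v"})) // false: object
	fmt.Println(isFlatJSONField([]any{map[string]any{}})) // false: nested object
}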
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" import ( "bufio" "errors" "io" "net/url" "sort" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" ) const ( // splunk metadata index = "index" source = "source" sourcetype = "sourcetype" host = "host" queryTime = "time" ) var errCannotConvertValue = errors.New("cannot convert field value to attribute") // splunkHecToLogData transforms splunk events into logs func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (plog.Logs, error) { ld := plog.NewLogs() scopeLogsMap := make(map[[4]string]plog.ScopeLogs) for _, event := range events { key := [4]string{event.Host, event.Source, event.SourceType, event.Index} var sl plog.ScopeLogs var found bool if sl, found = scopeLogsMap[key]; !found { rl := ld.ResourceLogs().AppendEmpty() sl = rl.ScopeLogs().AppendEmpty() scopeLogsMap[key] = sl appendSplunkMetadata(rl, config.HecToOtelAttrs, event.Host, event.Source, event.SourceType, event.Index) if resourceCustomizer != nil { resourceCustomizer(rl.Resource()) } } // The SourceType field is the most logical "name" of the event. logRecord := sl.LogRecords().AppendEmpty() if err := convertToValue(logger, event.Event, logRecord.Body()); err != nil { return ld, err } // Splunk timestamps are in seconds so convert to nanos by multiplying // by 1 billion. logRecord.SetTimestamp(pcommon.Timestamp(event.Time * 1e9)) // Set event fields first, so the specialized attributes overwrite them if needed. 
keys := make([]string, 0, len(event.Fields)) for k := range event.Fields { keys = append(keys, k) } sort.Strings(keys) for _, key := range keys { val := event.Fields[key] err := convertToValue(logger, val, logRecord.Attributes().PutEmpty(key)) if err != nil { return ld, err } } } return ld, nil } // splunkHecRawToLogData transforms raw splunk event into log func splunkHecRawToLogData(bodyReader io.Reader, query url.Values, resourceCustomizer func(pcommon.Resource), config *Config, timestamp pcommon.Timestamp) (plog.Logs, int, error) { ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() appendSplunkMetadata(rl, config.HecToOtelAttrs, query.Get(host), query.Get(source), query.Get(sourcetype), query.Get(index)) if resourceCustomizer != nil { resourceCustomizer(rl.Resource()) } sl := rl.ScopeLogs().AppendEmpty() if config.Splitting == SplittingStrategyNone { b, err := io.ReadAll(bodyReader) if err != nil { return ld, 0, err } logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStr(string(b)) logRecord.SetTimestamp(timestamp) } else { sc := bufio.NewScanner(bodyReader) for sc.Scan() { logRecord := sl.LogRecords().AppendEmpty() logLine := sc.Text() logRecord.Body().SetStr(logLine) logRecord.SetTimestamp(timestamp) } } return ld, sl.LogRecords().Len(), nil } func appendSplunkMetadata(rl plog.ResourceLogs, attrs splunk.HecToOtelAttrs, host, source, sourceType, index string) { if host != "" { rl.Resource().Attributes().PutStr(attrs.Host, host) } if source != "" { rl.Resource().Attributes().PutStr(attrs.Source, source) } if sourceType != "" { rl.Resource().Attributes().PutStr(attrs.SourceType, sourceType) } if index != "" { rl.Resource().Attributes().PutStr(attrs.Index, index) } } func convertToValue(logger *zap.Logger, src any, dest pcommon.Value) error { switch value := src.(type) { case nil: case string: dest.SetStr(value) case int64: dest.SetInt(value) case float64: dest.SetDouble(value) case bool: dest.SetBool(value) case map[string]any: return convertToAttributeMap(logger, value, dest) case []any: return convertToSliceVal(logger, value, dest) default: logger.Debug("Unsupported value conversion", zap.Any("value", src)) return errCannotConvertValue } return nil } func convertToSliceVal(logger *zap.Logger, value []any, dest pcommon.Value) error { arr := dest.SetEmptySlice() for _, elt := range value { err := convertToValue(logger, elt, arr.AppendEmpty()) if err != nil { return err } } return nil } func convertToAttributeMap(logger *zap.Logger, value map[string]any, dest pcommon.Value) error { attrMap := dest.SetEmptyMap() keys := make([]string, 0, len(value)) for k := range value { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { v := value[k] if err := convertToValue(logger, v, attrMap.PutEmpty(k)); err != nil { return err } } return nil }
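// Example (sketch): the shape splunkHecToLogData produces for two events that
// share the same host/source/sourcetype/index key: a single ResourceLogs
// carrying the Splunk metadata, one ScopeLogs, and one LogRecord per event.
// The attribute names below are assumed defaults; real code takes them from
// config.HecToOtelAttrs.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().PutStr("host.name", "localhost")
	rl.Resource().Attributes().PutStr("com.splunk.sourcetype", "custom:sourcetype")
	rl.Resource().Attributes().PutStr("com.splunk.index", "main")
	sl := rl.ScopeLogs().AppendEmpty()
	for i, msg := range []string{"event 1", "event 2"} {
		lr := sl.LogRecords().AppendEmpty()
		lr.Body().SetStr(msg)
		// Splunk times are float seconds; the converter multiplies by 1e9.
		lr.SetTimestamp(pcommon.Timestamp(uint64(1700000000+i) * 1e9))
	}
	fmt.Println(ld.ResourceLogs().Len(), ld.LogRecordCount()) // 1 2
}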
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package splunkhecreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver"

import (
	"fmt"
	"strconv"
	"strings"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)

// splunkHecToMetricsData converts Splunk HEC metric points to
// pmetric.Metrics, returning the converted data and the number of
// dropped time series.
func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (pmetric.Metrics, int) {
	numDroppedTimeSeries := 0
	md := pmetric.NewMetrics()
	scopeMetricsMap := make(map[[4]string]pmetric.ScopeMetrics)
	for _, event := range events {
		values := event.GetMetricValues()
		labels := buildAttributes(event.Fields)

		metrics := pmetric.NewMetricSlice()
		for metricName, metricValue := range values {
			pointTimestamp := convertTimestamp(event.Time)
			switch v := metricValue.(type) {
			case int64:
				addIntGauge(metrics, metricName, v, pointTimestamp, labels)
			case *int64:
				addIntGauge(metrics, metricName, *v, pointTimestamp, labels)
			case float64:
				addDoubleGauge(metrics, metricName, v, pointTimestamp, labels)
			case *float64:
				addDoubleGauge(metrics, metricName, *v, pointTimestamp, labels)
			case string:
				convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, v, labels)
			case *string:
				convertString(logger, &numDroppedTimeSeries, metrics, metricName, pointTimestamp, *v, labels)
			default:
				// drop this point as we do not know how to extract a value from it
				numDroppedTimeSeries++
				logger.Debug("Cannot convert metric, unknown input type",
					zap.String("metric", metricName))
			}
		}

		if metrics.Len() == 0 {
			continue
		}

		key := [4]string{event.Host, event.Source, event.SourceType, event.Index}
		var sm pmetric.ScopeMetrics
		var found bool
		if sm, found = scopeMetricsMap[key]; !found {
			resourceMetrics := md.ResourceMetrics().AppendEmpty()
			sm = resourceMetrics.ScopeMetrics().AppendEmpty()
			scopeMetricsMap[key] = sm
			attrs := resourceMetrics.Resource().Attributes()
			if event.Host != "" {
				attrs.PutStr(config.HecToOtelAttrs.Host, event.Host)
			}
			if event.Source != "" {
				attrs.PutStr(config.HecToOtelAttrs.Source, event.Source)
			}
			if event.SourceType != "" {
				attrs.PutStr(config.HecToOtelAttrs.SourceType, event.SourceType)
			}
			if event.Index != "" {
				attrs.PutStr(config.HecToOtelAttrs.Index, event.Index)
			}
			if resourceCustomizer != nil {
				resourceCustomizer(resourceMetrics.Resource())
			}
		}
		metrics.MoveAndAppendTo(sm.Metrics())
	}

	return md, numDroppedTimeSeries
}

func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pmetric.MetricSlice, metricName string, pointTimestamp pcommon.Timestamp, s string, attributes pcommon.Map) {
	// best effort, cast to string and turn into a number
	dbl, err := strconv.ParseFloat(s, 64)
	if err != nil {
		*numDroppedTimeSeries++
		logger.Debug("Cannot convert metric value from string to number",
			zap.String("metric", metricName))
	} else {
		addDoubleGauge(metrics, metricName, dbl, pointTimestamp, attributes)
	}
}

func addIntGauge(metrics pmetric.MetricSlice, metricName string, value int64, ts pcommon.Timestamp, attributes pcommon.Map) {
	metric := metrics.AppendEmpty()
	metric.SetName(metricName)
	intPt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
	intPt.SetTimestamp(ts)
	intPt.SetIntValue(value)
	attributes.CopyTo(intPt.Attributes())
}

func addDoubleGauge(metrics pmetric.MetricSlice, metricName string, value float64, ts pcommon.Timestamp, attributes pcommon.Map) {
	metric := metrics.AppendEmpty()
	metric.SetName(metricName)
	doublePt := metric.SetEmptyGauge().DataPoints().AppendEmpty()
	doublePt.SetTimestamp(ts)
	doublePt.SetDoubleValue(value)
	attributes.CopyTo(doublePt.Attributes())
}

func convertTimestamp(sec float64) pcommon.Timestamp {
	return pcommon.Timestamp(sec * 1e9)
}

// Extract dimensions from the Splunk event fields to populate metric data point attributes.
func buildAttributes(dimensions map[string]any) pcommon.Map {
	attributes := pcommon.NewMap()
	attributes.EnsureCapacity(len(dimensions))
	for key, val := range dimensions {
		if strings.HasPrefix(key, "metric_name") || key == "_value" {
			continue
		}
		if key == "" || val == nil {
			// TODO: Log or metric for this odd ball?
			continue
		}
		attributes.PutStr(key, fmt.Sprintf("%v", val))
	}
	return attributes
}
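// Example (sketch): a standalone copy of the unexported buildAttributes above,
// showing which HEC fields become data point attributes. "metric_name:*" keys
// and "_value" carry the metric itself and are skipped; every other dimension
// is stringified.
package main

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func buildAttributesCopy(dimensions map[string]any) pcommon.Map {
	attributes := pcommon.NewMap()
	attributes.EnsureCapacity(len(dimensions))
	for key, val := range dimensions {
		if strings.HasPrefix(key, "metric_name") || key == "_value" {
			continue
		}
		if key == "" || val == nil {
			continue
		}
		attributes.PutStr(key, fmt.Sprintf("%v", val))
	}
	return attributes
}

func main() {
	attrs := buildAttributesCopy(map[string]any{
		"metric_name:cpu.usage": 42.5,        // metric payload, skipped
		"region":                "us-west-1", // kept
		"replicas":              3,           // kept, stored as the string "3"
	})
	fmt.Println(attrs.Len()) // 2
}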
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver" import ( "errors" "time" "go.opentelemetry.io/collector/config/confighttp" "go.uber.org/multierr" ) var ( errMissingEndpointFromConfig = errors.New("missing receiver server endpoint from config") errReadTimeoutExceedsMaxValue = errors.New("the duration specified for read_timeout exceeds the maximum allowed value of 10s") errWriteTimeoutExceedsMaxValue = errors.New("the duration specified for write_timeout exceeds the maximum allowed value of 10s") errRequiredHeader = errors.New("both key and value are required to assign a required_header") ) // Config defines configuration for the Generic Webhook receiver. type Config struct { confighttp.ServerConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct ReadTimeout string `mapstructure:"read_timeout"` // wait time for reading request headers in ms. Default is 500ms. WriteTimeout string `mapstructure:"write_timeout"` // wait time for writing request response in ms. Default is 500ms. Path string `mapstructure:"path"` // path for data collection. Default is /events HealthPath string `mapstructure:"health_path"` // path for health check api. Default is /health_check RequiredHeader RequiredHeader `mapstructure:"required_header"` // optional setting to set a required header for all requests to have } type RequiredHeader struct { Key string `mapstructure:"key"` Value string `mapstructure:"value"` } func (cfg *Config) Validate() error { var errs error maxReadWriteTimeout, _ := time.ParseDuration("10s") if cfg.ServerConfig.Endpoint == "" { errs = multierr.Append(errs, errMissingEndpointFromConfig) } // If a user defines a custom read/write timeout there is a maximum value // of 10s imposed here. if cfg.ReadTimeout != "" { readTimeout, err := time.ParseDuration(cfg.ReadTimeout) if err != nil { errs = multierr.Append(errs, err) } if readTimeout > maxReadWriteTimeout { errs = multierr.Append(errs, errReadTimeoutExceedsMaxValue) } } if cfg.WriteTimeout != "" { writeTimeout, err := time.ParseDuration(cfg.WriteTimeout) if err != nil { errs = multierr.Append(errs, err) } if writeTimeout > maxReadWriteTimeout { errs = multierr.Append(errs, errWriteTimeoutExceedsMaxValue) } } if (cfg.RequiredHeader.Key != "" && cfg.RequiredHeader.Value == "") || (cfg.RequiredHeader.Value != "" && cfg.RequiredHeader.Key == "") { errs = multierr.Append(errs, errRequiredHeader) } return errs }
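// Example (sketch): how Validate enforces the 10s cap. Constructing Config
// directly like this assumes compatible module versions; in a collector build
// the config would come from YAML instead.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/confighttp"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
)

func main() {
	cfg := &webhookeventreceiver.Config{
		ServerConfig: confighttp.ServerConfig{Endpoint: "localhost:8080"},
		ReadTimeout:  "15s", // over the 10s maximum
	}
	fmt.Println(cfg.Validate()) // reports read_timeout exceeding the allowed value
}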
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver" import ( "context" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata" ) var scopeLogName = "otlp/" + metadata.Type.String() const ( // might add this later, for now I wish to require a valid // endpoint to be declared by the user. // Default endpoints to bind to. // defaultEndpoint = "localhost:8080" defaultReadTimeout = "500ms" defaultWriteTimeout = "500ms" defaultPath = "/events" defaultHealthPath = "/health_check" ) // NewFactory creates a factory for Generic Webhook Receiver. func NewFactory() receiver.Factory { return receiver.NewFactory( metadata.Type, createDefaultConfig, receiver.WithLogs(createLogsReceiver, metadata.LogsStability), ) } // Default configuration for the generic webhook receiver func createDefaultConfig() component.Config { return &Config{ Path: defaultPath, HealthPath: defaultHealthPath, ReadTimeout: defaultReadTimeout, WriteTimeout: defaultWriteTimeout, } } // createLogsReceiver creates a logs receiver based on provided config. func createLogsReceiver( _ context.Context, params receiver.Settings, cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { conf := cfg.(*Config) return newLogsReceiver(params, *conf, consumer) }
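// Example (sketch): obtaining and adjusting the default config through the
// factory. The endpoint is deliberately not defaulted (see the comment above),
// so it must be set before Validate passes.
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"
)

func main() {
	f := webhookeventreceiver.NewFactory()
	cfg := f.CreateDefaultConfig().(*webhookeventreceiver.Config)
	fmt.Println(cfg.Path, cfg.HealthPath) // /events /health_check
	cfg.Endpoint = "localhost:8080"
	fmt.Println(cfg.Validate()) // <nil>
}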
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver" import ( "bufio" "compress/gzip" "context" "errors" "io" "net/http" "sync" "time" jsoniter "github.com/json-iterator/go" "github.com/julienschmidt/httprouter" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata" ) var ( errNilLogsConsumer = errors.New("missing a logs consumer") errMissingEndpoint = errors.New("missing a receiver endpoint") errInvalidRequestMethod = errors.New("invalid method. Valid method is POST") errInvalidEncodingType = errors.New("invalid encoding type") errEmptyResponseBody = errors.New("request body content length is zero") errMissingRequiredHeader = errors.New("request was missing required header or incorrect header value") ) const healthyResponse = `{"text": "Webhookevent receiver is healthy"}` type eventReceiver struct { settings receiver.Settings cfg *Config logConsumer consumer.Logs server *http.Server shutdownWG sync.WaitGroup obsrecv *receiverhelper.ObsReport gzipPool *sync.Pool } func newLogsReceiver(params receiver.Settings, cfg Config, consumer consumer.Logs) (receiver.Logs, error) { if consumer == nil { return nil, errNilLogsConsumer } if cfg.Endpoint == "" { return nil, errMissingEndpoint } transport := "http" if cfg.TLSSetting != nil { transport = "https" } obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: params.ID, Transport: transport, ReceiverCreateSettings: params, }) if err != nil { return nil, err } // create eventReceiver instance er := &eventReceiver{ settings: params, cfg: &cfg, logConsumer: consumer, obsrecv: obsrecv, gzipPool: &sync.Pool{New: func() any { return new(gzip.Reader) }}, } return er, nil } // Start function manages receiver startup tasks. part of the receiver.Logs interface. func (er *eventReceiver) Start(ctx context.Context, host component.Host) error { // noop if not nil. if start has not been called before these values should be nil. if er.server != nil && er.server.Handler != nil { return nil } // create listener from config ln, err := er.cfg.ServerConfig.ToListener(ctx) if err != nil { return err } // set up router. router := httprouter.New() router.POST(er.cfg.Path, er.handleReq) router.GET(er.cfg.HealthPath, er.handleHealthCheck) // webhook server standup and configuration er.server, err = er.cfg.ServerConfig.ToServer(ctx, host, er.settings.TelemetrySettings, router) if err != nil { return err } readTimeout, err := time.ParseDuration(er.cfg.ReadTimeout) if err != nil { return err } writeTimeout, err := time.ParseDuration(er.cfg.WriteTimeout) if err != nil { return err } // set timeouts er.server.ReadHeaderTimeout = readTimeout er.server.WriteTimeout = writeTimeout // shutdown er.shutdownWG.Add(1) go func() { defer er.shutdownWG.Done() if errHTTP := er.server.Serve(ln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil { componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() return nil } // Shutdown function manages receiver shutdown tasks. part of the receiver.Logs interface. 
func (er *eventReceiver) Shutdown(_ context.Context) error {
	// server must exist to be closed.
	if er.server == nil {
		return nil
	}

	err := er.server.Close()
	er.shutdownWG.Wait()
	return err
}

// handleReq handles incoming request from webhook. On success returns a 200 response code to the webhook
func (er *eventReceiver) handleReq(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	ctx := r.Context()
	ctx = er.obsrecv.StartLogsOp(ctx)

	if r.Method != http.MethodPost {
		er.failBadReq(ctx, w, http.StatusBadRequest, errInvalidRequestMethod)
		return
	}

	if er.cfg.RequiredHeader.Key != "" {
		requiredHeaderValue := r.Header.Get(er.cfg.RequiredHeader.Key)
		if requiredHeaderValue != er.cfg.RequiredHeader.Value {
			er.failBadReq(ctx, w, http.StatusUnauthorized, errMissingRequiredHeader)
			return
		}
	}

	encoding := r.Header.Get("Content-Encoding")
	// only support gzip if encoding header is set.
	if encoding != "" && encoding != "gzip" {
		er.failBadReq(ctx, w, http.StatusUnsupportedMediaType, errInvalidEncodingType)
		return
	}

	if r.ContentLength == 0 {
		er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, nil)
		er.failBadReq(ctx, w, http.StatusBadRequest, errEmptyResponseBody)
		return
	}

	bodyReader := r.Body
	// gzip encoded case
	if encoding == "gzip" {
		reader := er.gzipPool.Get().(*gzip.Reader)
		err := reader.Reset(bodyReader)
		if err != nil {
			er.failBadReq(ctx, w, http.StatusBadRequest, err)
			_, _ = io.ReadAll(r.Body)
			_ = r.Body.Close()
			return
		}
		bodyReader = reader
		defer er.gzipPool.Put(reader)
	}

	// send body into a scanner and then convert the request body into a log
	sc := bufio.NewScanner(bodyReader)
	ld, numLogs := reqToLog(sc, r.URL.Query(), er.cfg, er.settings)
	consumerErr := er.logConsumer.ConsumeLogs(ctx, ld)

	_ = bodyReader.Close()

	if consumerErr != nil {
		er.failBadReq(ctx, w, http.StatusInternalServerError, consumerErr)
	} else {
		w.WriteHeader(http.StatusOK)
	}
	er.obsrecv.EndLogsOp(ctx, metadata.Type.String(), numLogs, consumerErr)
}

// Simple healthcheck endpoint.
func (er *eventReceiver) handleHealthCheck(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
	w.Header().Add("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte(healthyResponse))
}

// write response on a failed/bad request. Generates a small JSON body from the error thrown by
// the handler func and the appropriate http status code. Many webhooks will either log these responses or
// notify webhook users should a non-2xx code be detected.
func (er *eventReceiver) failBadReq(_ context.Context,
	w http.ResponseWriter,
	httpStatusCode int,
	err error,
) {
	jsonResp, marshalErr := jsoniter.Marshal(err.Error())
	if marshalErr != nil {
		er.settings.Logger.Warn("failed to marshal error to json")
	}

	// write response to webhook; the Content-Type header must be set before the status line.
	if len(jsonResp) > 0 {
		w.Header().Add("Content-Type", "application/json")
	}
	w.WriteHeader(httpStatusCode)
	if len(jsonResp) > 0 {
		_, writeErr := w.Write(jsonResp)
		if writeErr != nil {
			er.settings.Logger.Warn("failed to write json response", zap.Error(writeErr))
		}
	}

	// log bad webhook request if debug is enabled
	if er.settings.Logger.Core().Enabled(zap.DebugLevel) {
		msg := string(jsonResp)
		er.settings.Logger.Debug(msg, zap.Int("http_status_code", httpStatusCode), zap.Error(err))
	}
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package webhookeventreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver"

import (
	"bufio"
	"net/url"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/receiver"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver/internal/metadata"
)

func reqToLog(sc *bufio.Scanner,
	query url.Values,
	_ *Config,
	settings receiver.Settings,
) (plog.Logs, int) {
	// we simply don't split the data passed into scan (i.e. scan the whole thing)
	// the downside to this approach is that only 1 log per request can be handled.
	// NOTE: logs will contain these newline characters which could have formatting
	// consequences downstream.
	split := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if !atEOF {
			return 0, nil, nil
		}
		return 0, data, bufio.ErrFinalToken
	}
	sc.Split(split)

	log := plog.NewLogs()
	resourceLog := log.ResourceLogs().AppendEmpty()
	appendMetadata(resourceLog, query)
	scopeLog := resourceLog.ScopeLogs().AppendEmpty()

	scopeLog.Scope().SetName(scopeLogName)
	scopeLog.Scope().SetVersion(settings.BuildInfo.Version)
	scopeLog.Scope().Attributes().PutStr("source", settings.ID.String())
	scopeLog.Scope().Attributes().PutStr("receiver", metadata.Type.String())

	for sc.Scan() {
		logRecord := scopeLog.LogRecords().AppendEmpty()
		logRecord.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
		line := sc.Text()
		logRecord.Body().SetStr(line)
	}

	return log, scopeLog.LogRecords().Len()
}

// append query parameters and webhook source as resource attributes
func appendMetadata(resourceLog plog.ResourceLogs, query url.Values) {
	for k := range query {
		if query.Get(k) != "" {
			resourceLog.Resource().Attributes().PutStr(k, query.Get(k))
		}
	}
}
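// Example (sketch): the split function above in isolation. Because it defers
// until EOF and then returns everything as one final token, a multi-line body
// becomes exactly one log record, newlines included (subject to
// bufio.Scanner's default buffer limit).
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	split := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if !atEOF {
			return 0, nil, nil
		}
		return 0, data, bufio.ErrFinalToken
	}
	sc := bufio.NewScanner(strings.NewReader("line one\nline two\n"))
	sc.Split(split)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text()) // "line one\nline two\n" -- a single token
	}
}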
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/data" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // LogRecord are experimental implementation of OpenTelemetry Log Data Model. // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewLogRecord function to create new instances. // Important: zero-initialized instance is not valid for use. type LogRecord struct { orig *otlplogs.LogRecord state *internal.State } func newLogRecord(orig *otlplogs.LogRecord, state *internal.State) LogRecord { return LogRecord{orig: orig, state: state} } // NewLogRecord creates a new empty LogRecord. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewLogRecord() LogRecord { state := internal.StateMutable return newLogRecord(&otlplogs.LogRecord{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms LogRecord) MoveTo(dest LogRecord) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlplogs.LogRecord{} } // ObservedTimestamp returns the observedtimestamp associated with this LogRecord. func (ms LogRecord) ObservedTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.ObservedTimeUnixNano) } // SetObservedTimestamp replaces the observedtimestamp associated with this LogRecord. func (ms LogRecord) SetObservedTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.ObservedTimeUnixNano = uint64(v) } // Timestamp returns the timestamp associated with this LogRecord. func (ms LogRecord) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this LogRecord. func (ms LogRecord) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // TraceID returns the traceid associated with this LogRecord. func (ms LogRecord) TraceID() pcommon.TraceID { return pcommon.TraceID(ms.orig.TraceId) } // SetTraceID replaces the traceid associated with this LogRecord. func (ms LogRecord) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() ms.orig.TraceId = data.TraceID(v) } // SpanID returns the spanid associated with this LogRecord. func (ms LogRecord) SpanID() pcommon.SpanID { return pcommon.SpanID(ms.orig.SpanId) } // SetSpanID replaces the spanid associated with this LogRecord. func (ms LogRecord) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() ms.orig.SpanId = data.SpanID(v) } // Flags returns the flags associated with this LogRecord. func (ms LogRecord) Flags() LogRecordFlags { return LogRecordFlags(ms.orig.Flags) } // SetFlags replaces the flags associated with this LogRecord. func (ms LogRecord) SetFlags(v LogRecordFlags) { ms.state.AssertMutable() ms.orig.Flags = uint32(v) } // EventName returns the eventname associated with this LogRecord. 
func (ms LogRecord) EventName() string { return ms.orig.EventName } // SetEventName replaces the eventname associated with this LogRecord. func (ms LogRecord) SetEventName(v string) { ms.state.AssertMutable() ms.orig.EventName = v } // SeverityText returns the severitytext associated with this LogRecord. func (ms LogRecord) SeverityText() string { return ms.orig.SeverityText } // SetSeverityText replaces the severitytext associated with this LogRecord. func (ms LogRecord) SetSeverityText(v string) { ms.state.AssertMutable() ms.orig.SeverityText = v } // SeverityNumber returns the severitynumber associated with this LogRecord. func (ms LogRecord) SeverityNumber() SeverityNumber { return SeverityNumber(ms.orig.SeverityNumber) } // SetSeverityNumber replaces the severitynumber associated with this LogRecord. func (ms LogRecord) SetSeverityNumber(v SeverityNumber) { ms.state.AssertMutable() ms.orig.SeverityNumber = otlplogs.SeverityNumber(v) } // Body returns the body associated with this LogRecord. func (ms LogRecord) Body() pcommon.Value { return pcommon.Value(internal.NewValue(&ms.orig.Body, ms.state)) } // Attributes returns the Attributes associated with this LogRecord. func (ms LogRecord) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this LogRecord. func (ms LogRecord) DroppedAttributesCount() uint32 { return ms.orig.DroppedAttributesCount } // SetDroppedAttributesCount replaces the droppedattributescount associated with this LogRecord. func (ms LogRecord) SetDroppedAttributesCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedAttributesCount = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms LogRecord) CopyTo(dest LogRecord) { dest.state.AssertMutable() copyOrigLogRecord(dest.orig, ms.orig) } func copyOrigLogRecord(dest, src *otlplogs.LogRecord) { dest.ObservedTimeUnixNano = src.ObservedTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.TraceId = src.TraceId dest.SpanId = src.SpanId dest.Flags = src.Flags dest.EventName = src.EventName dest.SeverityText = src.SeverityText dest.SeverityNumber = src.SeverityNumber internal.CopyOrigValue(&dest.Body, &src.Body) dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.DroppedAttributesCount = src.DroppedAttributesCount }
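// Example (sketch): populating a LogRecord through the accessors above.
// NewLogRecord is intended for tests; in pipelines records come from
// LogRecordSlice.AppendEmpty.
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	lr := plog.NewLogRecord()
	lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	lr.SetSeverityNumber(plog.SeverityNumberInfo)
	lr.SetSeverityText("INFO")
	lr.SetEventName("user.login")
	lr.Body().SetStr("user logged in")
	lr.Attributes().PutStr("user.id", "42")
	fmt.Println(lr.SeverityText(), lr.Body().AsString(), lr.EventName())
}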
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // LogRecordSlice logically represents a slice of LogRecord. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewLogRecordSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type LogRecordSlice struct { orig *[]*otlplogs.LogRecord state *internal.State } func newLogRecordSlice(orig *[]*otlplogs.LogRecord, state *internal.State) LogRecordSlice { return LogRecordSlice{orig: orig, state: state} } // NewLogRecordSlice creates a LogRecordSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewLogRecordSlice() LogRecordSlice { orig := []*otlplogs.LogRecord(nil) state := internal.StateMutable return newLogRecordSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a newly instance created with "NewLogRecordSlice()". func (es LogRecordSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es LogRecordSlice) At(i int) LogRecord { return newLogRecord((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es LogRecordSlice) All() iter.Seq2[int, LogRecord] { return func(yield func(int, LogRecord) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new LogRecordSlice can be initialized: // // es := NewLogRecordSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es LogRecordSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlplogs.LogRecord, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty LogRecord. // It returns the newly added LogRecord. func (es LogRecordSlice) AppendEmpty() LogRecord { es.state.AssertMutable() *es.orig = append(*es.orig, &otlplogs.LogRecord{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es LogRecordSlice) MoveAndAppendTo(dest LogRecordSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) 
} *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es LogRecordSlice) RemoveIf(f func(LogRecord) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es LogRecordSlice) CopyTo(dest LogRecordSlice) { dest.state.AssertMutable() *dest.orig = copyOrigLogRecordSlice(*dest.orig, *es.orig) } // Sort sorts the LogRecord elements within LogRecordSlice given the // provided less function so that two instances of LogRecordSlice // can be compared. func (es LogRecordSlice) Sort(less func(a, b LogRecord) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigLogRecordSlice(dest, src []*otlplogs.LogRecord) []*otlplogs.LogRecord { if cap(dest) < len(src) { dest = make([]*otlplogs.LogRecord, len(src)) data := make([]otlplogs.LogRecord, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigLogRecord(dest[i], src[i]) } return dest }
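// Example (sketch): the slice operations above -- pre-allocating, appending,
// filtering in place, and a stable sort.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	es := plog.NewLogRecordSlice()
	es.EnsureCapacity(3) // single allocation up front
	for i := 0; i < 3; i++ {
		es.AppendEmpty().Body().SetStr(fmt.Sprintf("record %d", i))
	}
	// RemoveIf compacts surviving elements to the left without reallocating.
	es.RemoveIf(func(lr plog.LogRecord) bool {
		return lr.Body().Str() == "record 1"
	})
	es.Sort(func(a, b plog.LogRecord) bool {
		return a.Body().Str() > b.Body().Str()
	})
	fmt.Println(es.Len(), es.At(0).Body().Str()) // 2 record 2
}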
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ResourceLogs is a collection of logs from a Resource. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewResourceLogs function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceLogs struct { orig *otlplogs.ResourceLogs state *internal.State } func newResourceLogs(orig *otlplogs.ResourceLogs, state *internal.State) ResourceLogs { return ResourceLogs{orig: orig, state: state} } // NewResourceLogs creates a new empty ResourceLogs. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewResourceLogs() ResourceLogs { state := internal.StateMutable return newResourceLogs(&otlplogs.ResourceLogs{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ResourceLogs) MoveTo(dest ResourceLogs) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlplogs.ResourceLogs{} } // Resource returns the resource associated with this ResourceLogs. func (ms ResourceLogs) Resource() pcommon.Resource { return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) } // SchemaUrl returns the schemaurl associated with this ResourceLogs. func (ms ResourceLogs) SchemaUrl() string { return ms.orig.SchemaUrl } // SetSchemaUrl replaces the schemaurl associated with this ResourceLogs. func (ms ResourceLogs) SetSchemaUrl(v string) { ms.state.AssertMutable() ms.orig.SchemaUrl = v } // ScopeLogs returns the ScopeLogs associated with this ResourceLogs. func (ms ResourceLogs) ScopeLogs() ScopeLogsSlice { return newScopeLogsSlice(&ms.orig.ScopeLogs, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceLogs) CopyTo(dest ResourceLogs) { dest.state.AssertMutable() copyOrigResourceLogs(dest.orig, ms.orig) } func copyOrigResourceLogs(dest, src *otlplogs.ResourceLogs) { internal.CopyOrigResource(&dest.Resource, &src.Resource) dest.SchemaUrl = src.SchemaUrl dest.ScopeLogs = copyOrigScopeLogsSlice(dest.ScopeLogs, src.ScopeLogs) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // ResourceLogsSlice logically represents a slice of ResourceLogs. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewResourceLogsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceLogsSlice struct { orig *[]*otlplogs.ResourceLogs state *internal.State } func newResourceLogsSlice(orig *[]*otlplogs.ResourceLogs, state *internal.State) ResourceLogsSlice { return ResourceLogsSlice{orig: orig, state: state} } // NewResourceLogsSlice creates a ResourceLogsSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceLogsSlice() ResourceLogsSlice { orig := []*otlplogs.ResourceLogs(nil) state := internal.StateMutable return newResourceLogsSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a newly instance created with "NewResourceLogsSlice()". func (es ResourceLogsSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ResourceLogsSlice) At(i int) ResourceLogs { return newResourceLogs((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ResourceLogsSlice) All() iter.Seq2[int, ResourceLogs] { return func(yield func(int, ResourceLogs) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ResourceLogsSlice can be initialized: // // es := NewResourceLogsSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ResourceLogsSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlplogs.ResourceLogs, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ResourceLogs. // It returns the newly added ResourceLogs. func (es ResourceLogsSlice) AppendEmpty() ResourceLogs { es.state.AssertMutable() *es.orig = append(*es.orig, &otlplogs.ResourceLogs{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ResourceLogsSlice) MoveAndAppendTo(dest ResourceLogsSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. 
*dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ResourceLogsSlice) RemoveIf(f func(ResourceLogs) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ResourceLogsSlice) CopyTo(dest ResourceLogsSlice) { dest.state.AssertMutable() *dest.orig = copyOrigResourceLogsSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceLogs elements within ResourceLogsSlice given the // provided less function so that two instances of ResourceLogsSlice // can be compared. func (es ResourceLogsSlice) Sort(less func(a, b ResourceLogs) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigResourceLogsSlice(dest, src []*otlplogs.ResourceLogs) []*otlplogs.ResourceLogs { if cap(dest) < len(src) { dest = make([]*otlplogs.ResourceLogs, len(src)) data := make([]otlplogs.ResourceLogs, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigResourceLogs(dest[i], src[i]) } return dest }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ScopeLogs is a collection of logs from a LibraryInstrumentation. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewScopeLogs function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeLogs struct { orig *otlplogs.ScopeLogs state *internal.State } func newScopeLogs(orig *otlplogs.ScopeLogs, state *internal.State) ScopeLogs { return ScopeLogs{orig: orig, state: state} } // NewScopeLogs creates a new empty ScopeLogs. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeLogs() ScopeLogs { state := internal.StateMutable return newScopeLogs(&otlplogs.ScopeLogs{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ScopeLogs) MoveTo(dest ScopeLogs) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlplogs.ScopeLogs{} } // Scope returns the scope associated with this ScopeLogs. func (ms ScopeLogs) Scope() pcommon.InstrumentationScope { return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) } // SchemaUrl returns the schemaurl associated with this ScopeLogs. func (ms ScopeLogs) SchemaUrl() string { return ms.orig.SchemaUrl } // SetSchemaUrl replaces the schemaurl associated with this ScopeLogs. func (ms ScopeLogs) SetSchemaUrl(v string) { ms.state.AssertMutable() ms.orig.SchemaUrl = v } // LogRecords returns the LogRecords associated with this ScopeLogs. func (ms ScopeLogs) LogRecords() LogRecordSlice { return newLogRecordSlice(&ms.orig.LogRecords, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ScopeLogs) CopyTo(dest ScopeLogs) { dest.state.AssertMutable() copyOrigScopeLogs(dest.orig, ms.orig) } func copyOrigScopeLogs(dest, src *otlplogs.ScopeLogs) { internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) dest.SchemaUrl = src.SchemaUrl dest.LogRecords = copyOrigLogRecordSlice(dest.LogRecords, src.LogRecords) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plog import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // ScopeLogsSlice logically represents a slice of ScopeLogs. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewScopeLogsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeLogsSlice struct { orig *[]*otlplogs.ScopeLogs state *internal.State } func newScopeLogsSlice(orig *[]*otlplogs.ScopeLogs, state *internal.State) ScopeLogsSlice { return ScopeLogsSlice{orig: orig, state: state} } // NewScopeLogsSlice creates a ScopeLogsSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeLogsSlice() ScopeLogsSlice { orig := []*otlplogs.ScopeLogs(nil) state := internal.StateMutable return newScopeLogsSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a newly instance created with "NewScopeLogsSlice()". func (es ScopeLogsSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ScopeLogsSlice) At(i int) ScopeLogs { return newScopeLogs((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ScopeLogsSlice) All() iter.Seq2[int, ScopeLogs] { return func(yield func(int, ScopeLogs) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ScopeLogsSlice can be initialized: // // es := NewScopeLogsSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ScopeLogsSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlplogs.ScopeLogs, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ScopeLogs. // It returns the newly added ScopeLogs. func (es ScopeLogsSlice) AppendEmpty() ScopeLogs { es.state.AssertMutable() *es.orig = append(*es.orig, &otlplogs.ScopeLogs{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ScopeLogsSlice) MoveAndAppendTo(dest ScopeLogsSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) 
} *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ScopeLogsSlice) RemoveIf(f func(ScopeLogs) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ScopeLogsSlice) CopyTo(dest ScopeLogsSlice) { dest.state.AssertMutable() *dest.orig = copyOrigScopeLogsSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeLogs elements within ScopeLogsSlice given the // provided less function so that two instances of ScopeLogsSlice // can be compared. func (es ScopeLogsSlice) Sort(less func(a, b ScopeLogs) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigScopeLogsSlice(dest, src []*otlplogs.ScopeLogs) []*otlplogs.ScopeLogs { if cap(dest) < len(src) { dest = make([]*otlplogs.ScopeLogs, len(src)) data := make([]otlplogs.ScopeLogs, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigScopeLogs(dest[i], src[i]) } return dest }
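// Example (sketch): assembling the full Logs -> ResourceLogs -> ScopeLogs ->
// LogRecord hierarchy defined above, then walking it with the All() iterators
// (range-over-func requires Go 1.23+).
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().PutStr("service.name", "checkout")
	rl.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")
	sl := rl.ScopeLogs().AppendEmpty()
	sl.Scope().SetName("example-instrumentation")
	sl.Scope().SetVersion("0.1.0")
	sl.LogRecords().AppendEmpty().Body().SetStr("hello")

	for _, rlv := range ld.ResourceLogs().All() {
		for _, slv := range rlv.ScopeLogs().All() {
			fmt.Println(slv.Scope().Name(), slv.LogRecords().Len()) // example-instrumentation 1
		}
	}
}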
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( "bytes" "fmt" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // JSONMarshaler marshals pdata.Logs to JSON bytes using the OTLP/JSON format. type JSONMarshaler struct{} // MarshalLogs to the OTLP/JSON format. func (*JSONMarshaler) MarshalLogs(ld Logs) ([]byte, error) { buf := bytes.Buffer{} pb := internal.LogsToProto(internal.Logs(ld)) err := json.Marshal(&buf, &pb) return buf.Bytes(), err } var _ Unmarshaler = (*JSONUnmarshaler)(nil) // JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Logs. type JSONUnmarshaler struct{} // UnmarshalLogs from OTLP/JSON format into pdata.Logs. func (*JSONUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { iter := jsoniter.ConfigFastest.BorrowIterator(buf) defer jsoniter.ConfigFastest.ReturnIterator(iter) ld := NewLogs() ld.unmarshalJsoniter(iter) if iter.Error != nil { return Logs{}, iter.Error } otlp.MigrateLogs(ld.getOrig().ResourceLogs) return ld, nil } func (ms Logs) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resource_logs", "resourceLogs": iter.ReadArrayCB(func(*jsoniter.Iterator) bool { ms.ResourceLogs().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms ResourceLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resource": json.ReadResource(iter, &ms.orig.Resource) case "scope_logs", "scopeLogs": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.ScopeLogs().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (ms ScopeLogs) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "scope": json.ReadScope(iter, &ms.orig.Scope) case "log_records", "logRecords": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.LogRecords().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (ms LogRecord) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "observed_time_unix_nano", "observedTimeUnixNano": ms.orig.ObservedTimeUnixNano = json.ReadUint64(iter) case "severity_number", "severityNumber": ms.orig.SeverityNumber = otlplogs.SeverityNumber(json.ReadEnumValue(iter, otlplogs.SeverityNumber_value)) case "severity_text", "severityText": ms.orig.SeverityText = iter.ReadString() case "event_name", "eventName": ms.orig.EventName = iter.ReadString() case "body": json.ReadValue(iter, &ms.orig.Body) case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) return true }) case "droppedAttributesCount", "dropped_attributes_count": ms.orig.DroppedAttributesCount = json.ReadUint32(iter) case "flags": ms.orig.Flags = 
json.ReadUint32(iter) case "traceId", "trace_id": if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readLog.traceId", fmt.Sprintf("parse trace_id:%v", err)) } case "spanId", "span_id": if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readLog.spanId", fmt.Sprintf("parse span_id:%v", err)) } default: iter.Skip() } return true }) }
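// Example (sketch): an OTLP/JSON round trip through the marshaler and
// unmarshaler above.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("round trip")

	var m plog.JSONMarshaler
	data, err := m.MarshalLogs(ld)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"resourceLogs":[...]}

	var u plog.JSONUnmarshaler
	back, err := u.UnmarshalLogs(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.LogRecordCount()) // 1
}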
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package plog // import "go.opentelemetry.io/collector/pdata/plog"

const isSampledMask = uint32(1)

var DefaultLogRecordFlags = LogRecordFlags(0)

// LogRecordFlags defines flags for the LogRecord. The 8 least significant bits are the trace flags as
// defined in the W3C Trace Context specification. The 24 most significant bits are reserved and must be set to 0.
type LogRecordFlags uint32

// IsSampled returns true if the LogRecordFlags contains the IsSampled flag.
func (ms LogRecordFlags) IsSampled() bool {
	return uint32(ms)&isSampledMask != 0
}

// WithIsSampled returns a new LogRecordFlags, with the IsSampled flag set to the given value.
func (ms LogRecordFlags) WithIsSampled(b bool) LogRecordFlags {
	orig := uint32(ms)
	if b {
		orig |= isSampledMask
	} else {
		orig &^= isSampledMask
	}
	return LogRecordFlags(orig)
}
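// Example (sketch): LogRecordFlags is an immutable value type; WithIsSampled
// returns a new value rather than mutating the receiver.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	flags := plog.DefaultLogRecordFlags.WithIsSampled(true)
	fmt.Println(flags.IsSampled()) // true

	cleared := flags.WithIsSampled(false)
	fmt.Println(cleared.IsSampled(), flags.IsSampled()) // false true
}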
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" ) // Logs is the top-level struct that is propagated through the logs pipeline. // Use NewLogs to create new instance, zero-initialized instance is not valid for use. type Logs internal.Logs func newLogs(orig *otlpcollectorlog.ExportLogsServiceRequest) Logs { state := internal.StateMutable return Logs(internal.NewLogs(orig, &state)) } func (ms Logs) getOrig() *otlpcollectorlog.ExportLogsServiceRequest { return internal.GetOrigLogs(internal.Logs(ms)) } func (ms Logs) getState() *internal.State { return internal.GetLogsState(internal.Logs(ms)) } // NewLogs creates a new Logs struct. func NewLogs() Logs { return newLogs(&otlpcollectorlog.ExportLogsServiceRequest{}) } // IsReadOnly returns true if this Logs instance is read-only. func (ms Logs) IsReadOnly() bool { return *ms.getState() == internal.StateReadOnly } // CopyTo copies the Logs instance overriding the destination. func (ms Logs) CopyTo(dest Logs) { ms.ResourceLogs().CopyTo(dest.ResourceLogs()) } // LogRecordCount calculates the total number of log records. func (ms Logs) LogRecordCount() int { logCount := 0 rss := ms.ResourceLogs() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) ill := rs.ScopeLogs() for i := 0; i < ill.Len(); i++ { logs := ill.At(i) logCount += logs.LogRecords().Len() } } return logCount } // ResourceLogs returns the ResourceLogsSlice associated with this Logs. func (ms Logs) ResourceLogs() ResourceLogsSlice { return newResourceLogsSlice(&ms.getOrig().ResourceLogs, internal.GetLogsState(internal.Logs(ms))) } // MarkReadOnly marks the Logs as shared so that no further modifications can be done on it. func (ms Logs) MarkReadOnly() { internal.SetLogsState(internal.Logs(ms), internal.StateReadOnly) }
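// Example (sketch): LogRecordCount walks the whole hierarchy, and MarkReadOnly
// flips the shared state so later mutations panic via AssertMutable.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	sl := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
	sl.LogRecords().AppendEmpty()
	sl.LogRecords().AppendEmpty()
	fmt.Println(ld.LogRecordCount(), ld.IsReadOnly()) // 2 false

	ld.MarkReadOnly()
	fmt.Println(ld.IsReadOnly()) // true; any further AppendEmpty would panic
}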
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( "go.opentelemetry.io/collector/pdata/internal" otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) var _ MarshalSizer = (*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalLogs(ld Logs) ([]byte, error) { pb := internal.LogsToProto(internal.Logs(ld)) return pb.Marshal() } func (e *ProtoMarshaler) LogsSize(ld Logs) int { pb := internal.LogsToProto(internal.Logs(ld)) return pb.Size() } func (e *ProtoMarshaler) ResourceLogsSize(rl ResourceLogs) int { return rl.orig.Size() } func (e *ProtoMarshaler) ScopeLogsSize(sl ScopeLogs) int { return sl.orig.Size() } func (e *ProtoMarshaler) LogRecordSize(lr LogRecord) int { return lr.orig.Size() } var _ Unmarshaler = (*ProtoUnmarshaler)(nil) type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalLogs(buf []byte) (Logs, error) { pb := otlplogs.LogsData{} err := pb.Unmarshal(buf) return Logs(internal.LogsFromProto(pb)), err }
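// Example (sketch): a protobuf round trip; LogsSize reports the exact
// serialized length, useful for batch sizing.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("proto")

	var m plog.ProtoMarshaler
	buf, err := m.MarshalLogs(ld)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.LogsSize(ld) == len(buf)) // true

	var u plog.ProtoUnmarshaler
	back, err := u.UnmarshalLogs(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.LogRecordCount()) // 1
}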
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package plogotlp import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { orig *otlpcollectorlog.ExportLogsPartialSuccess state *internal.State } func newExportPartialSuccess(orig *otlpcollectorlog.ExportLogsPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } // NewExportPartialSuccess creates a new empty ExportPartialSuccess. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { state := internal.StateMutable return newExportPartialSuccess(&otlpcollectorlog.ExportLogsPartialSuccess{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpcollectorlog.ExportLogsPartialSuccess{} } // RejectedLogRecords returns the rejectedlogrecords associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) RejectedLogRecords() int64 { return ms.orig.RejectedLogRecords } // SetRejectedLogRecords replaces the rejectedlogrecords associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetRejectedLogRecords(v int64) { ms.state.AssertMutable() ms.orig.RejectedLogRecords = v } // ErrorMessage returns the errormessage associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) ErrorMessage() string { return ms.orig.ErrorMessage } // SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetErrorMessage(v string) { ms.state.AssertMutable() ms.orig.ErrorMessage = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() copyOrigExportPartialSuccess(dest.orig, ms.orig) } func copyOrigExportPartialSuccess(dest, src *otlpcollectorlog.ExportLogsPartialSuccess) { dest.RejectedLogRecords = src.RejectedLogRecords dest.ErrorMessage = src.ErrorMessage }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" import ( "context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // GRPCClient is the client API for the OTLP gRPC Logs service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GRPCClient interface { // Export plog.Logs to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) // unexported disallows implementation of the GRPCClient. unexported() } // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { return &grpcClient{rawClient: otlpcollectorlog.NewLogsServiceClient(cc)} } type grpcClient struct { rawClient otlpcollectorlog.LogsServiceClient } func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { rsp, err := c.rawClient.Export(ctx, request.orig, opts...) if err != nil { return ExportResponse{}, err } state := internal.StateMutable return ExportResponse{orig: rsp, state: &state}, err } func (c *grpcClient) unexported() {} // GRPCServer is the server API for the OTLP gRPC LogsService service. // Implementations MUST embed UnimplementedGRPCServer. type GRPCServer interface { // Export is called every time a new request is received. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(context.Context, ExportRequest) (ExportResponse, error) // unexported disallows implementation of the GRPCServer. unexported() } var _ GRPCServer = (*UnimplementedGRPCServer)(nil) // UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. type UnimplementedGRPCServer struct{} func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") } func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer with the grpc.Server. func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { otlpcollectorlog.RegisterLogsServiceServer(s, &rawLogsServer{srv: srv}) } type rawLogsServer struct { srv GRPCServer } func (s rawLogsServer) Export(ctx context.Context, request *otlpcollectorlog.ExportLogsServiceRequest) (*otlpcollectorlog.ExportLogsServiceResponse, error) { otlp.MigrateLogs(request.ResourceLogs) state := internal.StateMutable rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) return rsp.orig, err }
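// Example (hypothetical): a minimal sketch of implementing the GRPCServer
// interface above. Embedding UnimplementedGRPCServer supplies the unexported
// method and forward compatibility; the address and handler body are
// illustrative.
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
)

type logsServer struct {
	plogotlp.UnimplementedGRPCServer
}

func (s *logsServer) Export(_ context.Context, req plogotlp.ExportRequest) (plogotlp.ExportResponse, error) {
	_ = req.Logs() // hand the wrapped plog.Logs to the processing pipeline here
	return plogotlp.NewExportResponse(), nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	plogotlp.RegisterGRPCServer(srv, &logsServer{})
	_ = srv.Serve(lis)
}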
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" import ( "bytes" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/plog" ) var jsonUnmarshaler = &plog.JSONUnmarshaler{} // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for plog.Logs data. type ExportRequest struct { orig *otlpcollectorlog.ExportLogsServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { state := internal.StateMutable return ExportRequest{ orig: &otlpcollectorlog.ExportLogsServiceRequest{}, state: &state, } } // NewExportRequestFromLogs returns an ExportRequest from plog.Logs. // Because ExportRequest is a wrapper for plog.Logs, // any changes to the provided Logs struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromLogs(ld plog.Logs) ExportRequest { return ExportRequest{ orig: internal.GetOrigLogs(internal.Logs(ld)), state: internal.GetLogsState(internal.Logs(ld)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshals ExportRequest from proto bytes. func (ms ExportRequest) UnmarshalProto(data []byte) error { if err := ms.orig.Unmarshal(data); err != nil { return err } otlp.MigrateLogs(ms.orig.ResourceLogs) return nil } // MarshalJSON marshals ExportRequest into JSON bytes. func (ms ExportRequest) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshals ExportRequest from JSON bytes. func (ms ExportRequest) UnmarshalJSON(data []byte) error { ld, err := jsonUnmarshaler.UnmarshalLogs(data) if err != nil { return err } *ms.orig = *internal.GetOrigLogs(internal.Logs(ld)) return nil } func (ms ExportRequest) Logs() plog.Logs { return plog.Logs(internal.NewLogs(ms.orig, ms.state)) }
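// Example (hypothetical): a sketch of the wrapper semantics documented above.
// NewExportRequestFromLogs shares data with the plog.Logs it wraps, and
// MarshalProto/UnmarshalProto round-trip the request; the log body is
// illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
)

func main() {
	ld := plog.NewLogs()
	ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("hi")

	req := plogotlp.NewExportRequestFromLogs(ld)
	buf, err := req.MarshalProto()
	if err != nil {
		panic(err)
	}

	back := plogotlp.NewExportRequest()
	if err := back.UnmarshalProto(buf); err != nil {
		panic(err)
	}
	fmt.Println(back.Logs().LogRecordCount()) // 1
}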
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plogotlp // import "go.opentelemetry.io/collector/pdata/plog/plogotlp" import ( "bytes" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" "go.opentelemetry.io/collector/pdata/internal/json" ) // ExportResponse represents the response for gRPC/HTTP client/server. type ExportResponse struct { orig *otlpcollectorlog.ExportLogsServiceResponse state *internal.State } // NewExportResponse returns an empty ExportResponse. func NewExportResponse() ExportResponse { state := internal.StateMutable return ExportResponse{ orig: &otlpcollectorlog.ExportLogsServiceResponse{}, state: &state, } } // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshals ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { return ms.orig.Unmarshal(data) } // MarshalJSON marshals ExportResponse into JSON bytes. func (ms ExportResponse) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshals ExportResponse from JSON bytes. func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := jsoniter.ConfigFastest.BorrowIterator(data) defer jsoniter.ConfigFastest.ReturnIterator(iter) ms.unmarshalJsoniter(iter) return iter.Error } // PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) } func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "partial_success", "partialSuccess": ms.PartialSuccess().unmarshalJsoniter(iter) default: iter.Skip() } return true }) } func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool { switch f { case "rejected_log_records", "rejectedLogRecords": ms.orig.RejectedLogRecords = json.ReadInt64(iter) case "error_message", "errorMessage": ms.orig.ErrorMessage = iter.ReadString() default: iter.Skip() } return true }) }
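// Example (hypothetical): a sketch of decoding an ExportResponse from JSON, as
// an OTLP/HTTP client would, and reading the partial-success fields. The
// payload is illustrative; the jsoniter-based decoder above reads
// rejectedLogRecords as a JSON number or string (assuming the usual protobuf
// JSON mapping for int64).
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
)

func main() {
	resp := plogotlp.NewExportResponse()
	payload := []byte(`{"partialSuccess":{"rejectedLogRecords":"2","errorMessage":"bad records"}}`)
	if err := resp.UnmarshalJSON(payload); err != nil {
		panic(err)
	}
	ps := resp.PartialSuccess()
	fmt.Println(ps.RejectedLogRecords(), ps.ErrorMessage()) // 2 bad records
}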
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package plog // import "go.opentelemetry.io/collector/pdata/plog" import ( otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" ) // SeverityNumber represents the severity number of a log record. type SeverityNumber int32 const ( SeverityNumberUnspecified = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED) SeverityNumberTrace = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE) SeverityNumberTrace2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE2) SeverityNumberTrace3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE3) SeverityNumberTrace4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_TRACE4) SeverityNumberDebug = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG) SeverityNumberDebug2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG2) SeverityNumberDebug3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG3) SeverityNumberDebug4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_DEBUG4) SeverityNumberInfo = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO) SeverityNumberInfo2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO2) SeverityNumberInfo3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO3) SeverityNumberInfo4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_INFO4) SeverityNumberWarn = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN) SeverityNumberWarn2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN2) SeverityNumberWarn3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN3) SeverityNumberWarn4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_WARN4) SeverityNumberError = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR) SeverityNumberError2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR2) SeverityNumberError3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR3) SeverityNumberError4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_ERROR4) SeverityNumberFatal = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL) SeverityNumberFatal2 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL2) SeverityNumberFatal3 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL3) SeverityNumberFatal4 = SeverityNumber(otlplogs.SeverityNumber_SEVERITY_NUMBER_FATAL4) ) // String returns the string representation of the SeverityNumber.
func (sn SeverityNumber) String() string { switch sn { case SeverityNumberUnspecified: return "Unspecified" case SeverityNumberTrace: return "Trace" case SeverityNumberTrace2: return "Trace2" case SeverityNumberTrace3: return "Trace3" case SeverityNumberTrace4: return "Trace4" case SeverityNumberDebug: return "Debug" case SeverityNumberDebug2: return "Debug2" case SeverityNumberDebug3: return "Debug3" case SeverityNumberDebug4: return "Debug4" case SeverityNumberInfo: return "Info" case SeverityNumberInfo2: return "Info2" case SeverityNumberInfo3: return "Info3" case SeverityNumberInfo4: return "Info4" case SeverityNumberWarn: return "Warn" case SeverityNumberWarn2: return "Warn2" case SeverityNumberWarn3: return "Warn3" case SeverityNumberWarn4: return "Warn4" case SeverityNumberError: return "Error" case SeverityNumberError2: return "Error2" case SeverityNumberError3: return "Error3" case SeverityNumberError4: return "Error4" case SeverityNumberFatal: return "Fatal" case SeverityNumberFatal2: return "Fatal2" case SeverityNumberFatal3: return "Fatal3" case SeverityNumberFatal4: return "Fatal4" } return "" }
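// Example (hypothetical): a sketch of pairing SeverityNumber with String()
// above to keep a record's numeric severity and severity text in sync. The
// record is created here only for illustration.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	lr := plog.NewLogRecord()
	lr.SetSeverityNumber(plog.SeverityNumberWarn)
	lr.SetSeverityText(lr.SeverityNumber().String())
	fmt.Println(lr.SeverityText()) // Warn
}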
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // AggregationTemporality defines how a metric aggregator reports aggregated values. // It describes how those values relate to the time interval over which they are aggregated. type AggregationTemporality int32 const ( // AggregationTemporalityUnspecified is the default AggregationTemporality; it MUST NOT be used. AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED) // AggregationTemporalityDelta is an AggregationTemporality for a metric aggregator which reports changes since the last report time. AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA) // AggregationTemporalityCumulative is an AggregationTemporality for a metric aggregator which reports changes since a fixed start time. AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE) ) // String returns the string representation of the AggregationTemporality. func (at AggregationTemporality) String() string { switch at { case AggregationTemporalityUnspecified: return "Unspecified" case AggregationTemporalityDelta: return "Delta" case AggregationTemporalityCumulative: return "Cumulative" } return "" }
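// Example (hypothetical): a sketch of stamping an AggregationTemporality onto
// a Histogram (defined later in this package) and branching on it the way a
// temporality-aware consumer might.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	h := pmetric.NewHistogram()
	h.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)

	switch h.AggregationTemporality() {
	case pmetric.AggregationTemporalityDelta:
		fmt.Println("points cover the interval since the last report")
	case pmetric.AggregationTemporalityCumulative:
		fmt.Println("points cover the interval since a fixed start time")
	default:
		fmt.Println("unspecified temporality MUST NOT be used")
	}
}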
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" // ExemplarValueType specifies the type of Exemplar measurement value. type ExemplarValueType int32 const ( // ExemplarValueTypeEmpty means that exemplar value is unset. ExemplarValueTypeEmpty ExemplarValueType = iota ExemplarValueTypeInt ExemplarValueTypeDouble ) // String returns the string representation of the ExemplarValueType. func (nt ExemplarValueType) String() string { switch nt { case ExemplarValueTypeEmpty: return "Empty" case ExemplarValueTypeInt: return "Int" case ExemplarValueTypeDouble: return "Double" } return "" }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/data" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // Exemplar is a sample input double measurement. // // Exemplars also hold information about the environment when the measurement was recorded, // for example the span and trace ID of the active span when the exemplar was recorded. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExemplar function to create new instances. // Important: zero-initialized instance is not valid for use. type Exemplar struct { orig *otlpmetrics.Exemplar state *internal.State } func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar { return Exemplar{orig: orig, state: state} } // NewExemplar creates a new empty Exemplar. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExemplar() Exemplar { state := internal.StateMutable return newExemplar(&otlpmetrics.Exemplar{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms Exemplar) MoveTo(dest Exemplar) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Exemplar{} } // Timestamp returns the timestamp associated with this Exemplar. func (ms Exemplar) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this Exemplar. func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // ValueType returns the type of the value for this Exemplar. // Calling this function on zero-initialized Exemplar will cause a panic. func (ms Exemplar) ValueType() ExemplarValueType { switch ms.orig.Value.(type) { case *otlpmetrics.Exemplar_AsDouble: return ExemplarValueTypeDouble case *otlpmetrics.Exemplar_AsInt: return ExemplarValueTypeInt } return ExemplarValueTypeEmpty } // DoubleValue returns the double associated with this Exemplar. func (ms Exemplar) DoubleValue() float64 { return ms.orig.GetAsDouble() } // SetDoubleValue replaces the double associated with this Exemplar. func (ms Exemplar) SetDoubleValue(v float64) { ms.state.AssertMutable() ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{ AsDouble: v, } } // IntValue returns the int associated with this Exemplar. func (ms Exemplar) IntValue() int64 { return ms.orig.GetAsInt() } // SetIntValue replaces the int associated with this Exemplar. func (ms Exemplar) SetIntValue(v int64) { ms.state.AssertMutable() ms.orig.Value = &otlpmetrics.Exemplar_AsInt{ AsInt: v, } } // FilteredAttributes returns the FilteredAttributes associated with this Exemplar. func (ms Exemplar) FilteredAttributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.FilteredAttributes, ms.state)) } // TraceID returns the traceid associated with this Exemplar. 
func (ms Exemplar) TraceID() pcommon.TraceID { return pcommon.TraceID(ms.orig.TraceId) } // SetTraceID replaces the traceid associated with this Exemplar. func (ms Exemplar) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() ms.orig.TraceId = data.TraceID(v) } // SpanID returns the spanid associated with this Exemplar. func (ms Exemplar) SpanID() pcommon.SpanID { return pcommon.SpanID(ms.orig.SpanId) } // SetSpanID replaces the spanid associated with this Exemplar. func (ms Exemplar) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() ms.orig.SpanId = data.SpanID(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Exemplar) CopyTo(dest Exemplar) { dest.state.AssertMutable() copyOrigExemplar(dest.orig, ms.orig) } func copyOrigExemplar(dest, src *otlpmetrics.Exemplar) { dest.TimeUnixNano = src.TimeUnixNano switch t := src.Value.(type) { case *otlpmetrics.Exemplar_AsDouble: dest.Value = &otlpmetrics.Exemplar_AsDouble{AsDouble: t.AsDouble} case *otlpmetrics.Exemplar_AsInt: dest.Value = &otlpmetrics.Exemplar_AsInt{AsInt: t.AsInt} } dest.FilteredAttributes = internal.CopyOrigMap(dest.FilteredAttributes, src.FilteredAttributes) dest.TraceId = src.TraceId dest.SpanId = src.SpanId }
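// Example (hypothetical): a sketch of populating an Exemplar via the accessors
// above and dispatching on ValueType(). The timestamp, value, and attribute
// are illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ex := pmetric.NewExemplar()
	ex.SetTimestamp(pcommon.Timestamp(1700000000_000000000))
	ex.SetIntValue(42)
	ex.FilteredAttributes().PutStr("pool", "primary")

	switch ex.ValueType() {
	case pmetric.ExemplarValueTypeInt:
		fmt.Println("int exemplar:", ex.IntValue())
	case pmetric.ExemplarValueTypeDouble:
		fmt.Println("double exemplar:", ex.DoubleValue())
	default:
		fmt.Println("empty exemplar")
	}
}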
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExemplarSlice logically represents a slice of Exemplar. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewExemplarSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ExemplarSlice struct { orig *[]otlpmetrics.Exemplar state *internal.State } func newExemplarSlice(orig *[]otlpmetrics.Exemplar, state *internal.State) ExemplarSlice { return ExemplarSlice{orig: orig, state: state} } // NewExemplarSlice creates an ExemplarSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewExemplarSlice() ExemplarSlice { orig := []otlpmetrics.Exemplar(nil) state := internal.StateMutable return newExemplarSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewExemplarSlice()". func (es ExemplarSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ExemplarSlice) At(i int) Exemplar { return newExemplar(&(*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ExemplarSlice) All() iter.Seq2[int, Exemplar] { return func(yield func(int, Exemplar) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ExemplarSlice can be initialized: // // es := NewExemplarSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ExemplarSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]otlpmetrics.Exemplar, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty Exemplar. // It returns the newly added Exemplar. func (es ExemplarSlice) AppendEmpty() Exemplar { es.state.AssertMutable() *es.orig = append(*es.orig, otlpmetrics.Exemplar{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ExemplarSlice) MoveAndAppendTo(dest ExemplarSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice. func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ExemplarSlice) CopyTo(dest ExemplarSlice) { dest.state.AssertMutable() *dest.orig = copyOrigExemplarSlice(*dest.orig, *es.orig) } func copyOrigExemplarSlice(dest, src []otlpmetrics.Exemplar) []otlpmetrics.Exemplar { if cap(dest) < len(src) { dest = make([]otlpmetrics.Exemplar, len(src)) } dest = dest[:len(src)] for i := range src { copyOrigExemplar(&dest[i], &src[i]) } return dest }
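// Example (hypothetical): a sketch of the ExemplarSlice contract above:
// EnsureCapacity to pre-size, AppendEmpty to fill, RemoveIf to compact in
// place. The predicate is illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	es := pmetric.NewExemplarSlice()
	es.EnsureCapacity(4) // a single allocation up front
	for i := 0; i < 4; i++ {
		es.AppendEmpty().SetIntValue(int64(i))
	}

	// Drop the exemplars with even values.
	es.RemoveIf(func(e pmetric.Exemplar) bool {
		return e.IntValue()%2 == 0
	})
	fmt.Println(es.Len()) // 2
}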
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExponentialHistogram represents the type of a metric that is calculated by aggregating as an ExponentialHistogram of all reported double measurements over a time interval. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExponentialHistogram function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogram struct { orig *otlpmetrics.ExponentialHistogram state *internal.State } func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *internal.State) ExponentialHistogram { return ExponentialHistogram{orig: orig, state: state} } // NewExponentialHistogram creates a new empty ExponentialHistogram. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExponentialHistogram() ExponentialHistogram { state := internal.StateMutable return newExponentialHistogram(&otlpmetrics.ExponentialHistogram{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.ExponentialHistogram{} } // AggregationTemporality returns the aggregationtemporality associated with this ExponentialHistogram. func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality { return AggregationTemporality(ms.orig.AggregationTemporality) } // SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram. func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) } // DataPoints returns the DataPoints associated with this ExponentialHistogram. func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice { return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) { dest.state.AssertMutable() copyOrigExponentialHistogram(dest.orig, ms.orig) } func copyOrigExponentialHistogram(dest, src *otlpmetrics.ExponentialHistogram) { dest.AggregationTemporality = src.AggregationTemporality dest.DataPoints = copyOrigExponentialHistogramDataPointSlice(dest.DataPoints, src.DataPoints) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ExponentialHistogramDataPoint is a single data point in a timeseries that describes the time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains summary statistics for a population of values; it may optionally contain the distribution of those values across a set of buckets. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExponentialHistogramDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPoint struct { orig *otlpmetrics.ExponentialHistogramDataPoint state *internal.State } func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint { return ExponentialHistogramDataPoint{orig: orig, state: state} } // NewExponentialHistogramDataPoint creates a new empty ExponentialHistogramDataPoint. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint { state := internal.StateMutable return newExponentialHistogramDataPoint(&otlpmetrics.ExponentialHistogramDataPoint{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoint) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.ExponentialHistogramDataPoint{} } // Attributes returns the Attributes associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) StartTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.StartTimeUnixNano) } // SetStartTimestamp replaces the starttimestamp associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.StartTimeUnixNano = uint64(v) } // Timestamp returns the timestamp associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // Count returns the count associated with this ExponentialHistogramDataPoint.
func (ms ExponentialHistogramDataPoint) Count() uint64 { return ms.orig.Count } // SetCount replaces the count associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetCount(v uint64) { ms.state.AssertMutable() ms.orig.Count = v } // Scale returns the scale associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Scale() int32 { return ms.orig.Scale } // SetScale replaces the scale associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetScale(v int32) { ms.state.AssertMutable() ms.orig.Scale = v } // ZeroCount returns the zerocount associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) ZeroCount() uint64 { return ms.orig.ZeroCount } // SetZeroCount replaces the zerocount associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetZeroCount(v uint64) { ms.state.AssertMutable() ms.orig.ZeroCount = v } // Positive returns the positive associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Positive() ExponentialHistogramDataPointBuckets { return newExponentialHistogramDataPointBuckets(&ms.orig.Positive, ms.state) } // Negative returns the negative associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Negative() ExponentialHistogramDataPointBuckets { return newExponentialHistogramDataPointBuckets(&ms.orig.Negative, ms.state) } // Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice { return newExemplarSlice(&ms.orig.Exemplars, ms.state) } // Flags returns the flags associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Flags() DataPointFlags { return DataPointFlags(ms.orig.Flags) } // SetFlags replaces the flags associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetFlags(v DataPointFlags) { ms.state.AssertMutable() ms.orig.Flags = uint32(v) } // Sum returns the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Sum() float64 { return ms.orig.GetSum() } // HasSum returns true if the ExponentialHistogramDataPoint contains a // Sum value, false otherwise. func (ms ExponentialHistogramDataPoint) HasSum() bool { return ms.orig.Sum_ != nil } // SetSum replaces the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetSum(v float64) { ms.state.AssertMutable() ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: v} } // RemoveSum removes the sum associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) RemoveSum() { ms.state.AssertMutable() ms.orig.Sum_ = nil } // Min returns the min associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Min() float64 { return ms.orig.GetMin() } // HasMin returns true if the ExponentialHistogramDataPoint contains a // Min value, false otherwise. func (ms ExponentialHistogramDataPoint) HasMin() bool { return ms.orig.Min_ != nil } // SetMin replaces the min associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetMin(v float64) { ms.state.AssertMutable() ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: v} } // RemoveMin removes the min associated with this ExponentialHistogramDataPoint. 
func (ms ExponentialHistogramDataPoint) RemoveMin() { ms.state.AssertMutable() ms.orig.Min_ = nil } // Max returns the max associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) Max() float64 { return ms.orig.GetMax() } // HasMax returns true if the ExponentialHistogramDataPoint contains a // Max value, false otherwise. func (ms ExponentialHistogramDataPoint) HasMax() bool { return ms.orig.Max_ != nil } // SetMax replaces the max associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetMax(v float64) { ms.state.AssertMutable() ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: v} } // RemoveMax removes the max associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) RemoveMax() { ms.state.AssertMutable() ms.orig.Max_ = nil } // ZeroThreshold returns the zerothreshold associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) ZeroThreshold() float64 { return ms.orig.ZeroThreshold } // SetZeroThreshold replaces the zerothreshold associated with this ExponentialHistogramDataPoint. func (ms ExponentialHistogramDataPoint) SetZeroThreshold(v float64) { ms.state.AssertMutable() ms.orig.ZeroThreshold = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) { dest.state.AssertMutable() copyOrigExponentialHistogramDataPoint(dest.orig, ms.orig) } func copyOrigExponentialHistogramDataPoint(dest, src *otlpmetrics.ExponentialHistogramDataPoint) { dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count dest.Scale = src.Scale dest.ZeroCount = src.ZeroCount copyOrigExponentialHistogramDataPointBuckets(&dest.Positive, &src.Positive) copyOrigExponentialHistogramDataPointBuckets(&dest.Negative, &src.Negative) dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars) dest.Flags = src.Flags if src.Sum_ == nil { dest.Sum_ = nil } else { dest.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: src.GetSum()} } if src.Min_ == nil { dest.Min_ = nil } else { dest.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: src.GetMin()} } if src.Max_ == nil { dest.Max_ = nil } else { dest.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: src.GetMax()} } dest.ZeroThreshold = src.ZeroThreshold }
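// Example (hypothetical): a sketch of filling an ExponentialHistogramDataPoint
// via the accessors above, including the optional Sum field. The scale and
// counts are illustrative; Count is kept equal to ZeroCount plus the bucket
// counts.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewExponentialHistogramDataPoint()
	dp.SetScale(2)
	dp.SetZeroCount(1)
	dp.Positive().SetOffset(-1)
	dp.Positive().BucketCounts().FromRaw([]uint64{2, 3, 1})
	dp.SetCount(7) // 1 zero + 6 positive
	dp.SetSum(10.5)

	if dp.HasSum() {
		fmt.Println("sum:", dp.Sum())
	}
	dp.RemoveSum()
	fmt.Println(dp.HasSum()) // false: Sum is optional and now unset
}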
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ExponentialHistogramDataPointBuckets are a set of bucket counts, encoded in a contiguous array of counts. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExponentialHistogramDataPointBuckets function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPointBuckets struct { orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets state *internal.State } func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, state *internal.State) ExponentialHistogramDataPointBuckets { return ExponentialHistogramDataPointBuckets{orig: orig, state: state} } // NewExponentialHistogramDataPointBuckets creates a new empty ExponentialHistogramDataPointBuckets. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets { state := internal.StateMutable return newExponentialHistogramDataPointBuckets(&otlpmetrics.ExponentialHistogramDataPoint_Buckets{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramDataPointBuckets) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.ExponentialHistogramDataPoint_Buckets{} } // Offset returns the offset associated with this ExponentialHistogramDataPointBuckets. func (ms ExponentialHistogramDataPointBuckets) Offset() int32 { return ms.orig.Offset } // SetOffset replaces the offset associated with this ExponentialHistogramDataPointBuckets. func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) { ms.state.AssertMutable() ms.orig.Offset = v } // BucketCounts returns the bucketcounts associated with this ExponentialHistogramDataPointBuckets. func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice { return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) { dest.state.AssertMutable() copyOrigExponentialHistogramDataPointBuckets(dest.orig, ms.orig) } func copyOrigExponentialHistogramDataPointBuckets(dest, src *otlpmetrics.ExponentialHistogramDataPoint_Buckets) { dest.Offset = src.Offset dest.BucketCounts = internal.CopyOrigUInt64Slice(dest.BucketCounts, src.BucketCounts) }
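// Example (hypothetical): a sketch of turning Offset and Scale back into
// bucket boundaries. Under the OTLP exponential-histogram encoding, the bucket
// at index i covers (base^i, base^(i+1)] with base = 2^(2^-scale), and the
// first stored count sits at index Offset(). The boundsAt helper below is
// illustrative, not part of this package.
package main

import (
	"fmt"
	"math"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// boundsAt returns the (lower, upper] boundaries of the count stored at
// position pos in b, for a data point with the given scale.
func boundsAt(b pmetric.ExponentialHistogramDataPointBuckets, scale int32, pos int) (float64, float64) {
	base := math.Pow(2, math.Pow(2, -float64(scale)))
	idx := float64(b.Offset()) + float64(pos)
	return math.Pow(base, idx), math.Pow(base, idx+1)
}

func main() {
	b := pmetric.NewExponentialHistogramDataPointBuckets()
	b.SetOffset(0)
	b.BucketCounts().FromRaw([]uint64{5})
	lo, hi := boundsAt(b, 0, 0)      // scale 0, so base = 2
	fmt.Printf("(%g, %g]\n", lo, hi) // (1, 2]
}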
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewExponentialHistogramDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ExponentialHistogramDataPointSlice struct { orig *[]*otlpmetrics.ExponentialHistogramDataPoint state *internal.State } func newExponentialHistogramDataPointSlice(orig *[]*otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice { return ExponentialHistogramDataPointSlice{orig: orig, state: state} } // NewExponentialHistogramDataPointSlice creates an ExponentialHistogramDataPointSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice { orig := []*otlpmetrics.ExponentialHistogramDataPoint(nil) state := internal.StateMutable return newExponentialHistogramDataPointSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewExponentialHistogramDataPointSlice()". func (es ExponentialHistogramDataPointSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ExponentialHistogramDataPointSlice) At(i int) ExponentialHistogramDataPoint { return newExponentialHistogramDataPoint((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ExponentialHistogramDataPointSlice) All() iter.Seq2[int, ExponentialHistogramDataPoint] { return func(yield func(int, ExponentialHistogramDataPoint) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ExponentialHistogramDataPointSlice can be initialized: // // es := NewExponentialHistogramDataPointSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.ExponentialHistogramDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ExponentialHistogramDataPoint. // It returns the newly added ExponentialHistogramDataPoint.
func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.ExponentialHistogramDataPoint{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ExponentialHistogramDataPointSlice) MoveAndAppendTo(dest ExponentialHistogramDataPointSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogramDataPoint) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDataPointSlice) { dest.state.AssertMutable() *dest.orig = copyOrigExponentialHistogramDataPointSlice(*dest.orig, *es.orig) } // Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the // provided less function so that two instances of ExponentialHistogramDataPointSlice // can be compared. func (es ExponentialHistogramDataPointSlice) Sort(less func(a, b ExponentialHistogramDataPoint) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigExponentialHistogramDataPointSlice(dest, src []*otlpmetrics.ExponentialHistogramDataPoint) []*otlpmetrics.ExponentialHistogramDataPoint { if cap(dest) < len(src) { dest = make([]*otlpmetrics.ExponentialHistogramDataPoint, len(src)) data := make([]otlpmetrics.ExponentialHistogramDataPoint, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigExponentialHistogramDataPoint(dest[i], src[i]) } return dest }
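// Example (hypothetical): a sketch of merging two data-point slices with
// MoveAndAppendTo, which, as documented above, clears the source.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewExponentialHistogramDataPointSlice()
	src.AppendEmpty().SetScale(1)

	dst := pmetric.NewExponentialHistogramDataPointSlice()
	dst.AppendEmpty().SetScale(2)

	src.MoveAndAppendTo(dst)
	fmt.Println(src.Len(), dst.Len()) // 0 2
}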
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Gauge represents the type of a numeric metric that always exports the "current value" for every data point. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewGauge function to create new instances. // Important: zero-initialized instance is not valid for use. type Gauge struct { orig *otlpmetrics.Gauge state *internal.State } func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge { return Gauge{orig: orig, state: state} } // NewGauge creates a new empty Gauge. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewGauge() Gauge { state := internal.StateMutable return newGauge(&otlpmetrics.Gauge{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms Gauge) MoveTo(dest Gauge) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Gauge{} } // DataPoints returns the DataPoints associated with this Gauge. func (ms Gauge) DataPoints() NumberDataPointSlice { return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Gauge) CopyTo(dest Gauge) { dest.state.AssertMutable() copyOrigGauge(dest.orig, ms.orig) } func copyOrigGauge(dest, src *otlpmetrics.Gauge) { dest.DataPoints = copyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints) }
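// Example (hypothetical): a sketch of attaching a point to a Gauge through
// DataPoints() above. NumberDataPoint and its setters come from elsewhere in
// this package; the value is illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	g := pmetric.NewGauge()
	dp := g.DataPoints().AppendEmpty()
	dp.SetDoubleValue(0.75)
	fmt.Println(g.DataPoints().Len()) // 1
}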
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewHistogram function to create new instances. // Important: zero-initialized instance is not valid for use. type Histogram struct { orig *otlpmetrics.Histogram state *internal.State } func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram { return Histogram{orig: orig, state: state} } // NewHistogram creates a new empty Histogram. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewHistogram() Histogram { state := internal.StateMutable return newHistogram(&otlpmetrics.Histogram{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms Histogram) MoveTo(dest Histogram) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Histogram{} } // AggregationTemporality returns the aggregationtemporality associated with this Histogram. func (ms Histogram) AggregationTemporality() AggregationTemporality { return AggregationTemporality(ms.orig.AggregationTemporality) } // SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram. func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) } // DataPoints returns the DataPoints associated with this Histogram. func (ms Histogram) DataPoints() HistogramDataPointSlice { return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Histogram) CopyTo(dest Histogram) { dest.state.AssertMutable() copyOrigHistogram(dest.orig, ms.orig) } func copyOrigHistogram(dest, src *otlpmetrics.Histogram) { dest.AggregationTemporality = src.AggregationTemporality dest.DataPoints = copyOrigHistogramDataPointSlice(dest.DataPoints, src.DataPoints) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewHistogramDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type HistogramDataPoint struct { orig *otlpmetrics.HistogramDataPoint state *internal.State } func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPoint { return HistogramDataPoint{orig: orig, state: state} } // NewHistogramDataPoint creates a new empty HistogramDataPoint. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewHistogramDataPoint() HistogramDataPoint { state := internal.StateMutable return newHistogramDataPoint(&otlpmetrics.HistogramDataPoint{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.HistogramDataPoint{} } // Attributes returns the Attributes associated with this HistogramDataPoint. func (ms HistogramDataPoint) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this HistogramDataPoint. func (ms HistogramDataPoint) StartTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.StartTimeUnixNano) } // SetStartTimestamp replaces the starttimestamp associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.StartTimeUnixNano = uint64(v) } // Timestamp returns the timestamp associated with this HistogramDataPoint. func (ms HistogramDataPoint) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // Count returns the count associated with this HistogramDataPoint. func (ms HistogramDataPoint) Count() uint64 { return ms.orig.Count } // SetCount replaces the count associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetCount(v uint64) { ms.state.AssertMutable() ms.orig.Count = v } // BucketCounts returns the bucketcounts associated with this HistogramDataPoint. func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice { return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) } // ExplicitBounds returns the explicitbounds associated with this HistogramDataPoint. 
func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice { return pcommon.Float64Slice(internal.NewFloat64Slice(&ms.orig.ExplicitBounds, ms.state)) } // Exemplars returns the Exemplars associated with this HistogramDataPoint. func (ms HistogramDataPoint) Exemplars() ExemplarSlice { return newExemplarSlice(&ms.orig.Exemplars, ms.state) } // Flags returns the flags associated with this HistogramDataPoint. func (ms HistogramDataPoint) Flags() DataPointFlags { return DataPointFlags(ms.orig.Flags) } // SetFlags replaces the flags associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetFlags(v DataPointFlags) { ms.state.AssertMutable() ms.orig.Flags = uint32(v) } // Sum returns the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) Sum() float64 { return ms.orig.GetSum() } // HasSum returns true if the HistogramDataPoint contains a // Sum value, false otherwise. func (ms HistogramDataPoint) HasSum() bool { return ms.orig.Sum_ != nil } // SetSum replaces the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetSum(v float64) { ms.state.AssertMutable() ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: v} } // RemoveSum removes the sum associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveSum() { ms.state.AssertMutable() ms.orig.Sum_ = nil } // Min returns the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) Min() float64 { return ms.orig.GetMin() } // HasMin returns true if the HistogramDataPoint contains a // Min value, false otherwise. func (ms HistogramDataPoint) HasMin() bool { return ms.orig.Min_ != nil } // SetMin replaces the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetMin(v float64) { ms.state.AssertMutable() ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: v} } // RemoveMin removes the min associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveMin() { ms.state.AssertMutable() ms.orig.Min_ = nil } // Max returns the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) Max() float64 { return ms.orig.GetMax() } // HasMax returns true if the HistogramDataPoint contains a // Max value, false otherwise. func (ms HistogramDataPoint) HasMax() bool { return ms.orig.Max_ != nil } // SetMax replaces the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) SetMax(v float64) { ms.state.AssertMutable() ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: v} } // RemoveMax removes the max associated with this HistogramDataPoint. func (ms HistogramDataPoint) RemoveMax() { ms.state.AssertMutable() ms.orig.Max_ = nil } // CopyTo copies all properties from the current struct overriding the destination. 
func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) { dest.state.AssertMutable() copyOrigHistogramDataPoint(dest.orig, ms.orig) } func copyOrigHistogramDataPoint(dest, src *otlpmetrics.HistogramDataPoint) { dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count dest.BucketCounts = internal.CopyOrigUInt64Slice(dest.BucketCounts, src.BucketCounts) dest.ExplicitBounds = internal.CopyOrigFloat64Slice(dest.ExplicitBounds, src.ExplicitBounds) dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars) dest.Flags = src.Flags if src.Sum_ == nil { dest.Sum_ = nil } else { dest.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: src.GetSum()} } if src.Min_ == nil { dest.Min_ = nil } else { dest.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: src.GetMin()} } if src.Max_ == nil { dest.Max_ = nil } else { dest.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: src.GetMax()} } }
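// Example (hypothetical): a sketch of populating a HistogramDataPoint with the
// accessors above. With explicit bounds, BucketCounts carries one more entry
// than ExplicitBounds (the final bucket counts values above the last bound);
// all numbers here are illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewHistogramDataPoint()
	dp.ExplicitBounds().FromRaw([]float64{0.1, 1, 10})
	dp.BucketCounts().FromRaw([]uint64{4, 2, 1, 0}) // len(bounds)+1 buckets
	dp.SetCount(7)                                  // 4+2+1+0
	dp.SetSum(9.3)
	dp.SetMin(0.02)
	dp.SetMax(4.1)

	fmt.Println(dp.BucketCounts().Len() == dp.ExplicitBounds().Len()+1) // true
	if dp.HasMin() && dp.HasMax() {
		fmt.Println(dp.Min(), dp.Max())
	}
}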
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // HistogramDataPointSlice logically represents a slice of HistogramDataPoint. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewHistogramDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type HistogramDataPointSlice struct { orig *[]*otlpmetrics.HistogramDataPoint state *internal.State } func newHistogramDataPointSlice(orig *[]*otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPointSlice { return HistogramDataPointSlice{orig: orig, state: state} } // NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewHistogramDataPointSlice() HistogramDataPointSlice { orig := []*otlpmetrics.HistogramDataPoint(nil) state := internal.StateMutable return newHistogramDataPointSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewHistogramDataPointSlice()". func (es HistogramDataPointSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es HistogramDataPointSlice) At(i int) HistogramDataPoint { return newHistogramDataPoint((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es HistogramDataPointSlice) All() iter.Seq2[int, HistogramDataPoint] { return func(yield func(int, HistogramDataPoint) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new HistogramDataPointSlice can be initialized: // // es := NewHistogramDataPointSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es HistogramDataPointSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.HistogramDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty HistogramDataPoint. // It returns the newly added HistogramDataPoint. func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.HistogramDataPoint{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared.
func (es HistogramDataPointSlice) MoveAndAppendTo(dest HistogramDataPointSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) { dest.state.AssertMutable() *dest.orig = copyOrigHistogramDataPointSlice(*dest.orig, *es.orig) } // Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the // provided less function so that two instances of HistogramDataPointSlice // can be compared. func (es HistogramDataPointSlice) Sort(less func(a, b HistogramDataPoint) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigHistogramDataPointSlice(dest, src []*otlpmetrics.HistogramDataPoint) []*otlpmetrics.HistogramDataPoint { if cap(dest) < len(src) { dest = make([]*otlpmetrics.HistogramDataPoint, len(src)) data := make([]otlpmetrics.HistogramDataPoint, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigHistogramDataPoint(dest[i], src[i]) } return dest }
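// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: exercising the slice
// operations above. RemoveIf compacts in place, Sort orders via a caller
// supplied less func, and MoveAndAppendTo transfers ownership and clears the
// source. Assumes the SetCount/Count accessors from the elided earlier part
// of the HistogramDataPoint file.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewHistogramDataPointSlice()
	for i := 0; i < 4; i++ {
		src.AppendEmpty().SetCount(uint64(4 - i))
	}

	// Drop empty points; all four have a non-zero count here, so none match.
	src.RemoveIf(func(dp pmetric.HistogramDataPoint) bool { return dp.Count() == 0 })

	// Order by count, ascending.
	src.Sort(func(a, b pmetric.HistogramDataPoint) bool { return a.Count() < b.Count() })

	dst := pmetric.NewHistogramDataPointSlice()
	src.MoveAndAppendTo(dst)
	fmt.Println(src.Len(), dst.Len()) // 0 4
}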
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // Metric represents one metric as a collection of datapoints. // See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewMetric function to create new instances. // Important: zero-initialized instance is not valid for use. type Metric struct { orig *otlpmetrics.Metric state *internal.State } func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric { return Metric{orig: orig, state: state} } // NewMetric creates a new empty Metric. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewMetric() Metric { state := internal.StateMutable return newMetric(&otlpmetrics.Metric{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms Metric) MoveTo(dest Metric) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Metric{} } // Name returns the name associated with this Metric. func (ms Metric) Name() string { return ms.orig.Name } // SetName replaces the name associated with this Metric. func (ms Metric) SetName(v string) { ms.state.AssertMutable() ms.orig.Name = v } // Description returns the description associated with this Metric. func (ms Metric) Description() string { return ms.orig.Description } // SetDescription replaces the description associated with this Metric. func (ms Metric) SetDescription(v string) { ms.state.AssertMutable() ms.orig.Description = v } // Unit returns the unit associated with this Metric. func (ms Metric) Unit() string { return ms.orig.Unit } // SetUnit replaces the unit associated with this Metric. func (ms Metric) SetUnit(v string) { ms.state.AssertMutable() ms.orig.Unit = v } // Metadata returns the Metadata associated with this Metric. func (ms Metric) Metadata() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Metadata, ms.state)) } // Type returns the type of the data for this Metric. // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Type() MetricType { switch ms.orig.Data.(type) { case *otlpmetrics.Metric_Gauge: return MetricTypeGauge case *otlpmetrics.Metric_Sum: return MetricTypeSum case *otlpmetrics.Metric_Histogram: return MetricTypeHistogram case *otlpmetrics.Metric_ExponentialHistogram: return MetricTypeExponentialHistogram case *otlpmetrics.Metric_Summary: return MetricTypeSummary } return MetricTypeEmpty } // Gauge returns the gauge associated with this Metric. // // Calling this function when Type() != MetricTypeGauge returns an invalid // zero-initialized instance of Gauge. Note that using such Gauge instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic.
func (ms Metric) Gauge() Gauge { v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Gauge) if !ok { return Gauge{} } return newGauge(v.Gauge, ms.state) } // SetEmptyGauge sets an empty gauge to this Metric. // // After this, Type() function will return "MetricTypeGauge". // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyGauge() Gauge { ms.state.AssertMutable() val := &otlpmetrics.Gauge{} ms.orig.Data = &otlpmetrics.Metric_Gauge{Gauge: val} return newGauge(val, ms.state) } // Sum returns the sum associated with this Metric. // // Calling this function when Type() != MetricTypeSum returns an invalid // zero-initialized instance of Sum. Note that using such Sum instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Sum() Sum { v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Sum) if !ok { return Sum{} } return newSum(v.Sum, ms.state) } // SetEmptySum sets an empty sum to this Metric. // // After this, Type() function will return "MetricTypeSum". // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptySum() Sum { ms.state.AssertMutable() val := &otlpmetrics.Sum{} ms.orig.Data = &otlpmetrics.Metric_Sum{Sum: val} return newSum(val, ms.state) } // Histogram returns the histogram associated with this Metric. // // Calling this function when Type() != MetricTypeHistogram returns an invalid // zero-initialized instance of Histogram. Note that using such Histogram instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Histogram() Histogram { v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Histogram) if !ok { return Histogram{} } return newHistogram(v.Histogram, ms.state) } // SetEmptyHistogram sets an empty histogram to this Metric. // // After this, Type() function will return "MetricTypeHistogram". // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyHistogram() Histogram { ms.state.AssertMutable() val := &otlpmetrics.Histogram{} ms.orig.Data = &otlpmetrics.Metric_Histogram{Histogram: val} return newHistogram(val, ms.state) } // ExponentialHistogram returns the exponentialhistogram associated with this Metric. // // Calling this function when Type() != MetricTypeExponentialHistogram returns an invalid // zero-initialized instance of ExponentialHistogram. Note that using such ExponentialHistogram instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) ExponentialHistogram() ExponentialHistogram { v, ok := ms.orig.GetData().(*otlpmetrics.Metric_ExponentialHistogram) if !ok { return ExponentialHistogram{} } return newExponentialHistogram(v.ExponentialHistogram, ms.state) } // SetEmptyExponentialHistogram sets an empty exponentialhistogram to this Metric. // // After this, Type() function will return "MetricTypeExponentialHistogram". // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram { ms.state.AssertMutable() val := &otlpmetrics.ExponentialHistogram{} ms.orig.Data = &otlpmetrics.Metric_ExponentialHistogram{ExponentialHistogram: val} return newExponentialHistogram(val, ms.state) } // Summary returns the summary associated with this Metric. // // Calling this function when Type() != MetricTypeSummary returns an invalid // zero-initialized instance of Summary.
Note that using such Summary instance can cause panic. // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) Summary() Summary { v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Summary) if !ok { return Summary{} } return newSummary(v.Summary, ms.state) } // SetEmptySummary sets an empty summary to this Metric. // // After this, Type() function will return "MetricTypeSummary". // // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) SetEmptySummary() Summary { ms.state.AssertMutable() val := &otlpmetrics.Summary{} ms.orig.Data = &otlpmetrics.Metric_Summary{Summary: val} return newSummary(val, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Metric) CopyTo(dest Metric) { dest.state.AssertMutable() copyOrigMetric(dest.orig, ms.orig) } func copyOrigMetric(dest, src *otlpmetrics.Metric) { dest.Name = src.Name dest.Description = src.Description dest.Unit = src.Unit dest.Metadata = internal.CopyOrigMap(dest.Metadata, src.Metadata) switch t := src.Data.(type) { case *otlpmetrics.Metric_Gauge: gauge := &otlpmetrics.Gauge{} copyOrigGauge(gauge, t.Gauge) dest.Data = &otlpmetrics.Metric_Gauge{ Gauge: gauge, } case *otlpmetrics.Metric_Sum: sum := &otlpmetrics.Sum{} copyOrigSum(sum, t.Sum) dest.Data = &otlpmetrics.Metric_Sum{ Sum: sum, } case *otlpmetrics.Metric_Histogram: histogram := &otlpmetrics.Histogram{} copyOrigHistogram(histogram, t.Histogram) dest.Data = &otlpmetrics.Metric_Histogram{ Histogram: histogram, } case *otlpmetrics.Metric_ExponentialHistogram: exponentialhistogram := &otlpmetrics.ExponentialHistogram{} copyOrigExponentialHistogram(exponentialhistogram, t.ExponentialHistogram) dest.Data = &otlpmetrics.Metric_ExponentialHistogram{ ExponentialHistogram: exponentialhistogram, } case *otlpmetrics.Metric_Summary: summary := &otlpmetrics.Summary{} copyOrigSummary(summary, t.Summary) dest.Data = &otlpmetrics.Metric_Summary{ Summary: summary, } } }
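// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: the Data oneof means at
// most one of the SetEmpty* setters is active at a time; Type() reports which
// one, and the typed getter returns a zero-value wrapper on mismatch. Assumes
// Gauge.DataPoints() from the companion generated Gauge file.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric() // testing-only constructor, per the doc comment
	m.SetName("queue.length")
	m.SetUnit("1")

	g := m.SetEmptyGauge()
	g.DataPoints().AppendEmpty().SetIntValue(42)

	switch m.Type() {
	case pmetric.MetricTypeGauge:
		fmt.Println("gauge:", m.Gauge().DataPoints().At(0).IntValue())
	default:
		fmt.Println("unexpected type", m.Type())
	}

	// Switching the oneof replaces the previous payload entirely.
	m.SetEmptySum()
	fmt.Println(m.Type() == pmetric.MetricTypeSum) // true
}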
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // MetricSlice logically represents a slice of Metric. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewMetricSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type MetricSlice struct { orig *[]*otlpmetrics.Metric state *internal.State } func newMetricSlice(orig *[]*otlpmetrics.Metric, state *internal.State) MetricSlice { return MetricSlice{orig: orig, state: state} } // NewMetricSlice creates a MetricSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewMetricSlice() MetricSlice { orig := []*otlpmetrics.Metric(nil) state := internal.StateMutable return newMetricSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewMetricSlice()". func (es MetricSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es MetricSlice) At(i int) Metric { return newMetric((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es MetricSlice) All() iter.Seq2[int, Metric] { return func(yield func(int, Metric) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new MetricSlice can be initialized: // // es := NewMetricSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es MetricSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.Metric, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty Metric. // It returns the newly added Metric. func (es MetricSlice) AppendEmpty() Metric { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.Metric{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice.
func (es MetricSlice) RemoveIf(f func(Metric) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es MetricSlice) CopyTo(dest MetricSlice) { dest.state.AssertMutable() *dest.orig = copyOrigMetricSlice(*dest.orig, *es.orig) } // Sort sorts the Metric elements within MetricSlice given the // provided less function so that two instances of MetricSlice // can be compared. func (es MetricSlice) Sort(less func(a, b Metric) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigMetricSlice(dest, src []*otlpmetrics.Metric) []*otlpmetrics.Metric { if cap(dest) < len(src) { dest = make([]*otlpmetrics.Metric, len(src)) data := make([]otlpmetrics.Metric, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigMetric(dest[i], src[i]) } return dest }
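// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: filtering a MetricSlice
// in place with RemoveIf and walking it with the All() iterator (Go 1.23+
// range-over-func). The metric names are made up for the example.
package main

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ms := pmetric.NewMetricSlice()
	for _, name := range []string{"http.duration", "tmp.debug", "queue.length"} {
		ms.AppendEmpty().SetName(name)
	}

	// Drop scratch metrics; the remaining elements are compacted in place.
	ms.RemoveIf(func(m pmetric.Metric) bool { return strings.HasPrefix(m.Name(), "tmp.") })

	for i, m := range ms.All() {
		fmt.Println(i, m.Name()) // 0 http.duration, then 1 queue.length
	}
}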
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewNumberDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type NumberDataPoint struct { orig *otlpmetrics.NumberDataPoint state *internal.State } func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPoint { return NumberDataPoint{orig: orig, state: state} } // NewNumberDataPoint creates a new empty NumberDataPoint. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewNumberDataPoint() NumberDataPoint { state := internal.StateMutable return newNumberDataPoint(&otlpmetrics.NumberDataPoint{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.NumberDataPoint{} } // Attributes returns the Attributes associated with this NumberDataPoint. func (ms NumberDataPoint) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this NumberDataPoint. func (ms NumberDataPoint) StartTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.StartTimeUnixNano) } // SetStartTimestamp replaces the starttimestamp associated with this NumberDataPoint. func (ms NumberDataPoint) SetStartTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.StartTimeUnixNano = uint64(v) } // Timestamp returns the timestamp associated with this NumberDataPoint. func (ms NumberDataPoint) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this NumberDataPoint. func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // ValueType returns the type of the value for this NumberDataPoint. // Calling this function on zero-initialized NumberDataPoint will cause a panic. func (ms NumberDataPoint) ValueType() NumberDataPointValueType { switch ms.orig.Value.(type) { case *otlpmetrics.NumberDataPoint_AsDouble: return NumberDataPointValueTypeDouble case *otlpmetrics.NumberDataPoint_AsInt: return NumberDataPointValueTypeInt } return NumberDataPointValueTypeEmpty } // DoubleValue returns the double associated with this NumberDataPoint. func (ms NumberDataPoint) DoubleValue() float64 { return ms.orig.GetAsDouble() } // SetDoubleValue replaces the double associated with this NumberDataPoint.
func (ms NumberDataPoint) SetDoubleValue(v float64) { ms.state.AssertMutable() ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{ AsDouble: v, } } // IntValue returns the int associated with this NumberDataPoint. func (ms NumberDataPoint) IntValue() int64 { return ms.orig.GetAsInt() } // SetIntValue replaces the int associated with this NumberDataPoint. func (ms NumberDataPoint) SetIntValue(v int64) { ms.state.AssertMutable() ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{ AsInt: v, } } // Exemplars returns the Exemplars associated with this NumberDataPoint. func (ms NumberDataPoint) Exemplars() ExemplarSlice { return newExemplarSlice(&ms.orig.Exemplars, ms.state) } // Flags returns the flags associated with this NumberDataPoint. func (ms NumberDataPoint) Flags() DataPointFlags { return DataPointFlags(ms.orig.Flags) } // SetFlags replaces the flags associated with this NumberDataPoint. func (ms NumberDataPoint) SetFlags(v DataPointFlags) { ms.state.AssertMutable() ms.orig.Flags = uint32(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) { dest.state.AssertMutable() copyOrigNumberDataPoint(dest.orig, ms.orig) } func copyOrigNumberDataPoint(dest, src *otlpmetrics.NumberDataPoint) { dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano switch t := src.Value.(type) { case *otlpmetrics.NumberDataPoint_AsDouble: dest.Value = &otlpmetrics.NumberDataPoint_AsDouble{AsDouble: t.AsDouble} case *otlpmetrics.NumberDataPoint_AsInt: dest.Value = &otlpmetrics.NumberDataPoint_AsInt{AsInt: t.AsInt} } dest.Exemplars = copyOrigExemplarSlice(dest.Exemplars, src.Exemplars) dest.Flags = src.Flags }
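// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: a NumberDataPoint carries
// either an int or a double value in its Value oneof; the last Set*Value call
// wins, and ValueType tells readers which getter is meaningful.
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewNumberDataPoint() // testing-only constructor
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.Attributes().PutStr("host", "web-1")

	dp.SetIntValue(7)
	dp.SetDoubleValue(7.5) // replaces the int value in the oneof

	switch dp.ValueType() {
	case pmetric.NumberDataPointValueTypeDouble:
		fmt.Println("double:", dp.DoubleValue()) // this branch runs
	case pmetric.NumberDataPointValueTypeInt:
		fmt.Println("int:", dp.IntValue())
	}
}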
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // NumberDataPointSlice logically represents a slice of NumberDataPoint. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewNumberDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type NumberDataPointSlice struct { orig *[]*otlpmetrics.NumberDataPoint state *internal.State } func newNumberDataPointSlice(orig *[]*otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPointSlice { return NumberDataPointSlice{orig: orig, state: state} } // NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewNumberDataPointSlice() NumberDataPointSlice { orig := []*otlpmetrics.NumberDataPoint(nil) state := internal.StateMutable return newNumberDataPointSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewNumberDataPointSlice()". func (es NumberDataPointSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es NumberDataPointSlice) At(i int) NumberDataPoint { return newNumberDataPoint((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es NumberDataPointSlice) All() iter.Seq2[int, NumberDataPoint] { return func(yield func(int, NumberDataPoint) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new NumberDataPointSlice can be initialized: // // es := NewNumberDataPointSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es NumberDataPointSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.NumberDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty NumberDataPoint. // It returns the newly added NumberDataPoint. func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.NumberDataPoint{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es NumberDataPointSlice) MoveAndAppendTo(dest NumberDataPointSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) { dest.state.AssertMutable() *dest.orig = copyOrigNumberDataPointSlice(*dest.orig, *es.orig) } // Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the // provided less function so that two instances of NumberDataPointSlice // can be compared. func (es NumberDataPointSlice) Sort(less func(a, b NumberDataPoint) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigNumberDataPointSlice(dest, src []*otlpmetrics.NumberDataPoint) []*otlpmetrics.NumberDataPoint { if cap(dest) < len(src) { dest = make([]*otlpmetrics.NumberDataPoint, len(src)) data := make([]otlpmetrics.NumberDataPoint, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigNumberDataPoint(dest[i], src[i]) } return dest }
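// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: EnsureCapacity pre-sizes
// the backing slice so a known number of AppendEmpty calls grows it once
// instead of reallocating repeatedly.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dps := pmetric.NewNumberDataPointSlice()
	const n = 1000
	dps.EnsureCapacity(n) // a single up-front allocation of the pointer slice
	for i := 0; i < n; i++ {
		dps.AppendEmpty().SetIntValue(int64(i))
	}
	fmt.Println(dps.Len()) // 1000
}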
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ResourceMetrics is a collection of metrics from a Resource. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewResourceMetrics function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceMetrics struct { orig *otlpmetrics.ResourceMetrics state *internal.State } func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetrics { return ResourceMetrics{orig: orig, state: state} } // NewResourceMetrics creates a new empty ResourceMetrics. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewResourceMetrics() ResourceMetrics { state := internal.StateMutable return newResourceMetrics(&otlpmetrics.ResourceMetrics{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.ResourceMetrics{} } // Resource returns the resource associated with this ResourceMetrics. func (ms ResourceMetrics) Resource() pcommon.Resource { return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) } // SchemaUrl returns the schemaurl associated with this ResourceMetrics. func (ms ResourceMetrics) SchemaUrl() string { return ms.orig.SchemaUrl } // SetSchemaUrl replaces the schemaurl associated with this ResourceMetrics. func (ms ResourceMetrics) SetSchemaUrl(v string) { ms.state.AssertMutable() ms.orig.SchemaUrl = v } // ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics. func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice { return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) { dest.state.AssertMutable() copyOrigResourceMetrics(dest.orig, ms.orig) } func copyOrigResourceMetrics(dest, src *otlpmetrics.ResourceMetrics) { internal.CopyOrigResource(&dest.Resource, &src.Resource) dest.SchemaUrl = src.SchemaUrl dest.ScopeMetrics = copyOrigScopeMetricsSlice(dest.ScopeMetrics, src.ScopeMetrics) }
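// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: a ResourceMetrics node
// ties a pcommon.Resource and schema URL to its ScopeMetrics children. It is
// normally created via Metrics.ResourceMetrics().AppendEmpty() rather than
// the testing-only NewResourceMetrics. The attribute values are made up.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().PutStr("service.name", "checkout")
	rm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")

	sm := rm.ScopeMetrics().AppendEmpty()
	sm.Scope().SetName("example.instrumentation")

	fmt.Println(md.ResourceMetrics().Len(), rm.ScopeMetrics().Len()) // 1 1
}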
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ResourceMetricsSlice logically represents a slice of ResourceMetrics. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewResourceMetricsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceMetricsSlice struct { orig *[]*otlpmetrics.ResourceMetrics state *internal.State } func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetricsSlice { return ResourceMetricsSlice{orig: orig, state: state} } // NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceMetricsSlice() ResourceMetricsSlice { orig := []*otlpmetrics.ResourceMetrics(nil) state := internal.StateMutable return newResourceMetricsSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewResourceMetricsSlice()". func (es ResourceMetricsSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ResourceMetricsSlice) At(i int) ResourceMetrics { return newResourceMetrics((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ResourceMetricsSlice) All() iter.Seq2[int, ResourceMetrics] { return func(yield func(int, ResourceMetrics) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ResourceMetricsSlice can be initialized: // // es := NewResourceMetricsSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ResourceMetricsSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.ResourceMetrics, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ResourceMetrics. // It returns the newly added ResourceMetrics. func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.ResourceMetrics{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) { dest.state.AssertMutable() *dest.orig = copyOrigResourceMetricsSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the // provided less function so that two instances of ResourceMetricsSlice // can be compared. func (es ResourceMetricsSlice) Sort(less func(a, b ResourceMetrics) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigResourceMetricsSlice(dest, src []*otlpmetrics.ResourceMetrics) []*otlpmetrics.ResourceMetrics { if cap(dest) < len(src) { dest = make([]*otlpmetrics.ResourceMetrics, len(src)) data := make([]otlpmetrics.ResourceMetrics, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigResourceMetrics(dest[i], src[i]) } return dest }
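// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: CopyTo performs a deep
// copy and overwrites whatever the destination held, so the two slices share
// no data afterwards, unlike MoveAndAppendTo, which transfers ownership.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	src := pmetric.NewResourceMetricsSlice()
	src.AppendEmpty().SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")

	dst := pmetric.NewResourceMetricsSlice()
	src.CopyTo(dst)

	// Mutating the copy leaves the source untouched.
	dst.At(0).SetSchemaUrl("https://example.com/custom-schema")
	fmt.Println(src.At(0).SchemaUrl() == dst.At(0).SchemaUrl()) // false
}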
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ScopeMetrics is a collection of metrics from an InstrumentationScope. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewScopeMetrics function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeMetrics struct { orig *otlpmetrics.ScopeMetrics state *internal.State } func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetrics { return ScopeMetrics{orig: orig, state: state} } // NewScopeMetrics creates a new empty ScopeMetrics. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewScopeMetrics() ScopeMetrics { state := internal.StateMutable return newScopeMetrics(&otlpmetrics.ScopeMetrics{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.ScopeMetrics{} } // Scope returns the scope associated with this ScopeMetrics. func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope { return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) } // SchemaUrl returns the schemaurl associated with this ScopeMetrics. func (ms ScopeMetrics) SchemaUrl() string { return ms.orig.SchemaUrl } // SetSchemaUrl replaces the schemaurl associated with this ScopeMetrics. func (ms ScopeMetrics) SetSchemaUrl(v string) { ms.state.AssertMutable() ms.orig.SchemaUrl = v } // Metrics returns the Metrics associated with this ScopeMetrics. func (ms ScopeMetrics) Metrics() MetricSlice { return newMetricSlice(&ms.orig.Metrics, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) { dest.state.AssertMutable() copyOrigScopeMetrics(dest.orig, ms.orig) } func copyOrigScopeMetrics(dest, src *otlpmetrics.ScopeMetrics) { internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope) dest.SchemaUrl = src.SchemaUrl dest.Metrics = copyOrigMetricSlice(dest.Metrics, src.Metrics) }
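// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: a ScopeMetrics node is
// identified by its instrumentation scope's name/version pair plus an
// optional schema URL; its metrics hang off the embedded MetricSlice. The
// scope name and version below are made up.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	sm := pmetric.NewScopeMetrics() // testing-only constructor
	sm.Scope().SetName("go.opentelemetry.io/contrib/instrumentation/net/http")
	sm.Scope().SetVersion("0.54.0")
	sm.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")

	m := sm.Metrics().AppendEmpty()
	m.SetName("http.server.request.duration")

	fmt.Println(sm.Scope().Name(), sm.Metrics().Len())
}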
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // ScopeMetricsSlice logically represents a slice of ScopeMetrics. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewScopeMetricsSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ScopeMetricsSlice struct { orig *[]*otlpmetrics.ScopeMetrics state *internal.State } func newScopeMetricsSlice(orig *[]*otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetricsSlice { return ScopeMetricsSlice{orig: orig, state: state} } // NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewScopeMetricsSlice() ScopeMetricsSlice { orig := []*otlpmetrics.ScopeMetrics(nil) state := internal.StateMutable return newScopeMetricsSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewScopeMetricsSlice()". func (es ScopeMetricsSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ScopeMetricsSlice) At(i int) ScopeMetrics { return newScopeMetrics((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ScopeMetricsSlice) All() iter.Seq2[int, ScopeMetrics] { return func(yield func(int, ScopeMetrics) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ScopeMetricsSlice can be initialized: // // es := NewScopeMetricsSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ScopeMetricsSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.ScopeMetrics, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ScopeMetrics. // It returns the newly added ScopeMetrics. func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.ScopeMetrics{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ScopeMetricsSlice) MoveAndAppendTo(dest ScopeMetricsSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) { dest.state.AssertMutable() *dest.orig = copyOrigScopeMetricsSlice(*dest.orig, *es.orig) } // Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the // provided less function so that two instances of ScopeMetricsSlice // can be compared. func (es ScopeMetricsSlice) Sort(less func(a, b ScopeMetrics) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigScopeMetricsSlice(dest, src []*otlpmetrics.ScopeMetrics) []*otlpmetrics.ScopeMetrics { if cap(dest) < len(src) { dest = make([]*otlpmetrics.ScopeMetrics, len(src)) data := make([]otlpmetrics.ScopeMetrics, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigScopeMetrics(dest[i], src[i]) } return dest }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSum function to create new instances. // Important: zero-initialized instance is not valid for use. type Sum struct { orig *otlpmetrics.Sum state *internal.State } func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum { return Sum{orig: orig, state: state} } // NewSum creates a new empty Sum. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSum() Sum { state := internal.StateMutable return newSum(&otlpmetrics.Sum{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms Sum) MoveTo(dest Sum) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Sum{} } // AggregationTemporality returns the aggregationtemporality associated with this Sum. func (ms Sum) AggregationTemporality() AggregationTemporality { return AggregationTemporality(ms.orig.AggregationTemporality) } // SetAggregationTemporality replaces the aggregationtemporality associated with this Sum. func (ms Sum) SetAggregationTemporality(v AggregationTemporality) { ms.state.AssertMutable() ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) } // IsMonotonic returns the ismonotonic associated with this Sum. func (ms Sum) IsMonotonic() bool { return ms.orig.IsMonotonic } // SetIsMonotonic replaces the ismonotonic associated with this Sum. func (ms Sum) SetIsMonotonic(v bool) { ms.state.AssertMutable() ms.orig.IsMonotonic = v } // DataPoints returns the DataPoints associated with this Sum. func (ms Sum) DataPoints() NumberDataPointSlice { return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Sum) CopyTo(dest Sum) { dest.state.AssertMutable() copyOrigSum(dest.orig, ms.orig) } func copyOrigSum(dest, src *otlpmetrics.Sum) { dest.AggregationTemporality = src.AggregationTemporality dest.IsMonotonic = src.IsMonotonic dest.DataPoints = copyOrigNumberDataPointSlice(dest.DataPoints, src.DataPoints) }
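// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: a monotonic, cumulative
// Sum is the OTLP shape of a classic counter. Temporality and monotonicity
// live on the Sum, while values live on its NumberDataPoints. Assumes the
// AggregationTemporalityCumulative constant from the companion enum file.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric() // testing-only constructor
	m.SetName("requests.total")

	s := m.SetEmptySum()
	s.SetIsMonotonic(true)
	s.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	s.DataPoints().AppendEmpty().SetIntValue(128)

	fmt.Println(s.IsMonotonic(), s.AggregationTemporality()) // true Cumulative
}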
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // Summary represents the type of a metric that is calculated by aggregating all reported double measurements over a time interval into a summary. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSummary function to create new instances. // Important: zero-initialized instance is not valid for use. type Summary struct { orig *otlpmetrics.Summary state *internal.State } func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary { return Summary{orig: orig, state: state} } // NewSummary creates a new empty Summary. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSummary() Summary { state := internal.StateMutable return newSummary(&otlpmetrics.Summary{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms Summary) MoveTo(dest Summary) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.Summary{} } // DataPoints returns the DataPoints associated with this Summary. func (ms Summary) DataPoints() SummaryDataPointSlice { return newSummaryDataPointSlice(&ms.orig.DataPoints, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Summary) CopyTo(dest Summary) { dest.state.AssertMutable() copyOrigSummary(dest.orig, ms.orig) } func copyOrigSummary(dest, src *otlpmetrics.Summary) { dest.DataPoints = copyOrigSummaryDataPointSlice(dest.DataPoints, src.DataPoints) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSummaryDataPoint function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPoint struct { orig *otlpmetrics.SummaryDataPoint state *internal.State } func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPoint { return SummaryDataPoint{orig: orig, state: state} } // NewSummaryDataPoint creates a new empty SummaryDataPoint. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSummaryDataPoint() SummaryDataPoint { state := internal.StateMutable return newSummaryDataPoint(&otlpmetrics.SummaryDataPoint{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.SummaryDataPoint{} } // Attributes returns the Attributes associated with this SummaryDataPoint. func (ms SummaryDataPoint) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // StartTimestamp returns the starttimestamp associated with this SummaryDataPoint. func (ms SummaryDataPoint) StartTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.StartTimeUnixNano) } // SetStartTimestamp replaces the starttimestamp associated with this SummaryDataPoint. func (ms SummaryDataPoint) SetStartTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.StartTimeUnixNano = uint64(v) } // Timestamp returns the timestamp associated with this SummaryDataPoint. func (ms SummaryDataPoint) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this SummaryDataPoint. func (ms SummaryDataPoint) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // Count returns the count associated with this SummaryDataPoint. func (ms SummaryDataPoint) Count() uint64 { return ms.orig.Count } // SetCount replaces the count associated with this SummaryDataPoint. func (ms SummaryDataPoint) SetCount(v uint64) { ms.state.AssertMutable() ms.orig.Count = v } // Sum returns the sum associated with this SummaryDataPoint. func (ms SummaryDataPoint) Sum() float64 { return ms.orig.Sum } // SetSum replaces the sum associated with this SummaryDataPoint. func (ms SummaryDataPoint) SetSum(v float64) { ms.state.AssertMutable() ms.orig.Sum = v } // QuantileValues returns the QuantileValues associated with this SummaryDataPoint.
func (ms SummaryDataPoint) QuantileValues() SummaryDataPointValueAtQuantileSlice { return newSummaryDataPointValueAtQuantileSlice(&ms.orig.QuantileValues, ms.state) } // Flags returns the flags associated with this SummaryDataPoint. func (ms SummaryDataPoint) Flags() DataPointFlags { return DataPointFlags(ms.orig.Flags) } // SetFlags replaces the flags associated with this SummaryDataPoint. func (ms SummaryDataPoint) SetFlags(v DataPointFlags) { ms.state.AssertMutable() ms.orig.Flags = uint32(v) } // CopyTo copies all properties from the current struct overriding the destination. func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) { dest.state.AssertMutable() copyOrigSummaryDataPoint(dest.orig, ms.orig) } func copyOrigSummaryDataPoint(dest, src *otlpmetrics.SummaryDataPoint) { dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.StartTimeUnixNano = src.StartTimeUnixNano dest.TimeUnixNano = src.TimeUnixNano dest.Count = src.Count dest.Sum = src.Sum dest.QuantileValues = copyOrigSummaryDataPointValueAtQuantileSlice(dest.QuantileValues, src.QuantileValues) dest.Flags = src.Flags }
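// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not generated code: a SummaryDataPoint pairs
// count/sum with a list of quantile values; this builds the classic p50/p99
// shape. The numbers are made up for the example.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewSummaryDataPoint() // testing-only constructor
	dp.SetCount(1000)
	dp.SetSum(123.4)

	p50 := dp.QuantileValues().AppendEmpty()
	p50.SetQuantile(0.5)
	p50.SetValue(0.1)

	p99 := dp.QuantileValues().AppendEmpty()
	p99.SetQuantile(0.99)
	p99.SetValue(0.9)

	fmt.Println(dp.Count(), dp.QuantileValues().Len()) // 1000 2
}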
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointSlice logically represents a slice of SummaryDataPoint. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSummaryDataPointSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointSlice struct { orig *[]*otlpmetrics.SummaryDataPoint state *internal.State } func newSummaryDataPointSlice(orig *[]*otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPointSlice { return SummaryDataPointSlice{orig: orig, state: state} } // NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSummaryDataPointSlice() SummaryDataPointSlice { orig := []*otlpmetrics.SummaryDataPoint(nil) state := internal.StateMutable return newSummaryDataPointSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a new instance created with "NewSummaryDataPointSlice()". func (es SummaryDataPointSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es SummaryDataPointSlice) At(i int) SummaryDataPoint { return newSummaryDataPoint((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es SummaryDataPointSlice) All() iter.Seq2[int, SummaryDataPoint] { return func(yield func(int, SummaryDataPoint) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new SummaryDataPointSlice can be initialized: // // es := NewSummaryDataPointSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es SummaryDataPointSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.SummaryDataPoint, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty SummaryDataPoint. // It returns the newly added SummaryDataPoint. func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es SummaryDataPointSlice) MoveAndAppendTo(dest SummaryDataPointSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do.
if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) { dest.state.AssertMutable() *dest.orig = copyOrigSummaryDataPointSlice(*dest.orig, *es.orig) } // Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the // provided less function so that two instances of SummaryDataPointSlice // can be compared. func (es SummaryDataPointSlice) Sort(less func(a, b SummaryDataPoint) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigSummaryDataPointSlice(dest, src []*otlpmetrics.SummaryDataPoint) []*otlpmetrics.SummaryDataPoint { if cap(dest) < len(src) { dest = make([]*otlpmetrics.SummaryDataPoint, len(src)) data := make([]otlpmetrics.SummaryDataPoint, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigSummaryDataPoint(dest[i], src[i]) } return dest }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointValueAtQuantile is a quantile value within a Summary data point. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSummaryDataPointValueAtQuantile function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointValueAtQuantile struct { orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile state *internal.State } func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile { return SummaryDataPointValueAtQuantile{orig: orig, state: state} } // NewSummaryDataPointValueAtQuantile creates a new empty SummaryDataPointValueAtQuantile. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile { state := internal.StateMutable return newSummaryDataPointValueAtQuantile(&otlpmetrics.SummaryDataPoint_ValueAtQuantile{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQuantile) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpmetrics.SummaryDataPoint_ValueAtQuantile{} } // Quantile returns the quantile associated with this SummaryDataPointValueAtQuantile. func (ms SummaryDataPointValueAtQuantile) Quantile() float64 { return ms.orig.Quantile } // SetQuantile replaces the quantile associated with this SummaryDataPointValueAtQuantile. func (ms SummaryDataPointValueAtQuantile) SetQuantile(v float64) { ms.state.AssertMutable() ms.orig.Quantile = v } // Value returns the value associated with this SummaryDataPointValueAtQuantile. func (ms SummaryDataPointValueAtQuantile) Value() float64 { return ms.orig.Value } // SetValue replaces the value associated with this SummaryDataPointValueAtQuantile. func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) { ms.state.AssertMutable() ms.orig.Value = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) { dest.state.AssertMutable() copyOrigSummaryDataPointValueAtQuantile(dest.orig, ms.orig) } func copyOrigSummaryDataPointValueAtQuantile(dest, src *otlpmetrics.SummaryDataPoint_ValueAtQuantile) { dest.Quantile = src.Quantile dest.Value = src.Value }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetric import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) // SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type SummaryDataPointValueAtQuantileSlice struct { orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile state *internal.State } func newSummaryDataPointValueAtQuantileSlice(orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice { return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state} } // NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice { orig := []*otlpmetrics.SummaryDataPoint_ValueAtQuantile(nil) state := internal.StateMutable return newSummaryDataPointValueAtQuantileSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a newly created instance with "NewSummaryDataPointValueAtQuantileSlice()". func (es SummaryDataPointValueAtQuantileSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es SummaryDataPointValueAtQuantileSlice) At(i int) SummaryDataPointValueAtQuantile { return newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es SummaryDataPointValueAtQuantileSlice) All() iter.Seq2[int, SummaryDataPointValueAtQuantile] { return func(yield func(int, SummaryDataPointValueAtQuantile) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new SummaryDataPointValueAtQuantileSlice can be initialized: // // es := NewSummaryDataPointValueAtQuantileSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty SummaryDataPointValueAtQuantile. // It returns the newly added SummaryDataPointValueAtQuantile.
func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile { es.state.AssertMutable() *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es SummaryDataPointValueAtQuantileSlice) MoveAndAppendTo(dest SummaryDataPointValueAtQuantileSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations. *dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointValueAtQuantile) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValueAtQuantileSlice) { dest.state.AssertMutable() *dest.orig = copyOrigSummaryDataPointValueAtQuantileSlice(*dest.orig, *es.orig) } // Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the // provided less function so that two instances of SummaryDataPointValueAtQuantileSlice // can be compared. func (es SummaryDataPointValueAtQuantileSlice) Sort(less func(a, b SummaryDataPointValueAtQuantile) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigSummaryDataPointValueAtQuantileSlice(dest, src []*otlpmetrics.SummaryDataPoint_ValueAtQuantile) []*otlpmetrics.SummaryDataPoint_ValueAtQuantile { if cap(dest) < len(src) { dest = make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src)) data := make([]otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigSummaryDataPointValueAtQuantile(dest[i], src[i]) } return dest }
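// Editorial note: a minimal sketch (not part of the generated file above)
// showing how SummaryDataPointValueAtQuantile values are attached to a
// Summary data point. All identifiers come from the pmetric API shown in
// this section; the metric name and numeric values are illustrative only.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("http.server.duration") // illustrative name
	dp := m.SetEmptySummary().DataPoints().AppendEmpty()
	dp.SetCount(100)
	dp.SetSum(420.0)
	// Record the p99 value on the data point.
	q := dp.QuantileValues().AppendEmpty() // a SummaryDataPointValueAtQuantile
	q.SetQuantile(0.99)
	q.SetValue(12.5)
	fmt.Println(dp.QuantileValues().Len()) // 1
}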
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( "bytes" "fmt" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) var _ Marshaler = (*JSONMarshaler)(nil) // JSONMarshaler marshals pdata.Metrics to JSON bytes using the OTLP/JSON format. type JSONMarshaler struct{} // MarshalMetrics to the OTLP/JSON format. func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { buf := bytes.Buffer{} pb := internal.MetricsToProto(internal.Metrics(md)) err := json.Marshal(&buf, &pb) return buf.Bytes(), err } // JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Metrics. type JSONUnmarshaler struct{} // UnmarshalMetrics from OTLP/JSON format into pdata.Metrics. func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { iter := jsoniter.ConfigFastest.BorrowIterator(buf) defer jsoniter.ConfigFastest.ReturnIterator(iter) md := NewMetrics() md.unmarshalJsoniter(iter) if iter.Error != nil { return Metrics{}, iter.Error } otlp.MigrateMetrics(md.getOrig().ResourceMetrics) return md, nil } func (ms Metrics) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resource_metrics", "resourceMetrics": iter.ReadArrayCB(func(*jsoniter.Iterator) bool { ms.ResourceMetrics().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms ResourceMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resource": json.ReadResource(iter, &ms.orig.Resource) case "scopeMetrics", "scope_metrics": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.ScopeMetrics().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (ms ScopeMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "scope": json.ReadScope(iter, &ms.orig.Scope) case "metrics": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.Metrics().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (ms Metric) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "name": ms.orig.Name = iter.ReadString() case "description": ms.orig.Description = iter.ReadString() case "unit": ms.orig.Unit = iter.ReadString() case "metadata": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Metadata = append(ms.orig.Metadata, json.ReadAttribute(iter)) return true }) case "sum": ms.SetEmptySum().unmarshalJsoniter(iter) case "gauge": ms.SetEmptyGauge().unmarshalJsoniter(iter) case "histogram": ms.SetEmptyHistogram().unmarshalJsoniter(iter) case "exponential_histogram", "exponentialHistogram": ms.SetEmptyExponentialHistogram().unmarshalJsoniter(iter) case "summary": ms.SetEmptySummary().unmarshalJsoniter(iter) default: iter.Skip() } return true }) } func (ms Sum) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, 
f string) bool { switch f { case "aggregation_temporality", "aggregationTemporality": ms.orig.AggregationTemporality = readAggregationTemporality(iter) case "is_monotonic", "isMonotonic": ms.orig.IsMonotonic = iter.ReadBool() case "data_points", "dataPoints": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms Gauge) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "data_points", "dataPoints": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms Histogram) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "data_points", "dataPoints": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) return true }) case "aggregation_temporality", "aggregationTemporality": ms.orig.AggregationTemporality = readAggregationTemporality(iter) default: iter.Skip() } return true }) } func (ms ExponentialHistogram) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "data_points", "dataPoints": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) return true }) case "aggregation_temporality", "aggregationTemporality": ms.orig.AggregationTemporality = readAggregationTemporality(iter) default: iter.Skip() } return true }) } func (ms Summary) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "data_points", "dataPoints": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms NumberDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "start_time_unix_nano", "startTimeUnixNano": ms.orig.StartTimeUnixNano = json.ReadUint64(iter) case "as_int", "asInt": ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{ AsInt: json.ReadInt64(iter), } case "as_double", "asDouble": ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{ AsDouble: json.ReadFloat64(iter), } case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) return true }) case "exemplars": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) return true }) case "flags": ms.orig.Flags = json.ReadUint32(iter) default: iter.Skip() } return true }) } func (ms HistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "start_time_unix_nano", "startTimeUnixNano": ms.orig.StartTimeUnixNano = json.ReadUint64(iter) case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) return true }) case "count": ms.orig.Count = json.ReadUint64(iter) case "sum": ms.orig.Sum_ 
= &otlpmetrics.HistogramDataPoint_Sum{Sum: json.ReadFloat64(iter)} case "bucket_counts", "bucketCounts": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter)) return true }) case "explicit_bounds", "explicitBounds": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.ExplicitBounds = append(ms.orig.ExplicitBounds, json.ReadFloat64(iter)) return true }) case "exemplars": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) return true }) case "flags": ms.orig.Flags = json.ReadUint32(iter) case "max": ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{ Max: json.ReadFloat64(iter), } case "min": ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{ Min: json.ReadFloat64(iter), } default: iter.Skip() } return true }) } func (ms ExponentialHistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "start_time_unix_nano", "startTimeUnixNano": ms.orig.StartTimeUnixNano = json.ReadUint64(iter) case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) return true }) case "count": ms.orig.Count = json.ReadUint64(iter) case "sum": ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{ Sum: json.ReadFloat64(iter), } case "scale": ms.orig.Scale = iter.ReadInt32() case "zero_count", "zeroCount": ms.orig.ZeroCount = json.ReadUint64(iter) case "positive": ms.Positive().unmarshalJsoniter(iter) case "negative": ms.Negative().unmarshalJsoniter(iter) case "exemplars": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) return true }) case "flags": ms.orig.Flags = json.ReadUint32(iter) case "max": ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{ Max: json.ReadFloat64(iter), } case "min": ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{ Min: json.ReadFloat64(iter), } default: iter.Skip() } return true }) } func (ms SummaryDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "start_time_unix_nano", "startTimeUnixNano": ms.orig.StartTimeUnixNano = json.ReadUint64(iter) case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) return true }) case "count": ms.orig.Count = json.ReadUint64(iter) case "sum": ms.orig.Sum = json.ReadFloat64(iter) case "quantile_values", "quantileValues": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.QuantileValues().AppendEmpty().unmarshalJsoniter(iter) return true }) case "flags": ms.orig.Flags = json.ReadUint32(iter) default: iter.Skip() } return true }) } func (ms ExponentialHistogramDataPointBuckets) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "bucket_counts", "bucketCounts": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter)) return true }) case "offset": ms.orig.Offset = iter.ReadInt32() default: iter.Skip() } return true }) } func (ms SummaryDataPointValueAtQuantile) 
unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "quantile": ms.orig.Quantile = json.ReadFloat64(iter) case "value": ms.orig.Value = json.ReadFloat64(iter) default: iter.Skip() } return true }) } func (ms Exemplar) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "filtered_attributes", "filteredAttributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.orig.FilteredAttributes = append(ms.orig.FilteredAttributes, json.ReadAttribute(iter)) return true }) case "timeUnixNano", "time_unix_nano": ms.orig.TimeUnixNano = json.ReadUint64(iter) case "as_int", "asInt": ms.orig.Value = &otlpmetrics.Exemplar_AsInt{ AsInt: json.ReadInt64(iter), } case "as_double", "asDouble": ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{ AsDouble: json.ReadFloat64(iter), } case "traceId", "trace_id": if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("exemplar.traceId", fmt.Sprintf("parse trace_id:%v", err)) } case "spanId", "span_id": if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("exemplar.spanId", fmt.Sprintf("parse span_id:%v", err)) } default: iter.Skip() } return true }) } func readAggregationTemporality(iter *jsoniter.Iterator) otlpmetrics.AggregationTemporality { return otlpmetrics.AggregationTemporality(json.ReadEnumValue(iter, otlpmetrics.AggregationTemporality_value)) }
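// Editorial note: a minimal sketch (not part of the package source above) of
// an OTLP/JSON round trip through JSONMarshaler and JSONUnmarshaler. As the
// switch statements above show, the unmarshaler accepts both snake_case and
// camelCase field names.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("requests")
	m.SetEmptySum().DataPoints().AppendEmpty().SetIntValue(42)

	data, err := (&pmetric.JSONMarshaler{}).MarshalMetrics(md)
	if err != nil {
		panic(err)
	}
	back, err := (&pmetric.JSONUnmarshaler{}).UnmarshalMetrics(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.DataPointCount()) // 1
}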
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" const noRecordValueMask = uint32(1) // DefaultDataPointFlags is the default DataPointFlags value, with no flags set. var DefaultDataPointFlags = DataPointFlags(0) // DataPointFlags defines how a metric aggregator reports aggregated values. // It describes how those values relate to the time interval over which they are aggregated. type DataPointFlags uint32 // NoRecordedValue returns true if the DataPointFlags contains the NoRecordedValue flag. func (ms DataPointFlags) NoRecordedValue() bool { return uint32(ms)&noRecordValueMask != 0 } // WithNoRecordedValue returns a new DataPointFlags, with the NoRecordedValue flag set to the given value. func (ms DataPointFlags) WithNoRecordedValue(b bool) DataPointFlags { orig := uint32(ms) if b { orig |= noRecordValueMask } else { orig &^= noRecordValueMask } return DataPointFlags(orig) }
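// Editorial note: a minimal sketch (not part of the file above) showing how
// DataPointFlags is used. Flag values are immutable; WithNoRecordedValue
// returns a new value, so the result must be stored back on the data point.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewNumberDataPoint()
	dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true))
	fmt.Println(dp.Flags().NoRecordedValue()) // true
	dp.SetFlags(dp.Flags().WithNoRecordedValue(false))
	fmt.Println(dp.Flags().NoRecordedValue()) // false
}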
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" // MetricType specifies the type of data in a Metric. type MetricType int32 const ( // MetricTypeEmpty means that metric type is unset. MetricTypeEmpty MetricType = iota MetricTypeGauge MetricTypeSum MetricTypeHistogram MetricTypeExponentialHistogram MetricTypeSummary ) // String returns the string representation of the MetricType. func (mdt MetricType) String() string { switch mdt { case MetricTypeEmpty: return "Empty" case MetricTypeGauge: return "Gauge" case MetricTypeSum: return "Sum" case MetricTypeHistogram: return "Histogram" case MetricTypeExponentialHistogram: return "ExponentialHistogram" case MetricTypeSummary: return "Summary" } return "" }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" ) // Metrics is the top-level struct that is propagated through the metrics pipeline. // Use NewMetrics to create a new instance; a zero-initialized instance is not valid for use. type Metrics internal.Metrics func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest) Metrics { state := internal.StateMutable return Metrics(internal.NewMetrics(orig, &state)) } func (ms Metrics) getOrig() *otlpcollectormetrics.ExportMetricsServiceRequest { return internal.GetOrigMetrics(internal.Metrics(ms)) } func (ms Metrics) getState() *internal.State { return internal.GetMetricsState(internal.Metrics(ms)) } // NewMetrics creates a new Metrics struct. func NewMetrics() Metrics { return newMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{}) } // IsReadOnly returns true if this Metrics instance is read-only. func (ms Metrics) IsReadOnly() bool { return *ms.getState() == internal.StateReadOnly } // CopyTo copies the Metrics instance overriding the destination. func (ms Metrics) CopyTo(dest Metrics) { ms.ResourceMetrics().CopyTo(dest.ResourceMetrics()) } // ResourceMetrics returns the ResourceMetricsSlice associated with this Metrics. func (ms Metrics) ResourceMetrics() ResourceMetricsSlice { return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, internal.GetMetricsState(internal.Metrics(ms))) } // MetricCount calculates the total number of metrics. func (ms Metrics) MetricCount() int { metricCount := 0 rms := ms.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rm := rms.At(i) ilms := rm.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { ilm := ilms.At(j) metricCount += ilm.Metrics().Len() } } return metricCount } // DataPointCount calculates the total number of data points. func (ms Metrics) DataPointCount() (dataPointCount int) { rms := ms.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rm := rms.At(i) ilms := rm.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { ilm := ilms.At(j) ms := ilm.Metrics() for k := 0; k < ms.Len(); k++ { m := ms.At(k) switch m.Type() { case MetricTypeGauge: dataPointCount += m.Gauge().DataPoints().Len() case MetricTypeSum: dataPointCount += m.Sum().DataPoints().Len() case MetricTypeHistogram: dataPointCount += m.Histogram().DataPoints().Len() case MetricTypeExponentialHistogram: dataPointCount += m.ExponentialHistogram().DataPoints().Len() case MetricTypeSummary: dataPointCount += m.Summary().DataPoints().Len() } } } } return } // MarkReadOnly marks the Metrics as shared so that no further modifications can be done on it. func (ms Metrics) MarkReadOnly() { internal.SetMetricsState(internal.Metrics(ms), internal.StateReadOnly) }
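// Editorial note: a minimal sketch (not part of the file above) exercising
// MetricCount, DataPointCount and the read-only state. After MarkReadOnly,
// any mutating call would panic via the internal State.AssertMutable check.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
	g := sm.Metrics().AppendEmpty().SetEmptyGauge()
	g.DataPoints().AppendEmpty().SetDoubleValue(1.5)
	g.DataPoints().AppendEmpty().SetDoubleValue(2.5)
	fmt.Println(md.MetricCount(), md.DataPointCount()) // 1 2

	md.MarkReadOnly()
	fmt.Println(md.IsReadOnly()) // true
}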
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" // NumberDataPointValueType specifies the type of NumberDataPoint value. type NumberDataPointValueType int32 const ( // NumberDataPointValueTypeEmpty means that data point value is unset. NumberDataPointValueTypeEmpty NumberDataPointValueType = iota NumberDataPointValueTypeInt NumberDataPointValueTypeDouble ) // String returns the string representation of the NumberDataPointValueType. func (nt NumberDataPointValueType) String() string { switch nt { case NumberDataPointValueTypeEmpty: return "Empty" case NumberDataPointValueTypeInt: return "Int" case NumberDataPointValueTypeDouble: return "Double" } return "" }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" import ( "go.opentelemetry.io/collector/pdata/internal" otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" ) var _ MarshalSizer = (*ProtoMarshaler)(nil) // ProtoMarshaler marshals pdata.Metrics to protobuf bytes and reports their encoded sizes. type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { pb := internal.MetricsToProto(internal.Metrics(md)) return pb.Marshal() } func (e *ProtoMarshaler) MetricsSize(md Metrics) int { pb := internal.MetricsToProto(internal.Metrics(md)) return pb.Size() } func (e *ProtoMarshaler) ResourceMetricsSize(rm ResourceMetrics) int { return rm.orig.Size() } func (e *ProtoMarshaler) ScopeMetricsSize(sm ScopeMetrics) int { return sm.orig.Size() } func (e *ProtoMarshaler) MetricSize(m Metric) int { return m.orig.Size() } func (e *ProtoMarshaler) NumberDataPointSize(ndp NumberDataPoint) int { return ndp.orig.Size() } func (e *ProtoMarshaler) SummaryDataPointSize(sdps SummaryDataPoint) int { return sdps.orig.Size() } func (e *ProtoMarshaler) HistogramDataPointSize(hdp HistogramDataPoint) int { return hdp.orig.Size() } func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(ehdp ExponentialHistogramDataPoint) int { return ehdp.orig.Size() } // ProtoUnmarshaler unmarshals protobuf-encoded bytes to pdata.Metrics. type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { pb := otlpmetrics.MetricsData{} err := pb.Unmarshal(buf) return Metrics(internal.MetricsFromProto(pb)), err }
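// Editorial note: a minimal sketch (not part of the file above) of a
// protobuf round trip. MetricsSize reports the encoded size, so it matches
// the length of the buffer produced by MarshalMetrics.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("up")

	marshaler := &pmetric.ProtoMarshaler{}
	buf, err := marshaler.MarshalMetrics(md)
	if err != nil {
		panic(err)
	}
	fmt.Println(marshaler.MetricsSize(md) == len(buf)) // true

	back, err := (&pmetric.ProtoUnmarshaler{}).UnmarshalMetrics(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.MetricCount()) // 1
}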
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pmetricotlp import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { orig *otlpcollectormetrics.ExportMetricsPartialSuccess state *internal.State } func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } // NewExportPartialSuccess creates a new empty ExportPartialSuccess. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { state := internal.StateMutable return newExportPartialSuccess(&otlpcollectormetrics.ExportMetricsPartialSuccess{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpcollectormetrics.ExportMetricsPartialSuccess{} } // RejectedDataPoints returns the number of rejected data points associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) RejectedDataPoints() int64 { return ms.orig.RejectedDataPoints } // SetRejectedDataPoints replaces the number of rejected data points associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetRejectedDataPoints(v int64) { ms.state.AssertMutable() ms.orig.RejectedDataPoints = v } // ErrorMessage returns the error message associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) ErrorMessage() string { return ms.orig.ErrorMessage } // SetErrorMessage replaces the error message associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetErrorMessage(v string) { ms.state.AssertMutable() ms.orig.ErrorMessage = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() copyOrigExportPartialSuccess(dest.orig, ms.orig) } func copyOrigExportPartialSuccess(dest, src *otlpcollectormetrics.ExportMetricsPartialSuccess) { dest.RejectedDataPoints = src.RejectedDataPoints dest.ErrorMessage = src.ErrorMessage }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" import ( "context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // GRPCClient is the client API for OTLP-GRPC Metrics service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GRPCClient interface { // Export pmetric.Metrics to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) // unexported disallows implementations of the GRPCClient. unexported() } // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { return &grpcClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)} } type grpcClient struct { rawClient otlpcollectormetrics.MetricsServiceClient } func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { rsp, err := c.rawClient.Export(ctx, request.orig, opts...) if err != nil { return ExportResponse{}, err } state := internal.StateMutable return ExportResponse{orig: rsp, state: &state}, err } func (c *grpcClient) unexported() {} // GRPCServer is the server API for OTLP gRPC MetricsService service. // Implementations MUST embed UnimplementedGRPCServer. type GRPCServer interface { // Export is called every time a new request is received. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(context.Context, ExportRequest) (ExportResponse, error) // unexported disallows implementations of the GRPCServer. unexported() } var _ GRPCServer = (*UnimplementedGRPCServer)(nil) // UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. type UnimplementedGRPCServer struct{} func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") } func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv}) } type rawMetricsServer struct { srv GRPCServer } func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) { otlp.MigrateMetrics(request.ResourceMetrics) state := internal.StateMutable rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) return rsp.orig, err }
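// Editorial note: a minimal sketch (not part of the file above) of a
// GRPCServer implementation. Because the interface has an unexported method,
// outside packages must embed UnimplementedGRPCServer to satisfy it; the
// metricsReceiver type and the listener address are hypothetical.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

type metricsReceiver struct {
	pmetricotlp.UnimplementedGRPCServer
}

func (r *metricsReceiver) Export(_ context.Context, req pmetricotlp.ExportRequest) (pmetricotlp.ExportResponse, error) {
	log.Printf("received %d data points", req.Metrics().DataPointCount())
	return pmetricotlp.NewExportResponse(), nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	pmetricotlp.RegisterGRPCServer(srv, &metricsReceiver{})
	log.Fatal(srv.Serve(lis))
}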
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" import ( "bytes" "go.opentelemetry.io/collector/pdata/internal" otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/pmetric" ) var jsonUnmarshaler = &pmetric.JSONUnmarshaler{} // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for pmetric.Metrics data. type ExportRequest struct { orig *otlpcollectormetrics.ExportMetricsServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { state := internal.StateMutable return ExportRequest{ orig: &otlpcollectormetrics.ExportMetricsServiceRequest{}, state: &state, } } // NewExportRequestFromMetrics returns an ExportRequest from pmetric.Metrics. // Because ExportRequest is a wrapper for pmetric.Metrics, // any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest { return ExportRequest{ orig: internal.GetOrigMetrics(internal.Metrics(md)), state: internal.GetMetricsState(internal.Metrics(md)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshalls ExportRequest from proto bytes. func (ms ExportRequest) UnmarshalProto(data []byte) error { return ms.orig.Unmarshal(data) } // MarshalJSON marshals ExportRequest into JSON bytes. func (ms ExportRequest) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshalls ExportRequest from JSON bytes. func (ms ExportRequest) UnmarshalJSON(data []byte) error { md, err := jsonUnmarshaler.UnmarshalMetrics(data) if err != nil { return err } *ms.orig = *internal.GetOrigMetrics(internal.Metrics(md)) return nil } func (ms ExportRequest) Metrics() pmetric.Metrics { return pmetric.Metrics(internal.NewMetrics(ms.orig, ms.state)) }
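// Editorial note: a minimal sketch (not part of the file above) showing the
// wrapper semantics of ExportRequest: it shares the underlying data with the
// pmetric.Metrics it was created from, so later mutations are visible
// through the request.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	md := pmetric.NewMetrics()
	req := pmetricotlp.NewExportRequestFromMetrics(md)

	// Mutating md after the wrap is reflected in the request.
	md.ResourceMetrics().AppendEmpty()
	fmt.Println(req.Metrics().ResourceMetrics().Len()) // 1

	body, err := req.MarshalProto()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(body) > 0) // true
}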
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" import ( "bytes" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" "go.opentelemetry.io/collector/pdata/internal/json" ) // ExportResponse represents the response for gRPC/HTTP client/server. type ExportResponse struct { orig *otlpcollectormetrics.ExportMetricsServiceResponse state *internal.State } // NewExportResponse returns an empty ExportResponse. func NewExportResponse() ExportResponse { state := internal.StateMutable return ExportResponse{ orig: &otlpcollectormetrics.ExportMetricsServiceResponse{}, state: &state, } } // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { return ms.orig.Unmarshal(data) } // MarshalJSON marshals ExportResponse into JSON bytes. func (ms ExportResponse) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshalls ExportResponse from JSON bytes. func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := jsoniter.ConfigFastest.BorrowIterator(data) defer jsoniter.ConfigFastest.ReturnIterator(iter) ms.unmarshalJsoniter(iter) return iter.Error } // PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) } func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "partial_success", "partialSuccess": ms.PartialSuccess().unmarshalJsoniter(iter) default: iter.Skip() } return true }) } func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool { switch f { case "rejected_data_points", "rejectedDataPoints": ms.orig.RejectedDataPoints = json.ReadInt64(iter) case "error_message", "errorMessage": ms.orig.ErrorMessage = iter.ReadString() default: iter.Skip() } return true }) }
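// Editorial note: a minimal sketch (not part of the file above) of building
// a partial-success response and serializing it to JSON; the rejected count
// and error message are illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)

func main() {
	resp := pmetricotlp.NewExportResponse()
	ps := resp.PartialSuccess()
	ps.SetRejectedDataPoints(3)
	ps.SetErrorMessage("3 data points rejected") // illustrative

	data, err := resp.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}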
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package pprofileotlp import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" ) // ExportPartialSuccess represents the details of a partially successful export request. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { orig *otlpcollectorprofile.ExportProfilesPartialSuccess state *internal.State } func newExportPartialSuccess(orig *otlpcollectorprofile.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } // NewExportPartialSuccess creates a new empty ExportPartialSuccess. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { state := internal.StateMutable return newExportPartialSuccess(&otlpcollectorprofile.ExportProfilesPartialSuccess{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpcollectorprofile.ExportProfilesPartialSuccess{} } // RejectedProfiles returns the number of rejected profiles associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) RejectedProfiles() int64 { return ms.orig.RejectedProfiles } // SetRejectedProfiles replaces the number of rejected profiles associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetRejectedProfiles(v int64) { ms.state.AssertMutable() ms.orig.RejectedProfiles = v } // ErrorMessage returns the error message associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) ErrorMessage() string { return ms.orig.ErrorMessage } // SetErrorMessage replaces the error message associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetErrorMessage(v string) { ms.state.AssertMutable() ms.orig.ErrorMessage = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() copyOrigExportPartialSuccess(dest.orig, ms.orig) } func copyOrigExportPartialSuccess(dest, src *otlpcollectorprofile.ExportProfilesPartialSuccess) { dest.RejectedProfiles = src.RejectedProfiles dest.ErrorMessage = src.ErrorMessage }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" import ( "context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // GRPCClient is the client API for OTLP-GRPC Profiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GRPCClient interface { // Export pprofile.Profiles to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) // unexported disallows implementations of the GRPCClient. unexported() } // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { return &grpcClient{rawClient: otlpcollectorprofile.NewProfilesServiceClient(cc)} } type grpcClient struct { rawClient otlpcollectorprofile.ProfilesServiceClient } // Export implements the GRPCClient interface. func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { rsp, err := c.rawClient.Export(ctx, request.orig, opts...) if err != nil { return ExportResponse{}, err } state := internal.StateMutable return ExportResponse{orig: rsp, state: &state}, err } func (c *grpcClient) unexported() {} // GRPCServer is the server API for OTLP gRPC ProfilesService service. // Implementations MUST embed UnimplementedGRPCServer. type GRPCServer interface { // Export is called every time a new request is received. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(context.Context, ExportRequest) (ExportResponse, error) // unexported disallows implementations of the GRPCServer. unexported() } var _ GRPCServer = (*UnimplementedGRPCServer)(nil) // UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. type UnimplementedGRPCServer struct{} func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") } func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { otlpcollectorprofile.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv}) } type rawProfilesServer struct { srv GRPCServer } func (s rawProfilesServer) Export(ctx context.Context, request *otlpcollectorprofile.ExportProfilesServiceRequest) (*otlpcollectorprofile.ExportProfilesServiceResponse, error) { otlp.MigrateProfiles(request.ResourceProfiles) state := internal.StateMutable rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) return rsp.orig, err }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" import ( "bytes" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/pprofile" ) var jsonUnmarshaler = &pprofile.JSONUnmarshaler{} // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for pprofile.Profiles data. type ExportRequest struct { orig *otlpcollectorprofile.ExportProfilesServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { state := internal.StateMutable return ExportRequest{ orig: &otlpcollectorprofile.ExportProfilesServiceRequest{}, state: &state, } } // NewExportRequestFromProfiles returns an ExportRequest from pprofile.Profiles. // Because ExportRequest is a wrapper for pprofile.Profiles, // any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest { return ExportRequest{ orig: internal.GetOrigProfiles(internal.Profiles(td)), state: internal.GetProfilesState(internal.Profiles(td)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshalls ExportRequest from proto bytes. func (ms ExportRequest) UnmarshalProto(data []byte) error { if err := ms.orig.Unmarshal(data); err != nil { return err } otlp.MigrateProfiles(ms.orig.ResourceProfiles) return nil } // MarshalJSON marshals ExportRequest into JSON bytes. func (ms ExportRequest) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshalls ExportRequest from JSON bytes. func (ms ExportRequest) UnmarshalJSON(data []byte) error { td, err := jsonUnmarshaler.UnmarshalProfiles(data) if err != nil { return err } *ms.orig = *internal.GetOrigProfiles(internal.Profiles(td)) return nil } func (ms ExportRequest) Profiles() pprofile.Profiles { return pprofile.Profiles(internal.NewProfiles(ms.orig, ms.state)) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" import ( "bytes" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" "go.opentelemetry.io/collector/pdata/internal/json" ) // ExportResponse represents the response for gRPC/HTTP client/server. type ExportResponse struct { orig *otlpcollectorprofile.ExportProfilesServiceResponse state *internal.State } // NewExportResponse returns an empty ExportResponse. func NewExportResponse() ExportResponse { state := internal.StateMutable return ExportResponse{ orig: &otlpcollectorprofile.ExportProfilesServiceResponse{}, state: &state, } } // MarshalProto marshals ExportResponse into proto bytes. func (ms ExportResponse) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshalls ExportResponse from proto bytes. func (ms ExportResponse) UnmarshalProto(data []byte) error { return ms.orig.Unmarshal(data) } // MarshalJSON marshals ExportResponse into JSON bytes. func (ms ExportResponse) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshalls ExportResponse from JSON bytes. func (ms ExportResponse) UnmarshalJSON(data []byte) error { iter := jsoniter.ConfigFastest.BorrowIterator(data) defer jsoniter.ConfigFastest.ReturnIterator(iter) ms.unmarshalJsoniter(iter) return iter.Error } func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "partial_success", "partialSuccess": ms.PartialSuccess().unmarshalJsoniter(iter) default: iter.Skip() } return true }) } // PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) } func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool { switch f { case "rejected_profiles", "rejectedProfiles": ms.orig.RejectedProfiles = json.ReadInt64(iter) case "error_message", "errorMessage": ms.orig.ErrorMessage = iter.ReadString() default: iter.Skip() } return true }) }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // ResourceSpans is a collection of spans from a Resource. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewResourceSpans function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceSpans struct { orig *otlptrace.ResourceSpans state *internal.State } func newResourceSpans(orig *otlptrace.ResourceSpans, state *internal.State) ResourceSpans { return ResourceSpans{orig: orig, state: state} } // NewResourceSpans creates a new empty ResourceSpans. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewResourceSpans() ResourceSpans { state := internal.StateMutable return newResourceSpans(&otlptrace.ResourceSpans{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value. func (ms ResourceSpans) MoveTo(dest ResourceSpans) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlptrace.ResourceSpans{} } // Resource returns the resource associated with this ResourceSpans. func (ms ResourceSpans) Resource() pcommon.Resource { return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) } // SchemaUrl returns the schemaurl associated with this ResourceSpans. func (ms ResourceSpans) SchemaUrl() string { return ms.orig.SchemaUrl } // SetSchemaUrl replaces the schemaurl associated with this ResourceSpans. func (ms ResourceSpans) SetSchemaUrl(v string) { ms.state.AssertMutable() ms.orig.SchemaUrl = v } // ScopeSpans returns the ScopeSpans associated with this ResourceSpans. func (ms ResourceSpans) ScopeSpans() ScopeSpansSlice { return newScopeSpansSlice(&ms.orig.ScopeSpans, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms ResourceSpans) CopyTo(dest ResourceSpans) { dest.state.AssertMutable() copyOrigResourceSpans(dest.orig, ms.orig) } func copyOrigResourceSpans(dest, src *otlptrace.ResourceSpans) { internal.CopyOrigResource(&dest.Resource, &src.Resource) dest.SchemaUrl = src.SchemaUrl dest.ScopeSpans = copyOrigScopeSpansSlice(dest.ScopeSpans, src.ScopeSpans) }
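// Editorial note: a minimal sketch (not part of the generated file above)
// assembling a ResourceSpans through the ptrace API; the attribute value,
// scope name, span name and schema URL are illustrative.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().PutStr("service.name", "checkout") // illustrative
	rs.SetSchemaUrl("https://opentelemetry.io/schemas/1.27.0")    // illustrative

	ss := rs.ScopeSpans().AppendEmpty()
	ss.Scope().SetName("example/instrumentation") // illustrative
	ss.Spans().AppendEmpty().SetName("GET /cart") // illustrative

	fmt.Println(td.SpanCount()) // 1
}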
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "iter" "sort" "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // ResourceSpansSlice logically represents a slice of ResourceSpans. // // This is a reference type. If passed by value and callee modifies it, the // caller will see the modification. // // Must use NewResourceSpansSlice function to create new instances. // Important: zero-initialized instance is not valid for use. type ResourceSpansSlice struct { orig *[]*otlptrace.ResourceSpans state *internal.State } func newResourceSpansSlice(orig *[]*otlptrace.ResourceSpans, state *internal.State) ResourceSpansSlice { return ResourceSpansSlice{orig: orig, state: state} } // NewResourceSpansSlice creates a ResourceSpansSlice with 0 elements. // Can use "EnsureCapacity" to initialize with a given capacity. func NewResourceSpansSlice() ResourceSpansSlice { orig := []*otlptrace.ResourceSpans(nil) state := internal.StateMutable return newResourceSpansSlice(&orig, &state) } // Len returns the number of elements in the slice. // // Returns "0" for a newly created instance with "NewResourceSpansSlice()". func (es ResourceSpansSlice) Len() int { return len(*es.orig) } // At returns the element at the given index. // // This function is used mostly for iterating over all the values in the slice: // // for i := 0; i < es.Len(); i++ { // e := es.At(i) // ... // Do something with the element // } func (es ResourceSpansSlice) At(i int) ResourceSpans { return newResourceSpans((*es.orig)[i], es.state) } // All returns an iterator over index-value pairs in the slice. // // for i, v := range es.All() { // ... // Do something with index-value pair // } func (es ResourceSpansSlice) All() iter.Seq2[int, ResourceSpans] { return func(yield func(int, ResourceSpans) bool) { for i := 0; i < es.Len(); i++ { if !yield(i, es.At(i)) { return } } } } // EnsureCapacity is an operation that ensures the slice has at least the specified capacity. // 1. If the newCap <= cap then no change in capacity. // 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. // // Here is how a new ResourceSpansSlice can be initialized: // // es := NewResourceSpansSlice() // es.EnsureCapacity(4) // for i := 0; i < 4; i++ { // e := es.AppendEmpty() // // Here should set all the values for e. // } func (es ResourceSpansSlice) EnsureCapacity(newCap int) { es.state.AssertMutable() oldCap := cap(*es.orig) if newCap <= oldCap { return } newOrig := make([]*otlptrace.ResourceSpans, len(*es.orig), newCap) copy(newOrig, *es.orig) *es.orig = newOrig } // AppendEmpty will append to the end of the slice an empty ResourceSpans. // It returns the newly added ResourceSpans. func (es ResourceSpansSlice) AppendEmpty() ResourceSpans { es.state.AssertMutable() *es.orig = append(*es.orig, &otlptrace.ResourceSpans{}) return es.At(es.Len() - 1) } // MoveAndAppendTo moves all elements from the current slice and appends them to the dest. // The current slice will be cleared. func (es ResourceSpansSlice) MoveAndAppendTo(dest ResourceSpansSlice) { es.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if es.orig == dest.orig { return } if *dest.orig == nil { // We can simply move the entire vector and avoid any allocations.
*dest.orig = *es.orig } else { *dest.orig = append(*dest.orig, *es.orig...) } *es.orig = nil } // RemoveIf calls f sequentially for each element present in the slice. // If f returns true, the element is removed from the slice. func (es ResourceSpansSlice) RemoveIf(f func(ResourceSpans) bool) { es.state.AssertMutable() newLen := 0 for i := 0; i < len(*es.orig); i++ { if f(es.At(i)) { continue } if newLen == i { // Nothing to move, element is at the right place. newLen++ continue } (*es.orig)[newLen] = (*es.orig)[i] newLen++ } *es.orig = (*es.orig)[:newLen] } // CopyTo copies all elements from the current slice overriding the destination. func (es ResourceSpansSlice) CopyTo(dest ResourceSpansSlice) { dest.state.AssertMutable() *dest.orig = copyOrigResourceSpansSlice(*dest.orig, *es.orig) } // Sort sorts the ResourceSpans elements within ResourceSpansSlice given the // provided less function so that two instances of ResourceSpansSlice // can be compared. func (es ResourceSpansSlice) Sort(less func(a, b ResourceSpans) bool) { es.state.AssertMutable() sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) } func copyOrigResourceSpansSlice(dest, src []*otlptrace.ResourceSpans) []*otlptrace.ResourceSpans { if cap(dest) < len(src) { dest = make([]*otlptrace.ResourceSpans, len(src)) data := make([]otlptrace.ResourceSpans, len(src)) for i := range src { dest[i] = &data[i] } } dest = dest[:len(src)] for i := range src { copyOrigResourceSpans(dest[i], src[i]) } return dest }
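// Illustrative sketch (a standalone example test, not part of the generated
// sources): the common slice operations. EnsureCapacity pre-sizes the backing
// array when the final element count is known, RemoveIf filters in place, and
// All enables range-over-func iteration (Go 1.23+). The "shard" attribute is
// an invented example key.
package ptrace_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleResourceSpansSlice() {
	td := ptrace.NewTraces()
	rss := td.ResourceSpans()
	rss.EnsureCapacity(3)
	for i := 0; i < 3; i++ {
		rss.AppendEmpty().Resource().Attributes().PutInt("shard", int64(i))
	}

	// Drop every ResourceSpans whose "shard" attribute equals 1.
	rss.RemoveIf(func(rs ptrace.ResourceSpans) bool {
		v, ok := rs.Resource().Attributes().Get("shard")
		return ok && v.Int() == 1
	})

	for i, rs := range rss.All() {
		v, _ := rs.Resource().Attributes().Get("shard")
		fmt.Println(i, v.Int())
	}
	// Output:
	// 0 0
	// 1 2
}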
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace

import (
	"go.opentelemetry.io/collector/pdata/internal"
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

// ScopeSpans is a collection of spans from an InstrumentationScope.
//
// This is a reference type, if passed by value and callee modifies it the
// caller will see the modification.
//
// Must use NewScopeSpans function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpans struct {
	orig  *otlptrace.ScopeSpans
	state *internal.State
}

func newScopeSpans(orig *otlptrace.ScopeSpans, state *internal.State) ScopeSpans {
	return ScopeSpans{orig: orig, state: state}
}

// NewScopeSpans creates a new empty ScopeSpans.
//
// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice,
// OR directly access the member if this is embedded in another struct.
func NewScopeSpans() ScopeSpans {
	state := internal.StateMutable
	return newScopeSpans(&otlptrace.ScopeSpans{}, &state)
}

// MoveTo moves all properties from the current struct overriding the destination and
// resetting the current instance to its zero value.
func (ms ScopeSpans) MoveTo(dest ScopeSpans) {
	ms.state.AssertMutable()
	dest.state.AssertMutable()
	// If they point to the same data, they are the same, nothing to do.
	if ms.orig == dest.orig {
		return
	}
	*dest.orig = *ms.orig
	*ms.orig = otlptrace.ScopeSpans{}
}

// Scope returns the scope associated with this ScopeSpans.
func (ms ScopeSpans) Scope() pcommon.InstrumentationScope {
	return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state))
}

// SchemaUrl returns the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SchemaUrl() string {
	return ms.orig.SchemaUrl
}

// SetSchemaUrl replaces the schemaurl associated with this ScopeSpans.
func (ms ScopeSpans) SetSchemaUrl(v string) {
	ms.state.AssertMutable()
	ms.orig.SchemaUrl = v
}

// Spans returns the Spans associated with this ScopeSpans.
func (ms ScopeSpans) Spans() SpanSlice {
	return newSpanSlice(&ms.orig.Spans, ms.state)
}

// CopyTo copies all properties from the current struct overriding the destination.
func (ms ScopeSpans) CopyTo(dest ScopeSpans) {
	dest.state.AssertMutable()
	copyOrigScopeSpans(dest.orig, ms.orig)
}

func copyOrigScopeSpans(dest, src *otlptrace.ScopeSpans) {
	internal.CopyOrigInstrumentationScope(&dest.Scope, &src.Scope)
	dest.SchemaUrl = src.SchemaUrl
	dest.Spans = copyOrigSpanSlice(dest.Spans, src.Spans)
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace

import (
	"iter"
	"sort"

	"go.opentelemetry.io/collector/pdata/internal"
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// ScopeSpansSlice logically represents a slice of ScopeSpans.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewScopeSpansSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type ScopeSpansSlice struct {
	orig  *[]*otlptrace.ScopeSpans
	state *internal.State
}

func newScopeSpansSlice(orig *[]*otlptrace.ScopeSpans, state *internal.State) ScopeSpansSlice {
	return ScopeSpansSlice{orig: orig, state: state}
}

// NewScopeSpansSlice creates a ScopeSpansSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewScopeSpansSlice() ScopeSpansSlice {
	orig := []*otlptrace.ScopeSpans(nil)
	state := internal.StateMutable
	return newScopeSpansSlice(&orig, &state)
}

// Len returns the number of elements in the slice.
//
// Returns "0" for an instance created with "NewScopeSpansSlice()".
func (es ScopeSpansSlice) Len() int {
	return len(*es.orig)
}

// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
//	for i := 0; i < es.Len(); i++ {
//	    e := es.At(i)
//	    ... // Do something with the element
//	}
func (es ScopeSpansSlice) At(i int) ScopeSpans {
	return newScopeSpans((*es.orig)[i], es.state)
}

// All returns an iterator over index-value pairs in the slice.
//
//	for i, v := range es.All() {
//	    ... // Do something with index-value pair
//	}
func (es ScopeSpansSlice) All() iter.Seq2[int, ScopeSpans] {
	return func(yield func(int, ScopeSpans) bool) {
		for i := 0; i < es.Len(); i++ {
			if !yield(i, es.At(i)) {
				return
			}
		}
	}
}

// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
//  1. If the newCap <= cap then no change in capacity.
//  2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new ScopeSpansSlice can be initialized:
//
//	es := NewScopeSpansSlice()
//	es.EnsureCapacity(4)
//	for i := 0; i < 4; i++ {
//	    e := es.AppendEmpty()
//	    // Here should set all the values for e.
//	}
func (es ScopeSpansSlice) EnsureCapacity(newCap int) {
	es.state.AssertMutable()
	oldCap := cap(*es.orig)
	if newCap <= oldCap {
		return
	}
	newOrig := make([]*otlptrace.ScopeSpans, len(*es.orig), newCap)
	copy(newOrig, *es.orig)
	*es.orig = newOrig
}

// AppendEmpty will append to the end of the slice an empty ScopeSpans.
// It returns the newly added ScopeSpans.
func (es ScopeSpansSlice) AppendEmpty() ScopeSpans {
	es.state.AssertMutable()
	*es.orig = append(*es.orig, &otlptrace.ScopeSpans{})
	return es.At(es.Len() - 1)
}

// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es ScopeSpansSlice) MoveAndAppendTo(dest ScopeSpansSlice) {
	es.state.AssertMutable()
	dest.state.AssertMutable()
	// If they point to the same data, they are the same, nothing to do.
	if es.orig == dest.orig {
		return
	}
	if *dest.orig == nil {
		// We can simply move the entire vector and avoid any allocations.
		*dest.orig = *es.orig
	} else {
		*dest.orig = append(*dest.orig, *es.orig...)
	}
	*es.orig = nil
}

// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es ScopeSpansSlice) RemoveIf(f func(ScopeSpans) bool) {
	es.state.AssertMutable()
	newLen := 0
	for i := 0; i < len(*es.orig); i++ {
		if f(es.At(i)) {
			continue
		}
		if newLen == i {
			// Nothing to move, element is at the right place.
			newLen++
			continue
		}
		(*es.orig)[newLen] = (*es.orig)[i]
		newLen++
	}
	*es.orig = (*es.orig)[:newLen]
}

// CopyTo copies all elements from the current slice overriding the destination.
func (es ScopeSpansSlice) CopyTo(dest ScopeSpansSlice) {
	dest.state.AssertMutable()
	*dest.orig = copyOrigScopeSpansSlice(*dest.orig, *es.orig)
}

// Sort sorts the ScopeSpans elements within ScopeSpansSlice given the
// provided less function so that two instances of ScopeSpansSlice
// can be compared.
func (es ScopeSpansSlice) Sort(less func(a, b ScopeSpans) bool) {
	es.state.AssertMutable()
	sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}

func copyOrigScopeSpansSlice(dest, src []*otlptrace.ScopeSpans) []*otlptrace.ScopeSpans {
	if cap(dest) < len(src) {
		dest = make([]*otlptrace.ScopeSpans, len(src))
		data := make([]otlptrace.ScopeSpans, len(src))
		for i := range src {
			dest[i] = &data[i]
		}
	}
	dest = dest[:len(src)]
	for i := range src {
		copyOrigScopeSpans(dest[i], src[i])
	}
	return dest
}
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/data" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // Span represents a single operation within a trace. // See Span definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewSpan function to create new instances. // Important: zero-initialized instance is not valid for use. type Span struct { orig *otlptrace.Span state *internal.State } func newSpan(orig *otlptrace.Span, state *internal.State) Span { return Span{orig: orig, state: state} } // NewSpan creates a new empty Span. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpan() Span { state := internal.StateMutable return newSpan(&otlptrace.Span{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms Span) MoveTo(dest Span) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlptrace.Span{} } // TraceID returns the traceid associated with this Span. func (ms Span) TraceID() pcommon.TraceID { return pcommon.TraceID(ms.orig.TraceId) } // SetTraceID replaces the traceid associated with this Span. func (ms Span) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() ms.orig.TraceId = data.TraceID(v) } // SpanID returns the spanid associated with this Span. func (ms Span) SpanID() pcommon.SpanID { return pcommon.SpanID(ms.orig.SpanId) } // SetSpanID replaces the spanid associated with this Span. func (ms Span) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() ms.orig.SpanId = data.SpanID(v) } // TraceState returns the tracestate associated with this Span. func (ms Span) TraceState() pcommon.TraceState { return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state)) } // ParentSpanID returns the parentspanid associated with this Span. func (ms Span) ParentSpanID() pcommon.SpanID { return pcommon.SpanID(ms.orig.ParentSpanId) } // SetParentSpanID replaces the parentspanid associated with this Span. func (ms Span) SetParentSpanID(v pcommon.SpanID) { ms.state.AssertMutable() ms.orig.ParentSpanId = data.SpanID(v) } // Name returns the name associated with this Span. func (ms Span) Name() string { return ms.orig.Name } // SetName replaces the name associated with this Span. func (ms Span) SetName(v string) { ms.state.AssertMutable() ms.orig.Name = v } // Flags returns the flags associated with this Span. func (ms Span) Flags() uint32 { return ms.orig.Flags } // SetFlags replaces the flags associated with this Span. func (ms Span) SetFlags(v uint32) { ms.state.AssertMutable() ms.orig.Flags = v } // Kind returns the kind associated with this Span. func (ms Span) Kind() SpanKind { return SpanKind(ms.orig.Kind) } // SetKind replaces the kind associated with this Span. 
func (ms Span) SetKind(v SpanKind) { ms.state.AssertMutable() ms.orig.Kind = otlptrace.Span_SpanKind(v) } // StartTimestamp returns the starttimestamp associated with this Span. func (ms Span) StartTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.StartTimeUnixNano) } // SetStartTimestamp replaces the starttimestamp associated with this Span. func (ms Span) SetStartTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.StartTimeUnixNano = uint64(v) } // EndTimestamp returns the endtimestamp associated with this Span. func (ms Span) EndTimestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.EndTimeUnixNano) } // SetEndTimestamp replaces the endtimestamp associated with this Span. func (ms Span) SetEndTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.EndTimeUnixNano = uint64(v) } // Attributes returns the Attributes associated with this Span. func (ms Span) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this Span. func (ms Span) DroppedAttributesCount() uint32 { return ms.orig.DroppedAttributesCount } // SetDroppedAttributesCount replaces the droppedattributescount associated with this Span. func (ms Span) SetDroppedAttributesCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedAttributesCount = v } // Events returns the Events associated with this Span. func (ms Span) Events() SpanEventSlice { return newSpanEventSlice(&ms.orig.Events, ms.state) } // DroppedEventsCount returns the droppedeventscount associated with this Span. func (ms Span) DroppedEventsCount() uint32 { return ms.orig.DroppedEventsCount } // SetDroppedEventsCount replaces the droppedeventscount associated with this Span. func (ms Span) SetDroppedEventsCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedEventsCount = v } // Links returns the Links associated with this Span. func (ms Span) Links() SpanLinkSlice { return newSpanLinkSlice(&ms.orig.Links, ms.state) } // DroppedLinksCount returns the droppedlinkscount associated with this Span. func (ms Span) DroppedLinksCount() uint32 { return ms.orig.DroppedLinksCount } // SetDroppedLinksCount replaces the droppedlinkscount associated with this Span. func (ms Span) SetDroppedLinksCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedLinksCount = v } // Status returns the status associated with this Span. func (ms Span) Status() Status { return newStatus(&ms.orig.Status, ms.state) } // CopyTo copies all properties from the current struct overriding the destination. func (ms Span) CopyTo(dest Span) { dest.state.AssertMutable() copyOrigSpan(dest.orig, ms.orig) } func copyOrigSpan(dest, src *otlptrace.Span) { dest.TraceId = src.TraceId dest.SpanId = src.SpanId internal.CopyOrigTraceState(&dest.TraceState, &src.TraceState) dest.ParentSpanId = src.ParentSpanId dest.Name = src.Name dest.Flags = src.Flags dest.Kind = src.Kind dest.StartTimeUnixNano = src.StartTimeUnixNano dest.EndTimeUnixNano = src.EndTimeUnixNano dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.DroppedAttributesCount = src.DroppedAttributesCount dest.Events = copyOrigSpanEventSlice(dest.Events, src.Events) dest.DroppedEventsCount = src.DroppedEventsCount dest.Links = copyOrigSpanLinkSlice(dest.Links, src.Links) dest.DroppedLinksCount = src.DroppedLinksCount copyOrigStatus(&dest.Status, &src.Status) }
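// Illustrative sketch (a standalone example test, not part of the generated
// sources): setting the core Span fields. The IDs, route, and times are
// arbitrary example values; NewSpan is used here because example tests count
// as testing code, while pipelines should use Spans().AppendEmpty().
package ptrace_test

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleSpan() {
	span := ptrace.NewSpan()
	span.SetName("GET /cart")
	span.SetKind(ptrace.SpanKindServer)
	span.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4}))
	span.SetSpanID(pcommon.SpanID([8]byte{5, 6, 7, 8}))

	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(start.Add(150 * time.Millisecond)))

	span.Attributes().PutStr("http.route", "/cart")
	span.Status().SetCode(ptrace.StatusCodeOk)

	fmt.Println(span.Name(), span.Kind(), span.EndTimestamp().AsTime().Sub(span.StartTimestamp().AsTime()))
	// Output: GET /cart Server 150ms
}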
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. See OTLP for event definition. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewSpanEvent function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanEvent struct { orig *otlptrace.Span_Event state *internal.State } func newSpanEvent(orig *otlptrace.Span_Event, state *internal.State) SpanEvent { return SpanEvent{orig: orig, state: state} } // NewSpanEvent creates a new empty SpanEvent. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpanEvent() SpanEvent { state := internal.StateMutable return newSpanEvent(&otlptrace.Span_Event{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms SpanEvent) MoveTo(dest SpanEvent) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlptrace.Span_Event{} } // Timestamp returns the timestamp associated with this SpanEvent. func (ms SpanEvent) Timestamp() pcommon.Timestamp { return pcommon.Timestamp(ms.orig.TimeUnixNano) } // SetTimestamp replaces the timestamp associated with this SpanEvent. func (ms SpanEvent) SetTimestamp(v pcommon.Timestamp) { ms.state.AssertMutable() ms.orig.TimeUnixNano = uint64(v) } // Name returns the name associated with this SpanEvent. func (ms SpanEvent) Name() string { return ms.orig.Name } // SetName replaces the name associated with this SpanEvent. func (ms SpanEvent) SetName(v string) { ms.state.AssertMutable() ms.orig.Name = v } // Attributes returns the Attributes associated with this SpanEvent. func (ms SpanEvent) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this SpanEvent. func (ms SpanEvent) DroppedAttributesCount() uint32 { return ms.orig.DroppedAttributesCount } // SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanEvent. func (ms SpanEvent) SetDroppedAttributesCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedAttributesCount = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms SpanEvent) CopyTo(dest SpanEvent) { dest.state.AssertMutable() copyOrigSpanEvent(dest.orig, ms.orig) } func copyOrigSpanEvent(dest, src *otlptrace.Span_Event) { dest.TimeUnixNano = src.TimeUnixNano dest.Name = src.Name dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.DroppedAttributesCount = src.DroppedAttributesCount }
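// Illustrative sketch (a standalone example test, not part of the generated
// sources): recording a span event. The "exception"-style names echo common
// semantic conventions but are just example strings here.
package ptrace_test

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleSpanEvent() {
	span := ptrace.NewSpan()
	ev := span.Events().AppendEmpty()
	ev.SetName("exception")
	ev.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 42)))
	ev.Attributes().PutStr("exception.type", "io.EOF")

	fmt.Println(span.Events().Len(), ev.Name(), ev.Timestamp().AsTime().UnixNano())
	// Output: 1 exception 42
}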
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace

import (
	"iter"
	"sort"

	"go.opentelemetry.io/collector/pdata/internal"
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// SpanEventSlice logically represents a slice of SpanEvent.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanEventSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanEventSlice struct {
	orig  *[]*otlptrace.Span_Event
	state *internal.State
}

func newSpanEventSlice(orig *[]*otlptrace.Span_Event, state *internal.State) SpanEventSlice {
	return SpanEventSlice{orig: orig, state: state}
}

// NewSpanEventSlice creates a SpanEventSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanEventSlice() SpanEventSlice {
	orig := []*otlptrace.Span_Event(nil)
	state := internal.StateMutable
	return newSpanEventSlice(&orig, &state)
}

// Len returns the number of elements in the slice.
//
// Returns "0" for an instance created with "NewSpanEventSlice()".
func (es SpanEventSlice) Len() int {
	return len(*es.orig)
}

// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
//	for i := 0; i < es.Len(); i++ {
//	    e := es.At(i)
//	    ... // Do something with the element
//	}
func (es SpanEventSlice) At(i int) SpanEvent {
	return newSpanEvent((*es.orig)[i], es.state)
}

// All returns an iterator over index-value pairs in the slice.
//
//	for i, v := range es.All() {
//	    ... // Do something with index-value pair
//	}
func (es SpanEventSlice) All() iter.Seq2[int, SpanEvent] {
	return func(yield func(int, SpanEvent) bool) {
		for i := 0; i < es.Len(); i++ {
			if !yield(i, es.At(i)) {
				return
			}
		}
	}
}

// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
//  1. If the newCap <= cap then no change in capacity.
//  2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SpanEventSlice can be initialized:
//
//	es := NewSpanEventSlice()
//	es.EnsureCapacity(4)
//	for i := 0; i < 4; i++ {
//	    e := es.AppendEmpty()
//	    // Here should set all the values for e.
//	}
func (es SpanEventSlice) EnsureCapacity(newCap int) {
	es.state.AssertMutable()
	oldCap := cap(*es.orig)
	if newCap <= oldCap {
		return
	}
	newOrig := make([]*otlptrace.Span_Event, len(*es.orig), newCap)
	copy(newOrig, *es.orig)
	*es.orig = newOrig
}

// AppendEmpty will append to the end of the slice an empty SpanEvent.
// It returns the newly added SpanEvent.
func (es SpanEventSlice) AppendEmpty() SpanEvent {
	es.state.AssertMutable()
	*es.orig = append(*es.orig, &otlptrace.Span_Event{})
	return es.At(es.Len() - 1)
}

// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanEventSlice) MoveAndAppendTo(dest SpanEventSlice) {
	es.state.AssertMutable()
	dest.state.AssertMutable()
	// If they point to the same data, they are the same, nothing to do.
	if es.orig == dest.orig {
		return
	}
	if *dest.orig == nil {
		// We can simply move the entire vector and avoid any allocations.
		*dest.orig = *es.orig
	} else {
		*dest.orig = append(*dest.orig, *es.orig...)
	}
	*es.orig = nil
}

// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanEventSlice) RemoveIf(f func(SpanEvent) bool) {
	es.state.AssertMutable()
	newLen := 0
	for i := 0; i < len(*es.orig); i++ {
		if f(es.At(i)) {
			continue
		}
		if newLen == i {
			// Nothing to move, element is at the right place.
			newLen++
			continue
		}
		(*es.orig)[newLen] = (*es.orig)[i]
		newLen++
	}
	*es.orig = (*es.orig)[:newLen]
}

// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanEventSlice) CopyTo(dest SpanEventSlice) {
	dest.state.AssertMutable()
	*dest.orig = copyOrigSpanEventSlice(*dest.orig, *es.orig)
}

// Sort sorts the SpanEvent elements within SpanEventSlice given the
// provided less function so that two instances of SpanEventSlice
// can be compared.
func (es SpanEventSlice) Sort(less func(a, b SpanEvent) bool) {
	es.state.AssertMutable()
	sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}

func copyOrigSpanEventSlice(dest, src []*otlptrace.Span_Event) []*otlptrace.Span_Event {
	if cap(dest) < len(src) {
		dest = make([]*otlptrace.Span_Event, len(src))
		data := make([]otlptrace.Span_Event, len(src))
		for i := range src {
			dest[i] = &data[i]
		}
	}
	dest = dest[:len(src)]
	for i := range src {
		copyOrigSpanEvent(dest[i], src[i])
	}
	return dest
}
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" "go.opentelemetry.io/collector/pdata/internal/data" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/pcommon" ) // SpanLink is a pointer from the current span to another span in the same trace or in a // different trace. // See Link definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewSpanLink function to create new instances. // Important: zero-initialized instance is not valid for use. type SpanLink struct { orig *otlptrace.Span_Link state *internal.State } func newSpanLink(orig *otlptrace.Span_Link, state *internal.State) SpanLink { return SpanLink{orig: orig, state: state} } // NewSpanLink creates a new empty SpanLink. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewSpanLink() SpanLink { state := internal.StateMutable return newSpanLink(&otlptrace.Span_Link{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms SpanLink) MoveTo(dest SpanLink) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlptrace.Span_Link{} } // TraceID returns the traceid associated with this SpanLink. func (ms SpanLink) TraceID() pcommon.TraceID { return pcommon.TraceID(ms.orig.TraceId) } // SetTraceID replaces the traceid associated with this SpanLink. func (ms SpanLink) SetTraceID(v pcommon.TraceID) { ms.state.AssertMutable() ms.orig.TraceId = data.TraceID(v) } // SpanID returns the spanid associated with this SpanLink. func (ms SpanLink) SpanID() pcommon.SpanID { return pcommon.SpanID(ms.orig.SpanId) } // SetSpanID replaces the spanid associated with this SpanLink. func (ms SpanLink) SetSpanID(v pcommon.SpanID) { ms.state.AssertMutable() ms.orig.SpanId = data.SpanID(v) } // TraceState returns the tracestate associated with this SpanLink. func (ms SpanLink) TraceState() pcommon.TraceState { return pcommon.TraceState(internal.NewTraceState(&ms.orig.TraceState, ms.state)) } // Flags returns the flags associated with this SpanLink. func (ms SpanLink) Flags() uint32 { return ms.orig.Flags } // SetFlags replaces the flags associated with this SpanLink. func (ms SpanLink) SetFlags(v uint32) { ms.state.AssertMutable() ms.orig.Flags = v } // Attributes returns the Attributes associated with this SpanLink. func (ms SpanLink) Attributes() pcommon.Map { return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) } // DroppedAttributesCount returns the droppedattributescount associated with this SpanLink. func (ms SpanLink) DroppedAttributesCount() uint32 { return ms.orig.DroppedAttributesCount } // SetDroppedAttributesCount replaces the droppedattributescount associated with this SpanLink. 
func (ms SpanLink) SetDroppedAttributesCount(v uint32) { ms.state.AssertMutable() ms.orig.DroppedAttributesCount = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms SpanLink) CopyTo(dest SpanLink) { dest.state.AssertMutable() copyOrigSpanLink(dest.orig, ms.orig) } func copyOrigSpanLink(dest, src *otlptrace.Span_Link) { dest.TraceId = src.TraceId dest.SpanId = src.SpanId internal.CopyOrigTraceState(&dest.TraceState, &src.TraceState) dest.Flags = src.Flags dest.Attributes = internal.CopyOrigMap(dest.Attributes, src.Attributes) dest.DroppedAttributesCount = src.DroppedAttributesCount }
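// Illustrative sketch (a standalone example test, not part of the generated
// sources): linking a span to a span in another trace. The IDs and the vendor
// tracestate entry are invented example values.
package ptrace_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleSpanLink() {
	span := ptrace.NewSpan()
	link := span.Links().AppendEmpty()
	link.SetTraceID(pcommon.TraceID([16]byte{0xaa}))
	link.SetSpanID(pcommon.SpanID([8]byte{0xbb}))
	link.TraceState().FromRaw("vendor=value")

	fmt.Println(span.Links().Len(), link.TraceState().AsRaw())
	// Output: 1 vendor=value
}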
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace

import (
	"iter"
	"sort"

	"go.opentelemetry.io/collector/pdata/internal"
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// SpanLinkSlice logically represents a slice of SpanLink.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanLinkSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanLinkSlice struct {
	orig  *[]*otlptrace.Span_Link
	state *internal.State
}

func newSpanLinkSlice(orig *[]*otlptrace.Span_Link, state *internal.State) SpanLinkSlice {
	return SpanLinkSlice{orig: orig, state: state}
}

// NewSpanLinkSlice creates a SpanLinkSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanLinkSlice() SpanLinkSlice {
	orig := []*otlptrace.Span_Link(nil)
	state := internal.StateMutable
	return newSpanLinkSlice(&orig, &state)
}

// Len returns the number of elements in the slice.
//
// Returns "0" for an instance created with "NewSpanLinkSlice()".
func (es SpanLinkSlice) Len() int {
	return len(*es.orig)
}

// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
//	for i := 0; i < es.Len(); i++ {
//	    e := es.At(i)
//	    ... // Do something with the element
//	}
func (es SpanLinkSlice) At(i int) SpanLink {
	return newSpanLink((*es.orig)[i], es.state)
}

// All returns an iterator over index-value pairs in the slice.
//
//	for i, v := range es.All() {
//	    ... // Do something with index-value pair
//	}
func (es SpanLinkSlice) All() iter.Seq2[int, SpanLink] {
	return func(yield func(int, SpanLink) bool) {
		for i := 0; i < es.Len(); i++ {
			if !yield(i, es.At(i)) {
				return
			}
		}
	}
}

// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
//  1. If the newCap <= cap then no change in capacity.
//  2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SpanLinkSlice can be initialized:
//
//	es := NewSpanLinkSlice()
//	es.EnsureCapacity(4)
//	for i := 0; i < 4; i++ {
//	    e := es.AppendEmpty()
//	    // Here should set all the values for e.
//	}
func (es SpanLinkSlice) EnsureCapacity(newCap int) {
	es.state.AssertMutable()
	oldCap := cap(*es.orig)
	if newCap <= oldCap {
		return
	}
	newOrig := make([]*otlptrace.Span_Link, len(*es.orig), newCap)
	copy(newOrig, *es.orig)
	*es.orig = newOrig
}

// AppendEmpty will append to the end of the slice an empty SpanLink.
// It returns the newly added SpanLink.
func (es SpanLinkSlice) AppendEmpty() SpanLink {
	es.state.AssertMutable()
	*es.orig = append(*es.orig, &otlptrace.Span_Link{})
	return es.At(es.Len() - 1)
}

// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanLinkSlice) MoveAndAppendTo(dest SpanLinkSlice) {
	es.state.AssertMutable()
	dest.state.AssertMutable()
	// If they point to the same data, they are the same, nothing to do.
	if es.orig == dest.orig {
		return
	}
	if *dest.orig == nil {
		// We can simply move the entire vector and avoid any allocations.
		*dest.orig = *es.orig
	} else {
		*dest.orig = append(*dest.orig, *es.orig...)
	}
	*es.orig = nil
}

// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanLinkSlice) RemoveIf(f func(SpanLink) bool) {
	es.state.AssertMutable()
	newLen := 0
	for i := 0; i < len(*es.orig); i++ {
		if f(es.At(i)) {
			continue
		}
		if newLen == i {
			// Nothing to move, element is at the right place.
			newLen++
			continue
		}
		(*es.orig)[newLen] = (*es.orig)[i]
		newLen++
	}
	*es.orig = (*es.orig)[:newLen]
}

// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanLinkSlice) CopyTo(dest SpanLinkSlice) {
	dest.state.AssertMutable()
	*dest.orig = copyOrigSpanLinkSlice(*dest.orig, *es.orig)
}

// Sort sorts the SpanLink elements within SpanLinkSlice given the
// provided less function so that two instances of SpanLinkSlice
// can be compared.
func (es SpanLinkSlice) Sort(less func(a, b SpanLink) bool) {
	es.state.AssertMutable()
	sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}

func copyOrigSpanLinkSlice(dest, src []*otlptrace.Span_Link) []*otlptrace.Span_Link {
	if cap(dest) < len(src) {
		dest = make([]*otlptrace.Span_Link, len(src))
		data := make([]otlptrace.Span_Link, len(src))
		for i := range src {
			dest[i] = &data[i]
		}
	}
	dest = dest[:len(src)]
	for i := range src {
		copyOrigSpanLink(dest[i], src[i])
	}
	return dest
}
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT.
// To regenerate this file run "make genpdata".
package ptrace

import (
	"iter"
	"sort"

	"go.opentelemetry.io/collector/pdata/internal"
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// SpanSlice logically represents a slice of Span.
//
// This is a reference type. If passed by value and callee modifies it, the
// caller will see the modification.
//
// Must use NewSpanSlice function to create new instances.
// Important: zero-initialized instance is not valid for use.
type SpanSlice struct {
	orig  *[]*otlptrace.Span
	state *internal.State
}

func newSpanSlice(orig *[]*otlptrace.Span, state *internal.State) SpanSlice {
	return SpanSlice{orig: orig, state: state}
}

// NewSpanSlice creates a SpanSlice with 0 elements.
// Can use "EnsureCapacity" to initialize with a given capacity.
func NewSpanSlice() SpanSlice {
	orig := []*otlptrace.Span(nil)
	state := internal.StateMutable
	return newSpanSlice(&orig, &state)
}

// Len returns the number of elements in the slice.
//
// Returns "0" for an instance created with "NewSpanSlice()".
func (es SpanSlice) Len() int {
	return len(*es.orig)
}

// At returns the element at the given index.
//
// This function is used mostly for iterating over all the values in the slice:
//
//	for i := 0; i < es.Len(); i++ {
//	    e := es.At(i)
//	    ... // Do something with the element
//	}
func (es SpanSlice) At(i int) Span {
	return newSpan((*es.orig)[i], es.state)
}

// All returns an iterator over index-value pairs in the slice.
//
//	for i, v := range es.All() {
//	    ... // Do something with index-value pair
//	}
func (es SpanSlice) All() iter.Seq2[int, Span] {
	return func(yield func(int, Span) bool) {
		for i := 0; i < es.Len(); i++ {
			if !yield(i, es.At(i)) {
				return
			}
		}
	}
}

// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
//  1. If the newCap <= cap then no change in capacity.
//  2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
//
// Here is how a new SpanSlice can be initialized:
//
//	es := NewSpanSlice()
//	es.EnsureCapacity(4)
//	for i := 0; i < 4; i++ {
//	    e := es.AppendEmpty()
//	    // Here should set all the values for e.
//	}
func (es SpanSlice) EnsureCapacity(newCap int) {
	es.state.AssertMutable()
	oldCap := cap(*es.orig)
	if newCap <= oldCap {
		return
	}
	newOrig := make([]*otlptrace.Span, len(*es.orig), newCap)
	copy(newOrig, *es.orig)
	*es.orig = newOrig
}

// AppendEmpty will append to the end of the slice an empty Span.
// It returns the newly added Span.
func (es SpanSlice) AppendEmpty() Span {
	es.state.AssertMutable()
	*es.orig = append(*es.orig, &otlptrace.Span{})
	return es.At(es.Len() - 1)
}

// MoveAndAppendTo moves all elements from the current slice and appends them to the dest.
// The current slice will be cleared.
func (es SpanSlice) MoveAndAppendTo(dest SpanSlice) {
	es.state.AssertMutable()
	dest.state.AssertMutable()
	// If they point to the same data, they are the same, nothing to do.
	if es.orig == dest.orig {
		return
	}
	if *dest.orig == nil {
		// We can simply move the entire vector and avoid any allocations.
		*dest.orig = *es.orig
	} else {
		*dest.orig = append(*dest.orig, *es.orig...)
	}
	*es.orig = nil
}

// RemoveIf calls f sequentially for each element present in the slice.
// If f returns true, the element is removed from the slice.
func (es SpanSlice) RemoveIf(f func(Span) bool) {
	es.state.AssertMutable()
	newLen := 0
	for i := 0; i < len(*es.orig); i++ {
		if f(es.At(i)) {
			continue
		}
		if newLen == i {
			// Nothing to move, element is at the right place.
			newLen++
			continue
		}
		(*es.orig)[newLen] = (*es.orig)[i]
		newLen++
	}
	*es.orig = (*es.orig)[:newLen]
}

// CopyTo copies all elements from the current slice overriding the destination.
func (es SpanSlice) CopyTo(dest SpanSlice) {
	dest.state.AssertMutable()
	*dest.orig = copyOrigSpanSlice(*dest.orig, *es.orig)
}

// Sort sorts the Span elements within SpanSlice given the
// provided less function so that two instances of SpanSlice
// can be compared.
func (es SpanSlice) Sort(less func(a, b Span) bool) {
	es.state.AssertMutable()
	sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) })
}

func copyOrigSpanSlice(dest, src []*otlptrace.Span) []*otlptrace.Span {
	if cap(dest) < len(src) {
		dest = make([]*otlptrace.Span, len(src))
		data := make([]otlptrace.Span, len(src))
		for i := range src {
			dest[i] = &data[i]
		}
	}
	dest = dest[:len(src)]
	for i := range src {
		copyOrigSpan(dest[i], src[i])
	}
	return dest
}
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptrace import ( "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) // Status is an optional final status for this span. Semantically, when Status was not // set, that means the span ended without errors and to assume Status.Ok (code = 0). // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewStatus function to create new instances. // Important: zero-initialized instance is not valid for use. type Status struct { orig *otlptrace.Status state *internal.State } func newStatus(orig *otlptrace.Status, state *internal.State) Status { return Status{orig: orig, state: state} } // NewStatus creates a new empty Status. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewStatus() Status { state := internal.StateMutable return newStatus(&otlptrace.Status{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms Status) MoveTo(dest Status) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlptrace.Status{} } // Code returns the code associated with this Status. func (ms Status) Code() StatusCode { return StatusCode(ms.orig.Code) } // SetCode replaces the code associated with this Status. func (ms Status) SetCode(v StatusCode) { ms.state.AssertMutable() ms.orig.Code = otlptrace.Status_StatusCode(v) } // Message returns the message associated with this Status. func (ms Status) Message() string { return ms.orig.Message } // SetMessage replaces the message associated with this Status. func (ms Status) SetMessage(v string) { ms.state.AssertMutable() ms.orig.Message = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms Status) CopyTo(dest Status) { dest.state.AssertMutable() copyOrigStatus(dest.orig, ms.orig) } func copyOrigStatus(dest, src *otlptrace.Status) { dest.Code = src.Code dest.Message = src.Message }
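// Illustrative sketch (a standalone example test, not part of the generated
// sources): marking a span as failed. Leaving Status untouched keeps the code
// Unset, which consumers treat as "ended without error". The message text is
// an invented example value.
package ptrace_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleStatus() {
	span := ptrace.NewSpan()
	fmt.Println(span.Status().Code())

	span.Status().SetCode(ptrace.StatusCodeError)
	span.Status().SetMessage("upstream timeout")
	fmt.Println(span.Status().Code(), span.Status().Message())
	// Output:
	// Unset
	// Error upstream timeout
}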
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" import ( "bytes" "fmt" jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // JSONMarshaler marshals pdata.Traces to JSON bytes using the OTLP/JSON format. type JSONMarshaler struct{} // MarshalTraces to the OTLP/JSON format. func (*JSONMarshaler) MarshalTraces(td Traces) ([]byte, error) { buf := bytes.Buffer{} pb := internal.TracesToProto(internal.Traces(td)) err := json.Marshal(&buf, &pb) return buf.Bytes(), err } // JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pdata.Traces. type JSONUnmarshaler struct{} // UnmarshalTraces from OTLP/JSON format into pdata.Traces. func (*JSONUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { iter := jsoniter.ConfigFastest.BorrowIterator(buf) defer jsoniter.ConfigFastest.ReturnIterator(iter) td := NewTraces() td.unmarshalJsoniter(iter) if iter.Error != nil { return Traces{}, iter.Error } otlp.MigrateTraces(td.getOrig().ResourceSpans) return td, nil } func (ms Traces) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resourceSpans", "resource_spans": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.ResourceSpans().AppendEmpty().unmarshalJsoniter(iter) return true }) default: iter.Skip() } return true }) } func (ms ResourceSpans) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "resource": json.ReadResource(iter, internal.GetOrigResource(internal.Resource(ms.Resource()))) case "scopeSpans", "scope_spans": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.ScopeSpans().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (ms ScopeSpans) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "scope": json.ReadScope(iter, &ms.orig.Scope) case "spans": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { ms.Spans().AppendEmpty().unmarshalJsoniter(iter) return true }) case "schemaUrl", "schema_url": ms.orig.SchemaUrl = iter.ReadString() default: iter.Skip() } return true }) } func (dest Span) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "traceId", "trace_id": if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpan.traceId", fmt.Sprintf("parse trace_id:%v", err)) } case "spanId", "span_id": if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpan.spanId", fmt.Sprintf("parse span_id:%v", err)) } case "traceState", "trace_state": dest.TraceState().FromRaw(iter.ReadString()) case "parentSpanId", "parent_span_id": if err := dest.orig.ParentSpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpan.parentSpanId", fmt.Sprintf("parse parent_span_id:%v", err)) } case "flags": dest.orig.Flags = json.ReadUint32(iter) case "name": dest.orig.Name = iter.ReadString() case "kind": dest.orig.Kind = 
otlptrace.Span_SpanKind(json.ReadEnumValue(iter, otlptrace.Span_SpanKind_value)) case "startTimeUnixNano", "start_time_unix_nano": dest.orig.StartTimeUnixNano = json.ReadUint64(iter) case "endTimeUnixNano", "end_time_unix_nano": dest.orig.EndTimeUnixNano = json.ReadUint64(iter) case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) return true }) case "droppedAttributesCount", "dropped_attributes_count": dest.orig.DroppedAttributesCount = json.ReadUint32(iter) case "events": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { dest.Events().AppendEmpty().unmarshalJsoniter(iter) return true }) case "droppedEventsCount", "dropped_events_count": dest.orig.DroppedEventsCount = json.ReadUint32(iter) case "links": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { dest.Links().AppendEmpty().unmarshalJsoniter(iter) return true }) case "droppedLinksCount", "dropped_links_count": dest.orig.DroppedLinksCount = json.ReadUint32(iter) case "status": dest.Status().unmarshalJsoniter(iter) default: iter.Skip() } return true }) } func (dest Status) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "message": dest.orig.Message = iter.ReadString() case "code": dest.orig.Code = otlptrace.Status_StatusCode(json.ReadEnumValue(iter, otlptrace.Status_StatusCode_value)) default: iter.Skip() } return true }) } func (dest SpanLink) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "traceId", "trace_id": if err := dest.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpanLink", fmt.Sprintf("parse trace_id:%v", err)) } case "spanId", "span_id": if err := dest.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpanLink", fmt.Sprintf("parse span_id:%v", err)) } case "traceState", "trace_state": dest.orig.TraceState = iter.ReadString() case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) return true }) case "droppedAttributesCount", "dropped_attributes_count": dest.orig.DroppedAttributesCount = json.ReadUint32(iter) case "flags": dest.orig.Flags = json.ReadUint32(iter) default: iter.Skip() } return true }) } func (dest SpanEvent) unmarshalJsoniter(iter *jsoniter.Iterator) { iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { switch f { case "timeUnixNano", "time_unix_nano": dest.orig.TimeUnixNano = json.ReadUint64(iter) case "name": dest.orig.Name = iter.ReadString() case "attributes": iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { dest.orig.Attributes = append(dest.orig.Attributes, json.ReadAttribute(iter)) return true }) case "droppedAttributesCount", "dropped_attributes_count": dest.orig.DroppedAttributesCount = json.ReadUint32(iter) default: iter.Skip() } return true }) }
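// Illustrative sketch (a standalone example test, not part of the sources
// above): an OTLP/JSON round trip. The marshaler emits camelCase OTLP/JSON,
// while the unmarshaler also accepts the snake_case field names handled in the
// switch statements above.
package ptrace_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleJSONMarshaler() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("demo")

	m := &ptrace.JSONMarshaler{}
	buf, err := m.MarshalTraces(td)
	if err != nil {
		panic(err)
	}

	u := &ptrace.JSONUnmarshaler{}
	back, err := u.UnmarshalTraces(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.SpanCount())
	// Output: 1
}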
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace" import ( "go.opentelemetry.io/collector/pdata/internal" otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" ) var _ MarshalSizer = (*ProtoMarshaler)(nil) type ProtoMarshaler struct{} func (e *ProtoMarshaler) MarshalTraces(td Traces) ([]byte, error) { pb := internal.TracesToProto(internal.Traces(td)) return pb.Marshal() } func (e *ProtoMarshaler) TracesSize(td Traces) int { pb := internal.TracesToProto(internal.Traces(td)) return pb.Size() } func (e *ProtoMarshaler) ResourceSpansSize(rs ResourceSpans) int { return rs.orig.Size() } func (e *ProtoMarshaler) ScopeSpansSize(ss ScopeSpans) int { return ss.orig.Size() } func (e *ProtoMarshaler) SpanSize(span Span) int { return span.orig.Size() } type ProtoUnmarshaler struct{} func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) { pb := otlptrace.TracesData{} err := pb.Unmarshal(buf) return Traces(internal.TracesFromProto(pb)), err }
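// Illustrative sketch (a standalone example test, not part of the sources
// above): a proto round trip. TracesSize reports the encoded size without
// allocating the buffer, so it matches the length MarshalTraces produces.
package ptrace_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func ExampleProtoMarshaler() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("demo")

	m := &ptrace.ProtoMarshaler{}
	buf, err := m.MarshalTraces(td)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(buf) == m.TracesSize(td))

	back, err := (&ptrace.ProtoUnmarshaler{}).UnmarshalTraces(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.SpanCount())
	// Output:
	// true
	// 1
}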
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 // Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. // To regenerate this file run "make genpdata". package ptraceotlp import ( "go.opentelemetry.io/collector/pdata/internal" otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" ) // ExportPartialSuccess represents the details of a partially successful export request. // // This is a reference type, if passed by value and callee modifies it the // caller will see the modification. // // Must use NewExportPartialSuccess function to create new instances. // Important: zero-initialized instance is not valid for use. type ExportPartialSuccess struct { orig *otlpcollectortrace.ExportTracePartialSuccess state *internal.State } func newExportPartialSuccess(orig *otlpcollectortrace.ExportTracePartialSuccess, state *internal.State) ExportPartialSuccess { return ExportPartialSuccess{orig: orig, state: state} } // NewExportPartialSuccess creates a new empty ExportPartialSuccess. // // This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, // OR directly access the member if this is embedded in another struct. func NewExportPartialSuccess() ExportPartialSuccess { state := internal.StateMutable return newExportPartialSuccess(&otlpcollectortrace.ExportTracePartialSuccess{}, &state) } // MoveTo moves all properties from the current struct overriding the destination and // resetting the current instance to its zero value func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { ms.state.AssertMutable() dest.state.AssertMutable() // If they point to the same data, they are the same, nothing to do. if ms.orig == dest.orig { return } *dest.orig = *ms.orig *ms.orig = otlpcollectortrace.ExportTracePartialSuccess{} } // RejectedSpans returns the rejectedspans associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) RejectedSpans() int64 { return ms.orig.RejectedSpans } // SetRejectedSpans replaces the rejectedspans associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetRejectedSpans(v int64) { ms.state.AssertMutable() ms.orig.RejectedSpans = v } // ErrorMessage returns the errormessage associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) ErrorMessage() string { return ms.orig.ErrorMessage } // SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. func (ms ExportPartialSuccess) SetErrorMessage(v string) { ms.state.AssertMutable() ms.orig.ErrorMessage = v } // CopyTo copies all properties from the current struct overriding the destination. func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { dest.state.AssertMutable() copyOrigExportPartialSuccess(dest.orig, ms.orig) } func copyOrigExportPartialSuccess(dest, src *otlpcollectortrace.ExportTracePartialSuccess) { dest.RejectedSpans = src.RejectedSpans dest.ErrorMessage = src.ErrorMessage }
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" import ( "context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "go.opentelemetry.io/collector/pdata/internal" otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" "go.opentelemetry.io/collector/pdata/internal/otlp" ) // GRPCClient is the client API for OTLP-GRPC Traces service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GRPCClient interface { // Export ptrace.Traces to the server. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) // unexported disallow implementation of the GRPCClient. unexported() } // NewGRPCClient returns a new GRPCClient connected using the given connection. func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { return &grpcClient{rawClient: otlpcollectortrace.NewTraceServiceClient(cc)} } type grpcClient struct { rawClient otlpcollectortrace.TraceServiceClient } // Export implements the Client interface. func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { rsp, err := c.rawClient.Export(ctx, request.orig, opts...) if err != nil { return ExportResponse{}, err } state := internal.StateMutable return ExportResponse{orig: rsp, state: &state}, err } func (c *grpcClient) unexported() {} // GRPCServer is the server API for OTLP gRPC TracesService service. // Implementations MUST embed UnimplementedGRPCServer. type GRPCServer interface { // Export is called every time a new request is received. // // For performance reasons, it is recommended to keep this RPC // alive for the entire life of the application. Export(context.Context, ExportRequest) (ExportResponse, error) // unexported disallow implementation of the GRPCServer. unexported() } var _ GRPCServer = (*UnimplementedGRPCServer)(nil) // UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. type UnimplementedGRPCServer struct{} func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") } func (*UnimplementedGRPCServer) unexported() {} // RegisterGRPCServer registers the GRPCServer to the grpc.Server. func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { otlpcollectortrace.RegisterTraceServiceServer(s, &rawTracesServer{srv: srv}) } type rawTracesServer struct { srv GRPCServer } func (s rawTracesServer) Export(ctx context.Context, request *otlpcollectortrace.ExportTraceServiceRequest) (*otlpcollectortrace.ExportTraceServiceResponse, error) { otlp.MigrateTraces(request.ResourceSpans) state := internal.StateMutable rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) return rsp.orig, err }
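// Illustrative sketch (a standalone program, not part of the sources above):
// a minimal GRPCServer implementation. Embedding UnimplementedGRPCServer
// satisfies the unexported interface method. The listener address is an
// assumption (4317 is the conventional OTLP/gRPC port), and the handler simply
// accepts every export.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

// tracesServer accepts every export and reports no partial failures.
type tracesServer struct {
	ptraceotlp.UnimplementedGRPCServer
}

func (s *tracesServer) Export(_ context.Context, req ptraceotlp.ExportRequest) (ptraceotlp.ExportResponse, error) {
	log.Printf("received %d spans", req.Traces().SpanCount())
	return ptraceotlp.NewExportResponse(), nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:4317")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	ptraceotlp.RegisterGRPCServer(srv, &tracesServer{})
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}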
// Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" import ( "bytes" "go.opentelemetry.io/collector/pdata/internal" otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" "go.opentelemetry.io/collector/pdata/internal/json" "go.opentelemetry.io/collector/pdata/internal/otlp" "go.opentelemetry.io/collector/pdata/ptrace" ) var jsonUnmarshaler = &ptrace.JSONUnmarshaler{} // ExportRequest represents the request for gRPC/HTTP client/server. // It's a wrapper for ptrace.Traces data. type ExportRequest struct { orig *otlpcollectortrace.ExportTraceServiceRequest state *internal.State } // NewExportRequest returns an empty ExportRequest. func NewExportRequest() ExportRequest { state := internal.StateMutable return ExportRequest{ orig: &otlpcollectortrace.ExportTraceServiceRequest{}, state: &state, } } // NewExportRequestFromTraces returns a ExportRequest from ptrace.Traces. // Because ExportRequest is a wrapper for ptrace.Traces, // any changes to the provided Traces struct will be reflected in the ExportRequest and vice versa. func NewExportRequestFromTraces(td ptrace.Traces) ExportRequest { return ExportRequest{ orig: internal.GetOrigTraces(internal.Traces(td)), state: internal.GetTracesState(internal.Traces(td)), } } // MarshalProto marshals ExportRequest into proto bytes. func (ms ExportRequest) MarshalProto() ([]byte, error) { return ms.orig.Marshal() } // UnmarshalProto unmarshalls ExportRequest from proto bytes. func (ms ExportRequest) UnmarshalProto(data []byte) error { if err := ms.orig.Unmarshal(data); err != nil { return err } otlp.MigrateTraces(ms.orig.ResourceSpans) return nil } // MarshalJSON marshals ExportRequest into JSON bytes. func (ms ExportRequest) MarshalJSON() ([]byte, error) { var buf bytes.Buffer if err := json.Marshal(&buf, ms.orig); err != nil { return nil, err } return buf.Bytes(), nil } // UnmarshalJSON unmarshalls ExportRequest from JSON bytes. func (ms ExportRequest) UnmarshalJSON(data []byte) error { td, err := jsonUnmarshaler.UnmarshalTraces(data) if err != nil { return err } *ms.orig = *internal.GetOrigTraces(internal.Traces(td)) return nil } func (ms ExportRequest) Traces() ptrace.Traces { return ptrace.Traces(internal.NewTraces(ms.orig, ms.state)) }
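// Illustrative sketch (a standalone example test, not part of the sources
// above): wrapping Traces in an ExportRequest and round-tripping it through
// the proto wire format. As the comment on NewExportRequestFromTraces notes,
// the request wraps the same underlying data, so later changes to td would be
// visible through req as well.
package ptraceotlp_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

func ExampleExportRequest() {
	td := ptrace.NewTraces()
	td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetName("demo")

	req := ptraceotlp.NewExportRequestFromTraces(td)
	buf, err := req.MarshalProto()
	if err != nil {
		panic(err)
	}

	back := ptraceotlp.NewExportRequest()
	if err := back.UnmarshalProto(buf); err != nil {
		panic(err)
	}
	fmt.Println(back.Traces().SpanCount())
	// Output: 1
}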
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package ptraceotlp // import "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"

import (
	"bytes"

	jsoniter "github.com/json-iterator/go"

	"go.opentelemetry.io/collector/pdata/internal"
	otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
	"go.opentelemetry.io/collector/pdata/internal/json"
)

// ExportResponse represents the response for gRPC/HTTP client/server.
type ExportResponse struct {
	orig  *otlpcollectortrace.ExportTraceServiceResponse
	state *internal.State
}

// NewExportResponse returns an empty ExportResponse.
func NewExportResponse() ExportResponse {
	state := internal.StateMutable
	return ExportResponse{
		orig:  &otlpcollectortrace.ExportTraceServiceResponse{},
		state: &state,
	}
}

// MarshalProto marshals ExportResponse into proto bytes.
func (ms ExportResponse) MarshalProto() ([]byte, error) {
	return ms.orig.Marshal()
}

// UnmarshalProto unmarshals ExportResponse from proto bytes.
func (ms ExportResponse) UnmarshalProto(data []byte) error {
	return ms.orig.Unmarshal(data)
}

// MarshalJSON marshals ExportResponse into JSON bytes.
func (ms ExportResponse) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	if err := json.Marshal(&buf, ms.orig); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// UnmarshalJSON unmarshals ExportResponse from JSON bytes.
func (ms ExportResponse) UnmarshalJSON(data []byte) error {
	iter := jsoniter.ConfigFastest.BorrowIterator(data)
	defer jsoniter.ConfigFastest.ReturnIterator(iter)
	ms.unmarshalJsoniter(iter)
	return iter.Error
}

func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) {
	iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool {
		switch f {
		case "partial_success", "partialSuccess":
			ms.PartialSuccess().unmarshalJsoniter(iter)
		default:
			iter.Skip()
		}
		return true
	})
}

// PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse.
func (ms ExportResponse) PartialSuccess() ExportPartialSuccess {
	return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state)
}

func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) {
	iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool {
		switch f {
		case "rejected_spans", "rejectedSpans":
			ms.orig.RejectedSpans = json.ReadInt64(iter)
		case "error_message", "errorMessage":
			ms.orig.ErrorMessage = iter.ReadString()
		default:
			iter.Skip()
		}
		return true
	})
}
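// Illustrative sketch (a standalone example test, not part of the sources
// above): reporting a partial success and round-tripping it through JSON. The
// rejected-span count and message are example values a server might set.
package ptraceotlp_test

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

func ExampleExportResponse() {
	resp := ptraceotlp.NewExportResponse()
	resp.PartialSuccess().SetRejectedSpans(7)
	resp.PartialSuccess().SetErrorMessage("spans older than retention were dropped")

	buf, err := resp.MarshalJSON()
	if err != nil {
		panic(err)
	}

	back := ptraceotlp.NewExportResponse()
	if err := back.UnmarshalJSON(buf); err != nil {
		panic(err)
	}
	fmt.Println(back.PartialSuccess().RejectedSpans())
	// Output: 7
}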
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"

import (
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// SpanKind is the type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type SpanKind int32

const (
	// SpanKindUnspecified represents that the SpanKind is unspecified; it MUST NOT be used.
	SpanKindUnspecified = SpanKind(otlptrace.Span_SPAN_KIND_UNSPECIFIED)
	// SpanKindInternal indicates that the span represents an internal operation within an application,
	// as opposed to an operation happening at the boundaries. Default value.
	SpanKindInternal = SpanKind(otlptrace.Span_SPAN_KIND_INTERNAL)
	// SpanKindServer indicates that the span covers server-side handling of an RPC or other
	// remote network request.
	SpanKindServer = SpanKind(otlptrace.Span_SPAN_KIND_SERVER)
	// SpanKindClient indicates that the span describes a request to some remote service.
	SpanKindClient = SpanKind(otlptrace.Span_SPAN_KIND_CLIENT)
	// SpanKindProducer indicates that the span describes a producer sending a message to a broker.
	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
	// between producer and consumer spans.
	// A PRODUCER span ends when the message is accepted by the broker, while the logical processing of
	// the message might span a much longer time.
	SpanKindProducer = SpanKind(otlptrace.Span_SPAN_KIND_PRODUCER)
	// SpanKindConsumer indicates that the span describes a consumer receiving a message from a broker.
	// Like the PRODUCER kind, there is often no direct critical path latency relationship between
	// producer and consumer spans.
	SpanKindConsumer = SpanKind(otlptrace.Span_SPAN_KIND_CONSUMER)
)

// String returns the string representation of the SpanKind.
func (sk SpanKind) String() string {
	switch sk {
	case SpanKindUnspecified:
		return "Unspecified"
	case SpanKindInternal:
		return "Internal"
	case SpanKindServer:
		return "Server"
	case SpanKindClient:
		return "Client"
	case SpanKindProducer:
		return "Producer"
	case SpanKindConsumer:
		return "Consumer"
	}
	return ""
}
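// Brief sketch (not part of the upstream file above): setting and reading a
// span's kind via the standard ptrace.Span accessors SetKind and Kind.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewTraces().ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetKind(ptrace.SpanKindServer)
	fmt.Println(span.Kind().String()) // Server
}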
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"

import (
	otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1"
)

// StatusCode mirrors the codes defined at
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32

const (
	StatusCodeUnset = StatusCode(otlptrace.Status_STATUS_CODE_UNSET)
	StatusCodeOk    = StatusCode(otlptrace.Status_STATUS_CODE_OK)
	StatusCodeError = StatusCode(otlptrace.Status_STATUS_CODE_ERROR)
)

// String returns the string representation of the StatusCode.
func (sc StatusCode) String() string {
	switch sc {
	case StatusCodeUnset:
		return "Unset"
	case StatusCodeOk:
		return "Ok"
	case StatusCodeError:
		return "Error"
	}
	return ""
}
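// Brief sketch (not part of the upstream file above): recording an error
// status on a span through the Status accessors defined on ptrace.Span.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewTraces().ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.Status().SetCode(ptrace.StatusCodeError)
	span.Status().SetMessage("upstream timeout")
	fmt.Println(span.Status().Code().String()) // Error
}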
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package ptrace // import "go.opentelemetry.io/collector/pdata/ptrace"

import (
	"go.opentelemetry.io/collector/pdata/internal"
	otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1"
)

// Traces is the top-level struct that is propagated through the traces pipeline.
// Use NewTraces to create a new instance; a zero-initialized instance is not valid for use.
type Traces internal.Traces

func newTraces(orig *otlpcollectortrace.ExportTraceServiceRequest) Traces {
	state := internal.StateMutable
	return Traces(internal.NewTraces(orig, &state))
}

func (ms Traces) getOrig() *otlpcollectortrace.ExportTraceServiceRequest {
	return internal.GetOrigTraces(internal.Traces(ms))
}

func (ms Traces) getState() *internal.State {
	return internal.GetTracesState(internal.Traces(ms))
}

// NewTraces creates a new Traces struct.
func NewTraces() Traces {
	return newTraces(&otlpcollectortrace.ExportTraceServiceRequest{})
}

// IsReadOnly returns true if this Traces instance is read-only.
func (ms Traces) IsReadOnly() bool {
	return *ms.getState() == internal.StateReadOnly
}

// CopyTo copies the Traces instance overriding the destination.
func (ms Traces) CopyTo(dest Traces) {
	ms.ResourceSpans().CopyTo(dest.ResourceSpans())
}

// SpanCount calculates the total number of spans.
func (ms Traces) SpanCount() int {
	spanCount := 0
	rss := ms.ResourceSpans()
	for i := 0; i < rss.Len(); i++ {
		rs := rss.At(i)
		ilss := rs.ScopeSpans()
		for j := 0; j < ilss.Len(); j++ {
			spanCount += ilss.At(j).Spans().Len()
		}
	}
	return spanCount
}

// ResourceSpans returns the ResourceSpansSlice associated with this Traces.
func (ms Traces) ResourceSpans() ResourceSpansSlice {
	return newResourceSpansSlice(&ms.getOrig().ResourceSpans, internal.GetTracesState(internal.Traces(ms)))
}

// MarkReadOnly marks the Traces as shared so that no further modifications can be done on it.
func (ms Traces) MarkReadOnly() {
	internal.SetTracesState(internal.Traces(ms), internal.StateReadOnly)
}
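// Illustrative sketch (not part of the upstream file above): SpanCount walks
// every ResourceSpans/ScopeSpans, and MarkReadOnly freezes an instance so
// shared pipeline stages cannot mutate it; CopyTo still yields a mutable copy.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	ss := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
	ss.Spans().AppendEmpty().SetName("a")
	ss.Spans().AppendEmpty().SetName("b")

	td.MarkReadOnly()
	fmt.Println(td.SpanCount(), td.IsReadOnly()) // 2 true

	// A mutable copy can still be derived from a read-only instance.
	cp := ptrace.NewTraces()
	td.CopyTo(cp)
	fmt.Println(cp.IsReadOnly()) // false
}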
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"

import (
	"encoding"
	"errors"
	"fmt"
	"net/url"
	"path"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configgrpc"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/config/configoptional"
)

type SanitizedURLPath string

var _ encoding.TextUnmarshaler = (*SanitizedURLPath)(nil)

func (s *SanitizedURLPath) UnmarshalText(text []byte) error {
	u, err := url.Parse(string(text))
	if err != nil {
		return fmt.Errorf("invalid HTTP URL path set for signal: %w", err)
	}

	if !path.IsAbs(u.Path) {
		u.Path = "/" + u.Path
	}

	*s = SanitizedURLPath(u.Path)
	return nil
}

type HTTPConfig struct {
	ServerConfig confighttp.ServerConfig `mapstructure:",squash"`

	// The URL path to receive traces on. If omitted "/v1/traces" will be used.
	TracesURLPath SanitizedURLPath `mapstructure:"traces_url_path,omitempty"`

	// The URL path to receive metrics on. If omitted "/v1/metrics" will be used.
	MetricsURLPath SanitizedURLPath `mapstructure:"metrics_url_path,omitempty"`

	// The URL path to receive logs on. If omitted "/v1/logs" will be used.
	LogsURLPath SanitizedURLPath `mapstructure:"logs_url_path,omitempty"`

	// prevent unkeyed literal initialization
	_ struct{}
}

// Protocols is the configuration for the supported protocols.
type Protocols struct {
	GRPC configoptional.Optional[configgrpc.ServerConfig] `mapstructure:"grpc"`
	HTTP configoptional.Optional[HTTPConfig]              `mapstructure:"http"`

	// prevent unkeyed literal initialization
	_ struct{}
}

// Config defines configuration for OTLP receiver.
type Config struct {
	// Protocols is the configuration for the supported protocols, currently gRPC and HTTP (Proto and JSON).
	Protocols `mapstructure:"protocols"`
}

var _ component.Config = (*Config)(nil)

// Validate checks the receiver configuration is valid
func (cfg *Config) Validate() error {
	if !cfg.GRPC.HasValue() && !cfg.HTTP.HasValue() {
		return errors.New("must specify at least one protocol when using the OTLP receiver")
	}
	return nil
}
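// Illustrative sketch (not part of the upstream file above): SanitizedURLPath
// normalizes relative paths to absolute ones, and Config.Validate rejects a
// configuration with neither protocol enabled. The zero-value Config here is
// an assumption used purely to trigger the validation error.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

func main() {
	var p otlpreceiver.SanitizedURLPath
	if err := p.UnmarshalText([]byte("v1/custom-traces")); err != nil {
		panic(err)
	}
	fmt.Println(p) // /v1/custom-traces

	// Neither gRPC nor HTTP is set, so validation fails.
	var cfg otlpreceiver.Config
	fmt.Println(cfg.Validate()) // must specify at least one protocol ...
}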
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"

import (
	"bytes"

	"github.com/gogo/protobuf/jsonpb"
	"github.com/gogo/protobuf/proto"
	spb "google.golang.org/genproto/googleapis/rpc/status"

	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
	"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
)

const (
	pbContentType   = "application/x-protobuf"
	jsonContentType = "application/json"
)

var (
	pbEncoder       = &protoEncoder{}
	jsEncoder       = &jsonEncoder{}
	jsonPbMarshaler = &jsonpb.Marshaler{}
)

type encoder interface {
	unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error)
	unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error)
	unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error)
	unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error)

	marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error)
	marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error)
	marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error)
	marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error)

	marshalStatus(rsp *spb.Status) ([]byte, error)

	contentType() string
}

type protoEncoder struct{}

func (protoEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
	req := ptraceotlp.NewExportRequest()
	err := req.UnmarshalProto(buf)
	return req, err
}

func (protoEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
	req := pmetricotlp.NewExportRequest()
	err := req.UnmarshalProto(buf)
	return req, err
}

func (protoEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
	req := plogotlp.NewExportRequest()
	err := req.UnmarshalProto(buf)
	return req, err
}

func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
	req := pprofileotlp.NewExportRequest()
	err := req.UnmarshalProto(buf)
	return req, err
}

func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalProto()
}

func (protoEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalProto()
}

func (protoEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalProto()
}

func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalProto()
}

func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
	return proto.Marshal(resp)
}

func (protoEncoder) contentType() string {
	return pbContentType
}

type jsonEncoder struct{}

func (jsonEncoder) unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) {
	req := ptraceotlp.NewExportRequest()
	err := req.UnmarshalJSON(buf)
	return req, err
}

func (jsonEncoder) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) {
	req := pmetricotlp.NewExportRequest()
	err := req.UnmarshalJSON(buf)
	return req, err
}

func (jsonEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) {
	req := plogotlp.NewExportRequest()
	err := req.UnmarshalJSON(buf)
	return req, err
}

func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) {
	req := pprofileotlp.NewExportRequest()
	err := req.UnmarshalJSON(buf)
	return req, err
}

func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalJSON()
}

func (jsonEncoder) marshalMetricsResponse(resp pmetricotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalJSON()
}

func (jsonEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalJSON()
}

func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) {
	return resp.MarshalJSON()
}

func (jsonEncoder) marshalStatus(resp *spb.Status) ([]byte, error) {
	buf := new(bytes.Buffer)
	err := jsonPbMarshaler.Marshal(buf, resp)
	return buf.Bytes(), err
}

func (jsonEncoder) contentType() string {
	return jsonContentType
}
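// In-package test sketch (hypothetical, not part of the upstream files): the
// two encoders implement one interface, so a handler can be exercised with
// either wire format. The JSON literal is an assumed minimal OTLP payload.
package otlpreceiver

import "testing"

func TestEncodersRoundTrip(t *testing.T) {
	// JSON request decoding.
	req, err := jsEncoder.unmarshalTracesRequest([]byte(`{"resourceSpans":[]}`))
	if err != nil {
		t.Fatal(err)
	}

	// Re-encode via protobuf and decode again with the proto encoder.
	raw, err := req.MarshalProto()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := pbEncoder.unmarshalTracesRequest(raw); err != nil {
		t.Fatal(err)
	}
	if jsEncoder.contentType() == pbEncoder.contentType() {
		t.Fatal("encoders must advertise distinct content types")
	}
}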
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configgrpc"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/config/confignet"
	"go.opentelemetry.io/collector/config/configoptional"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/internal/sharedcomponent"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata"
	"go.opentelemetry.io/collector/receiver/xreceiver"
)

const (
	defaultTracesURLPath   = "/v1/traces"
	defaultMetricsURLPath  = "/v1/metrics"
	defaultLogsURLPath     = "/v1/logs"
	defaultProfilesURLPath = "/v1development/profiles"
)

// NewFactory creates a new OTLP receiver factory.
func NewFactory() receiver.Factory {
	return xreceiver.NewFactory(
		metadata.Type,
		createDefaultConfig,
		xreceiver.WithTraces(createTraces, metadata.TracesStability),
		xreceiver.WithMetrics(createMetrics, metadata.MetricsStability),
		xreceiver.WithLogs(createLog, metadata.LogsStability),
		xreceiver.WithProfiles(createProfiles, metadata.ProfilesStability),
	)
}

// createDefaultConfig creates the default configuration for the receiver.
func createDefaultConfig() component.Config {
	grpcCfg := configgrpc.NewDefaultServerConfig()
	grpcCfg.NetAddr = confignet.NewDefaultAddrConfig()
	grpcCfg.NetAddr.Endpoint = "localhost:4317"
	grpcCfg.NetAddr.Transport = confignet.TransportTypeTCP
	// We almost write 0 bytes, so no need to tune WriteBufferSize.
	grpcCfg.ReadBufferSize = 512 * 1024

	httpCfg := confighttp.NewDefaultServerConfig()
	httpCfg.Endpoint = "localhost:4318"
	// For backward compatibility:
	httpCfg.TLS = nil
	httpCfg.WriteTimeout = 0
	httpCfg.ReadHeaderTimeout = 0
	httpCfg.IdleTimeout = 0

	return &Config{
		Protocols: Protocols{
			GRPC: configoptional.Default(grpcCfg),
			HTTP: configoptional.Default(HTTPConfig{
				ServerConfig:   httpCfg,
				TracesURLPath:  defaultTracesURLPath,
				MetricsURLPath: defaultMetricsURLPath,
				LogsURLPath:    defaultLogsURLPath,
			}),
		},
	}
}

// createTraces creates a trace receiver based on provided config.
func createTraces(
	_ context.Context,
	set receiver.Settings,
	cfg component.Config,
	nextConsumer consumer.Traces,
) (receiver.Traces, error) {
	oCfg := cfg.(*Config)
	r, err := receivers.LoadOrStore(
		oCfg,
		func() (*otlpReceiver, error) {
			return newOtlpReceiver(oCfg, &set)
		},
	)
	if err != nil {
		return nil, err
	}

	r.Unwrap().registerTraceConsumer(nextConsumer)
	return r, nil
}

// createMetrics creates a metrics receiver based on provided config.
func createMetrics(
	_ context.Context,
	set receiver.Settings,
	cfg component.Config,
	consumer consumer.Metrics,
) (receiver.Metrics, error) {
	oCfg := cfg.(*Config)
	r, err := receivers.LoadOrStore(
		oCfg,
		func() (*otlpReceiver, error) {
			return newOtlpReceiver(oCfg, &set)
		},
	)
	if err != nil {
		return nil, err
	}

	r.Unwrap().registerMetricsConsumer(consumer)
	return r, nil
}

// createLog creates a log receiver based on provided config.
func createLog(
	_ context.Context,
	set receiver.Settings,
	cfg component.Config,
	consumer consumer.Logs,
) (receiver.Logs, error) {
	oCfg := cfg.(*Config)
	r, err := receivers.LoadOrStore(
		oCfg,
		func() (*otlpReceiver, error) {
			return newOtlpReceiver(oCfg, &set)
		},
	)
	if err != nil {
		return nil, err
	}

	r.Unwrap().registerLogsConsumer(consumer)
	return r, nil
}

// createProfiles creates a profiles receiver based on provided config.
func createProfiles(
	_ context.Context,
	set receiver.Settings,
	cfg component.Config,
	nextConsumer xconsumer.Profiles,
) (xreceiver.Profiles, error) {
	oCfg := cfg.(*Config)
	r, err := receivers.LoadOrStore(
		oCfg,
		func() (*otlpReceiver, error) {
			return newOtlpReceiver(oCfg, &set)
		},
	)
	if err != nil {
		return nil, err
	}

	r.Unwrap().registerProfilesConsumer(nextConsumer)
	return r, nil
}

// This is the map of already created OTLP receivers for particular configurations.
// We maintain this map because the receiver.Factory is asked for trace and metric
// receivers separately, when it gets CreateTraces() and CreateMetrics(), but they
// must not create separate objects: they must use one otlpReceiver object per
// configuration. When the receiver is shut down it should be removed from this map
// so the same configuration can be recreated successfully.
var receivers = sharedcomponent.NewMap[*Config, *otlpReceiver]()
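// Illustrative sketch (not part of the upstream files): the default config
// enables both protocols on localhost, and the shared-component map above
// means repeated Create* calls for one config reuse one otlpReceiver.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

func main() {
	factory := otlpreceiver.NewFactory()
	cfg := factory.CreateDefaultConfig().(*otlpreceiver.Config)

	fmt.Println(cfg.GRPC.HasValue(), cfg.HTTP.HasValue()) // true true
	fmt.Println(cfg.HTTP.Get().TracesURLPath)             // /v1/traces
}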
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"

import (
	"context"
	"errors"
	"net"
	"net/http"
	"sync"

	"go.uber.org/zap"
	"google.golang.org/grpc"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componentstatus"
	"go.opentelemetry.io/collector/config/confighttp"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/internal/telemetry"
	"go.opentelemetry.io/collector/internal/telemetry/componentattribute"
	"go.opentelemetry.io/collector/pdata/plog/plogotlp"
	"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
	"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
	"go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
)

// otlpReceiver is the type that exposes Trace, Metrics, Logs and Profiles reception.
type otlpReceiver struct {
	cfg        *Config
	serverGRPC *grpc.Server
	serverHTTP *http.Server

	nextTraces   consumer.Traces
	nextMetrics  consumer.Metrics
	nextLogs     consumer.Logs
	nextProfiles xconsumer.Profiles
	shutdownWG   sync.WaitGroup

	obsrepGRPC *receiverhelper.ObsReport
	obsrepHTTP *receiverhelper.ObsReport

	settings *receiver.Settings
}

// newOtlpReceiver just creates the OpenTelemetry receiver services. It is the
// caller's responsibility to invoke Start to begin reception and Shutdown to end it.
func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) {
	set.TelemetrySettings = telemetry.WithoutAttributes(set.TelemetrySettings, componentattribute.SignalKey)
	set.Logger.Debug("created signal-agnostic logger")
	r := &otlpReceiver{
		cfg:          cfg,
		nextTraces:   nil,
		nextMetrics:  nil,
		nextLogs:     nil,
		nextProfiles: nil,
		settings:     set,
	}

	var err error
	r.obsrepGRPC, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              "grpc",
		ReceiverCreateSettings: *set,
	})
	if err != nil {
		return nil, err
	}
	r.obsrepHTTP, err = receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              "http",
		ReceiverCreateSettings: *set,
	})
	if err != nil {
		return nil, err
	}

	return r, nil
}

func (r *otlpReceiver) startGRPCServer(host component.Host) error {
	// If GRPC is not enabled, nothing to start.
	if !r.cfg.GRPC.HasValue() {
		return nil
	}
	grpcCfg := r.cfg.GRPC.Get()

	var err error
	if r.serverGRPC, err = grpcCfg.ToServer(context.Background(), host, r.settings.TelemetrySettings); err != nil {
		return err
	}

	if r.nextTraces != nil {
		ptraceotlp.RegisterGRPCServer(r.serverGRPC, trace.New(r.nextTraces, r.obsrepGRPC))
	}

	if r.nextMetrics != nil {
		pmetricotlp.RegisterGRPCServer(r.serverGRPC, metrics.New(r.nextMetrics, r.obsrepGRPC))
	}

	if r.nextLogs != nil {
		plogotlp.RegisterGRPCServer(r.serverGRPC, logs.New(r.nextLogs, r.obsrepGRPC))
	}

	if r.nextProfiles != nil {
		pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles))
	}

	r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", grpcCfg.NetAddr.Endpoint))
	var gln net.Listener
	if gln, err = grpcCfg.NetAddr.Listen(context.Background()); err != nil {
		return err
	}

	r.shutdownWG.Add(1)
	go func() {
		defer r.shutdownWG.Done()

		if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc))
		}
	}()
	return nil
}

func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) error {
	// If HTTP is not enabled, nothing to start.
	if !r.cfg.HTTP.HasValue() {
		return nil
	}
	httpCfg := r.cfg.HTTP.Get()

	httpMux := http.NewServeMux()
	if r.nextTraces != nil {
		httpTracesReceiver := trace.New(r.nextTraces, r.obsrepHTTP)
		httpMux.HandleFunc(string(httpCfg.TracesURLPath), func(resp http.ResponseWriter, req *http.Request) {
			handleTraces(resp, req, httpTracesReceiver)
		})
	}

	if r.nextMetrics != nil {
		httpMetricsReceiver := metrics.New(r.nextMetrics, r.obsrepHTTP)
		httpMux.HandleFunc(string(httpCfg.MetricsURLPath), func(resp http.ResponseWriter, req *http.Request) {
			handleMetrics(resp, req, httpMetricsReceiver)
		})
	}

	if r.nextLogs != nil {
		httpLogsReceiver := logs.New(r.nextLogs, r.obsrepHTTP)
		httpMux.HandleFunc(string(httpCfg.LogsURLPath), func(resp http.ResponseWriter, req *http.Request) {
			handleLogs(resp, req, httpLogsReceiver)
		})
	}

	if r.nextProfiles != nil {
		httpProfilesReceiver := profiles.New(r.nextProfiles)
		httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) {
			handleProfiles(resp, req, httpProfilesReceiver)
		})
	}

	var err error
	if r.serverHTTP, err = httpCfg.ServerConfig.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil {
		return err
	}

	r.settings.Logger.Info("Starting HTTP server", zap.String("endpoint", httpCfg.ServerConfig.Endpoint))
	var hln net.Listener
	if hln, err = httpCfg.ServerConfig.ToListener(ctx); err != nil {
		return err
	}

	r.shutdownWG.Add(1)
	go func() {
		defer r.shutdownWG.Done()

		if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP))
		}
	}()
	return nil
}

// Start starts the configured gRPC and HTTP servers, enabling reception for
// every signal that has a registered consumer.
func (r *otlpReceiver) Start(ctx context.Context, host component.Host) error {
	if err := r.startGRPCServer(host); err != nil {
		return err
	}
	if err := r.startHTTPServer(ctx, host); err != nil {
		// It's possible that a valid GRPC server configuration was specified,
		// but an invalid HTTP configuration. If that's the case, the successfully
		// started GRPC server must be shutdown to ensure no goroutines are leaked.
		return errors.Join(err, r.Shutdown(ctx))
	}

	return nil
}

// Shutdown is a method to turn off receiving.
func (r *otlpReceiver) Shutdown(ctx context.Context) error {
	var err error

	if r.serverHTTP != nil {
		err = r.serverHTTP.Shutdown(ctx)
	}

	if r.serverGRPC != nil {
		r.serverGRPC.GracefulStop()
	}

	r.shutdownWG.Wait()
	return err
}

func (r *otlpReceiver) registerTraceConsumer(tc consumer.Traces) {
	r.nextTraces = tc
}

func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) {
	r.nextMetrics = mc
}

func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) {
	r.nextLogs = lc
}

func (r *otlpReceiver) registerProfilesConsumer(tc xconsumer.Profiles) {
	r.nextProfiles = tc
}
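// Standalone sketch (hypothetical servers, not part of the upstream files) of
// the shutdown ordering used above: stop accepting HTTP first, then drain
// gRPC, then wait for the serve goroutines tracked by a WaitGroup.
package main

import (
	"context"
	"net"
	"net/http"
	"sync"

	"google.golang.org/grpc"
)

func main() {
	httpSrv := &http.Server{Addr: "localhost:0"}
	grpcSrv := grpc.NewServer()

	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); _ = httpSrv.ListenAndServe() }()
	go func() { defer wg.Done(); _ = grpcSrv.Serve(ln) }()

	// Mirror otlpReceiver.Shutdown: http.Server.Shutdown honors the context,
	// GracefulStop drains in-flight RPCs, and the WaitGroup guarantees both
	// serve loops have returned before shutdown reports back.
	_ = httpSrv.Shutdown(context.Background())
	grpcSrv.GracefulStop()
	wg.Wait()
}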
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package otlpreceiver // import "go.opentelemetry.io/collector/receiver/otlpreceiver"

import (
	"fmt"
	"io"
	"mime"
	"net/http"
	"strconv"
	"time"

	"google.golang.org/grpc/status"

	"go.opentelemetry.io/collector/internal/statusutil"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace"
)

// Pre-computed status with code=Internal to be used in case of a marshaling error.
var fallbackMsg = []byte(`{"code": 13, "message": "failed to marshal error message"}`)

const fallbackContentType = "application/json"

func handleTraces(resp http.ResponseWriter, req *http.Request, tracesReceiver *trace.Receiver) {
	enc, ok := readContentType(resp, req)
	if !ok {
		return
	}

	body, ok := readAndCloseBody(resp, req, enc)
	if !ok {
		return
	}

	otlpReq, err := enc.unmarshalTracesRequest(body)
	if err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return
	}

	otlpResp, err := tracesReceiver.Export(req.Context(), otlpReq)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}

	msg, err := enc.marshalTracesResponse(otlpResp)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}
	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}

func handleMetrics(resp http.ResponseWriter, req *http.Request, metricsReceiver *metrics.Receiver) {
	enc, ok := readContentType(resp, req)
	if !ok {
		return
	}

	body, ok := readAndCloseBody(resp, req, enc)
	if !ok {
		return
	}

	otlpReq, err := enc.unmarshalMetricsRequest(body)
	if err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return
	}

	otlpResp, err := metricsReceiver.Export(req.Context(), otlpReq)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}

	msg, err := enc.marshalMetricsResponse(otlpResp)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}
	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}

func handleLogs(resp http.ResponseWriter, req *http.Request, logsReceiver *logs.Receiver) {
	enc, ok := readContentType(resp, req)
	if !ok {
		return
	}

	body, ok := readAndCloseBody(resp, req, enc)
	if !ok {
		return
	}

	otlpReq, err := enc.unmarshalLogsRequest(body)
	if err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return
	}

	otlpResp, err := logsReceiver.Export(req.Context(), otlpReq)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}

	msg, err := enc.marshalLogsResponse(otlpResp)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}
	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}

func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceiver *profiles.Receiver) {
	enc, ok := readContentType(resp, req)
	if !ok {
		return
	}

	body, ok := readAndCloseBody(resp, req, enc)
	if !ok {
		return
	}

	otlpReq, err := enc.unmarshalProfilesRequest(body)
	if err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return
	}

	otlpResp, err := profilesReceiver.Export(req.Context(), otlpReq)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}

	msg, err := enc.marshalProfilesResponse(otlpResp)
	if err != nil {
		writeError(resp, enc, err, http.StatusInternalServerError)
		return
	}
	writeResponse(resp, enc.contentType(), http.StatusOK, msg)
}

func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) {
	if req.Method != http.MethodPost {
		handleUnmatchedMethod(resp)
		return nil, false
	}

	switch getMimeTypeFromContentType(req.Header.Get("Content-Type")) {
	case pbContentType:
		return pbEncoder, true
	case jsonContentType:
		return jsEncoder, true
	default:
		handleUnmatchedContentType(resp)
		return nil, false
	}
}

func readAndCloseBody(resp http.ResponseWriter, req *http.Request, enc encoder) ([]byte, bool) {
	body, err := io.ReadAll(req.Body)
	if err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return nil, false
	}
	if err = req.Body.Close(); err != nil {
		writeError(resp, enc, err, http.StatusBadRequest)
		return nil, false
	}
	return body, true
}

// writeError encodes the HTTP error inside a rpc.Status message as required by the OTLP protocol.
func writeError(w http.ResponseWriter, encoder encoder, err error, statusCode int) {
	s, ok := status.FromError(err)
	if ok {
		statusCode = errors.GetHTTPStatusCodeFromStatus(s)
	} else {
		s = statusutil.NewStatusFromMsgAndHTTPCode(err.Error(), statusCode)
	}
	writeStatusResponse(w, encoder, statusCode, s)
}

// errorHandler encodes the HTTP error message inside a rpc.Status message as required
// by the OTLP protocol.
func errorHandler(w http.ResponseWriter, r *http.Request, errMsg string, statusCode int) {
	s := statusutil.NewStatusFromMsgAndHTTPCode(errMsg, statusCode)
	switch getMimeTypeFromContentType(r.Header.Get("Content-Type")) {
	case pbContentType:
		writeStatusResponse(w, pbEncoder, statusCode, s)
		return
	case jsonContentType:
		writeStatusResponse(w, jsEncoder, statusCode, s)
		return
	}
	writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
}

func writeStatusResponse(w http.ResponseWriter, enc encoder, statusCode int, st *status.Status) {
	// https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md#otlphttp-throttling
	if statusCode == http.StatusTooManyRequests || statusCode == http.StatusServiceUnavailable {
		retryInfo := statusutil.GetRetryInfo(st)
		// Check if server returned throttling information.
		if retryInfo != nil {
			// We are throttled. Wait before retrying as requested by the server.
			// The value of Retry-After field can be either an HTTP-date or a number of
			// seconds to delay after the response is received. See https://datatracker.ietf.org/doc/html/rfc7231#section-7.1.3
			//
			// Retry-After = HTTP-date / delay-seconds
			//
			// Use delay-seconds since it is easier to format and does not require clock synchronization.
			w.Header().Set("Retry-After", strconv.FormatInt(int64(retryInfo.GetRetryDelay().AsDuration()/time.Second), 10))
		}
	}

	msg, err := enc.marshalStatus(st.Proto())
	if err != nil {
		writeResponse(w, fallbackContentType, http.StatusInternalServerError, fallbackMsg)
		return
	}

	writeResponse(w, enc.contentType(), statusCode, msg)
}

func writeResponse(w http.ResponseWriter, contentType string, statusCode int, msg []byte) {
	w.Header().Set("Content-Type", contentType)
	w.WriteHeader(statusCode)
	// Nothing we can do with the error if we cannot write to the response.
	_, _ = w.Write(msg)
}

func getMimeTypeFromContentType(contentType string) string {
	mediatype, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return ""
	}
	return mediatype
}

func handleUnmatchedMethod(resp http.ResponseWriter) {
	hst := http.StatusMethodNotAllowed
	writeResponse(resp, "text/plain", hst, []byte(fmt.Sprintf("%v method not allowed, supported: [POST]", hst)))
}

func handleUnmatchedContentType(resp http.ResponseWriter) {
	hst := http.StatusUnsupportedMediaType
	writeResponse(resp, "text/plain", hst, []byte(fmt.Sprintf("%v unsupported media type, supported: [%s, %s]", hst, jsonContentType, pbContentType)))
}
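// Client-side sketch (not part of the upstream files): how the handlers above
// are reached over the wire. The endpoint matches the default HTTP config; an
// empty resourceSpans list is a minimal, valid OTLP/JSON request body.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"resourceSpans":[]}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:4318/v1/traces", body)
	if err != nil {
		panic(err)
	}
	// readContentType only accepts application/json or application/x-protobuf.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // e.g. no collector listening locally
	}
	defer resp.Body.Close()

	msg, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(msg)) // 200 {} on success
}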