deps: update buildkit, vendor changes

Signed-off-by: Laura Brehm <laurabrehm@hey.com>
Author: Laura Brehm <laurabrehm@hey.com>
Date:   2023-12-19 12:36:24 +00:00
Parent: 8484fcdd57
Commit: 0f45b629ad
GPG Key ID: CFBF847B4A313468
157 changed files with 17189 additions and 1232 deletions


@ -392,7 +392,7 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
conn = demuxConn(conn)
exp, err := detect.Exporter()
exp, _, err := detect.Exporter()
if err != nil {
return nil, err
}
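Each of the driver hunks in this commit repeats this one change: detect.Exporter() in the updated buildkit returns a metric exporter alongside the span exporter. A minimal sketch of the new call shape, with an illustrative helper name:

```go
package driver

import (
	"github.com/moby/buildkit/util/tracing/detect"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// newSpanExporter is a hypothetical helper showing the updated contract:
// detect.Exporter now returns (span exporter, metric exporter, error), and
// callers that only forward traces discard the second value.
func newSpanExporter() (sdktrace.SpanExporter, error) {
	exp, _, err := detect.Exporter()
	if err != nil {
		return nil, err
	}
	return exp, nil
}
```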


@ -66,7 +66,7 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
}),
}
exp, err := detect.Exporter()
exp, _, err := detect.Exporter()
if err != nil {
return nil, err
}


@ -209,7 +209,7 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
return nil, err
}
exp, err := detect.Exporter()
exp, _, err := detect.Exporter()
if err != nil {
return nil, err
}


@ -74,7 +74,7 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
opts := []client.ClientOpt{}
exp, err := detect.Exporter()
exp, _, err := detect.Exporter()
if err != nil {
return nil, err
}

go.mod

@ -7,7 +7,7 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.16
github.com/compose-spec/compose-go v1.20.0
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.7.9
github.com/containerd/containerd v1.7.11
github.com/containerd/continuity v0.4.2
github.com/containerd/log v0.1.0
github.com/containerd/typeurl/v2 v2.1.1
@ -24,7 +24,7 @@ require (
github.com/google/uuid v1.3.0
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
github.com/hashicorp/hcl/v2 v2.19.1
github.com/moby/buildkit v0.13.0-beta1.0.20231129145300-c9ee8491d74f // master (v0.13.0-dev)
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 // master (v0.13.0-dev)
github.com/moby/sys/mountinfo v0.6.2
github.com/moby/sys/signal v0.7.0
github.com/morikuni/aec v1.0.0
@ -120,10 +120,10 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
@ -137,11 +137,16 @@ require (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/sdk v1.19.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect

go.sum

@ -92,8 +92,8 @@ github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaD
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw=
github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE=
github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
@ -206,7 +206,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@ -318,8 +317,8 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.13.0-beta1.0.20231129145300-c9ee8491d74f h1:4cKlhf1VAERk8ycrBAeHVEE+tPgogx9wl97SHlCNCGg=
github.com/moby/buildkit v0.13.0-beta1.0.20231129145300-c9ee8491d74f/go.mod h1:WZBEvD1dKbPOPS84u1qz4Ja4l8uPD37w4PlpwdhxvKE=
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991 h1:r80LLQ91uOLxU1ElAvrB1o8oBsph51lPzVnr7t2b200=
github.com/moby/buildkit v0.13.0-beta1.0.20231219135447-957cb50df991/go.mod h1:6MddWPSL5jxy+W8eMMHWDOfZzzRRKWXPZqajw72YHBc=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
@ -384,14 +383,14 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
@ -402,8 +401,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@ -483,16 +482,26 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZ
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA=
go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k=
go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=


@ -76,6 +76,16 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
if _, err2 := hrs.reader(); err2 == nil {
return n, nil
}
} else if err == io.EOF {
// The CRI's imagePullProgressTimeout relies on responseBody.Close to
// update the process monitor's status. If the err is io.EOF, close
// the connection since there is no more available data.
if hrs.rc != nil {
if clsErr := hrs.rc.Close(); clsErr != nil {
log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF")
}
hrs.rc = nil
}
}
return
}


@ -585,18 +585,13 @@ func (r *request) do(ctx context.Context) (*http.Response, error) {
return nil
}
}
_, httpSpan := tracing.StartSpan(
ctx,
tracing.Name("remotes.docker.resolver", "HTTPRequest"),
tracing.WithHTTPRequest(req),
)
defer httpSpan.End()
tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest"))
resp, err := client.Do(req)
if err != nil {
httpSpan.SetStatus(err)
return nil, fmt.Errorf("failed to do request: %w", err)
}
httpSpan.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...)
log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
return resp, nil
}


@ -361,8 +361,15 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.In
return children, nil
}
// parentInfo can be used to inherit info for non-existent blobs
var parentInfo *content.Info
parentSourceAnnotations := desc.Annotations
var parentLabels map[string]string
if pi, err := provider.Info(ctx, desc.Digest); err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
} else {
parentLabels = pi.Labels
}
for i := range children {
child := children[i]
@ -372,32 +379,35 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.In
if !errdefs.IsNotFound(err) {
return nil, err
}
if parentInfo == nil {
pi, err := provider.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
parentInfo = &pi
}
// Blob may not exist locally, annotate with parent labels for cross repo
// mount or fetch. Parent sources may apply to all children since most
// registries enforce that children exist before the manifests.
info = *parentInfo
}
copyDistributionSourceLabels(info.Labels, &child)
for k, v := range info.Labels {
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
continue
}
if child.Annotations == nil {
child.Annotations = map[string]string{}
}
child.Annotations[k] = v
}
// Annotate with parent labels for cross repo mount or fetch.
// Parent sources may apply to all children since most registries
// enforce that children exist before the manifests.
copyDistributionSourceLabels(parentSourceAnnotations, &child)
copyDistributionSourceLabels(parentLabels, &child)
children[i] = child
}
return children, nil
}
}
func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) {
for k, v := range from {
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
continue
}
if to.Annotations == nil {
to.Annotations = make(map[string]string)
} else {
// Only propagate the parent label if the child doesn't already have it.
if _, has := to.Annotations[k]; has {
continue
}
}
to.Annotations[k] = v
}
}


@ -20,11 +20,11 @@ import (
"context"
"net/http"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
httpconv "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
)
@ -37,15 +37,27 @@ type SpanOpt func(config *StartConfig)
// WithHTTPRequest marks span as a HTTP request operation from client to server.
// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type.
func WithHTTPRequest(request *http.Request) SpanOpt {
//
// Deprecated: use upstream functionality from otelhttp directly instead. This function is kept for API compatibility
// but no longer works as expected due to required functionality no longer exported in OpenTelemetry libraries.
func WithHTTPRequest(_ *http.Request) SpanOpt {
return func(config *StartConfig) {
config.spanOpts = append(config.spanOpts,
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
trace.WithAttributes(httpconv.ClientRequest(request)...), // Add HTTP attributes
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
)
}
}
// UpdateHTTPClient updates the http client with the necessary otel transport
func UpdateHTTPClient(client *http.Client, name string) {
client.Transport = otelhttp.NewTransport(
client.Transport,
otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
return name
}),
)
}
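The resolver change above (removing the hand-rolled HTTPRequest span around client.Do) pairs with this helper. A minimal usage sketch, assuming the containerd tracing package as vendored here; the registry URL is a placeholder:

```go
package main

import (
	"net/http"

	"github.com/containerd/containerd/tracing"
)

func main() {
	client := &http.Client{}
	// Install the otelhttp round-tripper once; every request made through
	// this client is then traced under the given span name, replacing the
	// per-request span that used to wrap client.Do.
	tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest"))
	resp, err := client.Get("https://registry-1.docker.io/v2/") // placeholder endpoint
	if err == nil {
		resp.Body.Close()
	}
}
```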
// StartSpan starts child span in a context.
func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) {
config := StartConfig{}


@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.7.9+unknown"
Version = "1.7.11+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.


@ -59,9 +59,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
var creds *withCredentials
for _, o := range opts {
if _, ok := o.(*withFailFast); ok {
gopts = append(gopts, grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
if creds == nil {
creds = &withCredentials{}
@ -205,7 +202,7 @@ func (c *Client) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
return context.Cause(ctx)
case <-time.After(time.Second):
}
c.conn.ResetConnectBackoff()
@ -216,14 +213,6 @@ func (c *Client) Close() error {
return c.conn.Close()
}
type withFailFast struct{}
func (*withFailFast) isClientOpt() {}
func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withDialer struct {
dialer func(context.Context, string) (net.Conn, error)
}


@ -61,7 +61,7 @@ func (as *asyncState) Do(ctx context.Context, c *Constraints) error {
if err != nil {
select {
case <-ctx.Done():
if errors.Is(err, ctx.Err()) {
if errors.Is(err, context.Cause(ctx)) {
return res, err
}
default:


@ -106,8 +106,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background())
defer cancelStatus()
statusContext, cancelStatus := context.WithCancelCause(context.Background())
defer cancelStatus(errors.WithStack(context.Canceled))
if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
statusContext = trace.ContextWithSpan(statusContext, span)
@ -230,16 +230,16 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
frontendAttrs[k] = v
}
solveCtx, cancelSolve := context.WithCancel(ctx)
solveCtx, cancelSolve := context.WithCancelCause(ctx)
var res *SolveResponse
eg.Go(func() error {
ctx := solveCtx
defer cancelSolve()
defer cancelSolve(errors.WithStack(context.Canceled))
defer func() { // make sure the Status ends cleanly on build errors
go func() {
<-time.After(3 * time.Second)
cancelStatus()
cancelStatus(errors.WithStack(context.Canceled))
}()
if !opt.SessionPreInitialized {
bklog.G(ctx).Debugf("stopping session")
@ -298,7 +298,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
select {
case <-solveCtx.Done():
case <-time.After(5 * time.Second):
cancelSolve()
cancelSolve(errors.WithStack(context.Canceled))
}
return err
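The WithCancel to WithCancelCause migration in this hunk recurs across the whole vendor bump. A self-contained sketch of the pattern (Go 1.20+):

```go
package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Cancel functions now carry an error; readers recover it via
	// context.Cause instead of ctx.Err, so the stack-annotated cause
	// set at the cancellation site survives to the error path.
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.WithStack(context.Canceled))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // always plain context.Canceled
	fmt.Println(context.Cause(ctx)) // the wrapped cause passed to cancel
}
```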


@ -95,7 +95,7 @@ type Source struct {
type ContextOpt struct {
NoDockerignore bool
LocalOpts []llb.LocalOption
AsyncLocalOpts func() []llb.LocalOption
Platform *ocispecs.Platform
ResolveMode string
CaptureDigest *digest.Digest
@ -473,11 +473,8 @@ func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt)
}
pname := name + "::" + platforms.Format(platforms.Normalize(pp))
st, img, err := bc.namedContext(ctx, name, pname, opt)
if err != nil {
return nil, nil, err
}
if st != nil {
return st, img, nil
if err != nil || st != nil {
return st, img, err
}
return bc.namedContext(ctx, name, name, opt)
}


@ -6,12 +6,14 @@ import (
"encoding/json"
"fmt"
"strings"
"sync"
"github.com/distribution/reference"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/patternmatcher/ignorefile"
"github.com/pkg/errors"
@ -212,12 +214,15 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi
}
}
}
st = llb.Local(vv[1],
llb.WithCustomName("[context "+nameWithPlatform+"] load from client"),
llb.SessionID(bc.bopts.SessionID),
llb.SharedKeyHint("context:"+nameWithPlatform),
llb.ExcludePatterns(excludes),
)
localOutput := &asyncLocalOutput{
name: vv[1],
nameWithPlatform: nameWithPlatform,
sessionID: bc.bopts.SessionID,
excludes: excludes,
extraOpts: opt.AsyncLocalOpts,
}
st = llb.NewState(localOutput)
return &st, nil, nil
case "input":
inputs, err := bc.client.Inputs(ctx)
@ -251,3 +256,41 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi
return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], nameWithPlatform)
}
}
// asyncLocalOutput is an llb.Output that computes an llb.Local
// on-demand instead of at the time of initialization.
type asyncLocalOutput struct {
llb.Output
name string
nameWithPlatform string
sessionID string
excludes []string
extraOpts func() []llb.LocalOption
once sync.Once
}
func (a *asyncLocalOutput) ToInput(ctx context.Context, constraints *llb.Constraints) (*pb.Input, error) {
a.once.Do(a.do)
return a.Output.ToInput(ctx, constraints)
}
func (a *asyncLocalOutput) Vertex(ctx context.Context, constraints *llb.Constraints) llb.Vertex {
a.once.Do(a.do)
return a.Output.Vertex(ctx, constraints)
}
func (a *asyncLocalOutput) do() {
var extraOpts []llb.LocalOption
if a.extraOpts != nil {
extraOpts = a.extraOpts()
}
opts := append([]llb.LocalOption{
llb.WithCustomName("[context " + a.nameWithPlatform + "] load from client"),
llb.SessionID(a.sessionID),
llb.SharedKeyHint("context:" + a.nameWithPlatform),
llb.ExcludePatterns(a.excludes),
}, extraOpts...)
st := llb.Local(a.name, opts...)
a.Output = st.Output()
}
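The asyncLocalOutput type is the standard sync.Once lazy-initialization pattern; a stripped-down, self-contained version of the same shape:

```go
package main

import (
	"fmt"
	"sync"
)

// lazy computes its value at most once, on first use rather than at
// construction time, mirroring asyncLocalOutput above.
type lazy struct {
	once sync.Once
	v    string
}

func (l *lazy) get() string {
	l.once.Do(func() { l.v = "expensive result" })
	return l.v
}

func main() {
	l := &lazy{}
	fmt.Println(l.get())
	fmt.Println(l.get()) // cached; the computation does not rerun
}
```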


@ -43,8 +43,9 @@ type GrpcClient interface {
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second)
defer pingCancel()
pingCtx, pingCancel := context.WithCancelCause(ctx)
pingCtx, _ = context.WithTimeoutCause(pingCtx, 15*time.Second, errors.WithStack(context.DeadlineExceeded))
defer pingCancel(errors.WithStack(context.Canceled))
resp, err := c.Ping(pingCtx, &pb.PingRequest{})
if err != nil {
return nil, err
@ -616,7 +617,7 @@ func (b *procMessageForwarder) Close() {
type messageForwarder struct {
client pb.LLBBridgeClient
ctx context.Context
cancel func()
cancel func(error)
eg *errgroup.Group
mu sync.Mutex
pids map[string]*procMessageForwarder
@ -630,7 +631,7 @@ type messageForwarder struct {
}
func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder {
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancelCause(ctx)
eg, ctx := errgroup.WithContext(ctx)
return &messageForwarder{
client: client,
@ -719,7 +720,7 @@ func (m *messageForwarder) Send(msg *pb.ExecMessage) error {
}
func (m *messageForwarder) Release() error {
m.cancel()
m.cancel(errors.WithStack(context.Canceled))
return m.eg.Wait()
}
@ -949,7 +950,7 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien
closeDoneOnce.Do(func() {
close(done)
})
return ctx.Err()
return context.Cause(ctx)
}
if file := msg.GetFile(); file != nil {
@ -1145,7 +1146,7 @@ func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, err
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancelCause(ctx)
_ = cancel
// go monitorHealth(ctx, cc, cancel)


@ -195,8 +195,8 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
opts[keyDirName] = []string{opt.Name}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(errors.WithStack(context.Canceled))
client := NewFileSyncClient(c.Conn())


@ -72,8 +72,9 @@ func (sm *Manager) Any(ctx context.Context, g Group, f func(context.Context, str
return errors.Errorf("no active sessions")
}
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
timeoutCtx, cancel := context.WithCancelCause(ctx)
timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
c, err := sm.Get(timeoutCtx, id, false)
if err != nil {
lastErr = err


@ -74,14 +74,14 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancelCause(ctx)
go monitorHealth(ctx, cc, cancel)
return ctx, cc, nil
}
func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) {
defer cancelConn()
func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func(error)) {
defer cancelConn(errors.WithStack(context.Canceled))
defer cc.Close()
ticker := time.NewTicker(5 * time.Second)
@ -104,9 +104,11 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
healthcheckStart := time.Now()
timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
ctx, cancel := context.WithTimeout(ctx, timeout)
ctx, cancel := context.WithCancelCause(ctx)
ctx, _ = context.WithTimeoutCause(ctx, timeout, errors.WithStack(context.DeadlineExceeded))
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
cancel(errors.WithStack(context.Canceled))
lastHealthcheckDuration = time.Since(healthcheckStart)
logFields := logrus.Fields{

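Deadline contexts follow the same cause-carrying scheme as plain cancellation: WithTimeoutCause attaches the deadline cause, and its own cancel func is deliberately dropped because the outer cancel releases the context. A self-contained sketch (Go 1.21+):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	// Attach a deadline with an explicit, stack-annotated cause. The
	// returned cancel func is intentionally discarded; the outer cancel
	// below is what releases the context tree.
	ctx, _ = context.WithTimeoutCause(ctx, 50*time.Millisecond, errors.WithStack(context.DeadlineExceeded))
	defer cancel(errors.WithStack(context.Canceled))

	<-ctx.Done()
	fmt.Println(context.Cause(ctx)) // the deadline cause, with stack attached
}
```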

@ -99,8 +99,8 @@ func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[strin
// caller needs to take lock, this function will release it
func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(errors.WithStack(context.Canceled))
opts = canonicalHeaders(opts)
@ -156,8 +156,8 @@ func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, err
id = p[1]
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(errors.WithStack(context.Canceled))
go func() {
<-ctx.Done()
@ -173,7 +173,7 @@ func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, err
select {
case <-ctx.Done():
sm.mu.Unlock()
return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id)
return nil, errors.Wrapf(context.Cause(ctx), "no active session for %s", id)
default:
}
var ok bool


@ -42,7 +42,7 @@ type Session struct {
name string
sharedKey string
ctx context.Context
cancelCtx func()
cancelCtx func(error)
done chan struct{}
grpcServer *grpc.Server
conn net.Conn
@ -107,11 +107,11 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error {
s.mu.Unlock()
return nil
}
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancelCause(ctx)
s.cancelCtx = cancel
s.done = make(chan struct{})
defer cancel()
defer cancel(errors.WithStack(context.Canceled))
defer close(s.done)
meta := make(map[string][]string)


@ -39,7 +39,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre
select {
case <-ctx.Done():
conn.Close()
return ctx.Err()
return context.Cause(ctx)
default:
}
if _, err := conn.Write(p.Data); err != nil {
@ -65,7 +65,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre
}
select {
case <-ctx.Done():
return ctx.Err()
return context.Cause(ctx)
default:
}
p := &BytesMessage{Data: buf[:n]}


@ -26,7 +26,7 @@ func (s *server) run(ctx context.Context, l net.Listener, id string) error {
eg.Go(func() error {
<-ctx.Done()
return ctx.Err()
return context.Cause(ctx)
})
eg.Go(func() error {


@ -14,7 +14,7 @@ func IsCanceled(ctx context.Context, err error) bool {
return true
}
// grpc does not set cancel correctly when stream gets cancelled and then Recv is called
if err != nil && ctx.Err() == context.Canceled {
if err != nil && context.Cause(ctx) == context.Canceled {
// when this error comes from containerd it is not typed at all, just a concatenated string
if strings.Contains(err.Error(), "EOF") {
return true


@ -7,6 +7,7 @@ import (
"sync"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
var appContextCache context.Context
@ -27,16 +28,17 @@ func Context() context.Context {
ctx = f(ctx)
}
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancelCause(ctx)
appContextCache = ctx
go func() {
for {
<-signals
cancel()
retries++
err := errors.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries)
cancel(err)
if retries >= exitLimit {
bklog.G(ctx).Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries)
bklog.G(ctx).Errorf(err.Error())
os.Exit(1)
}
}


@ -90,7 +90,7 @@ type call[T any] struct {
fn func(ctx context.Context) (T, error)
once sync.Once
closeProgressWriter func()
closeProgressWriter func(error)
progressState *progressState
progressCtx context.Context
}
@ -115,9 +115,9 @@ func newCall[T any](fn func(ctx context.Context) (T, error)) *call[T] {
}
func (c *call[T]) run() {
defer c.closeProgressWriter()
ctx, cancel := context.WithCancel(c.ctx)
defer cancel()
defer c.closeProgressWriter(errors.WithStack(context.Canceled))
ctx, cancel := context.WithCancelCause(c.ctx)
defer cancel(errors.WithStack(context.Canceled))
v, err := c.fn(ctx)
c.mu.Lock()
c.result = v
@ -155,8 +155,8 @@ func (c *call[T]) wait(ctx context.Context) (v T, err error) {
c.progressState.add(pw)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(errors.WithStack(context.Canceled))
c.ctxs = append(c.ctxs, ctx)
@ -175,7 +175,7 @@ func (c *call[T]) wait(ctx context.Context) (v T, err error) {
if ok {
c.progressState.close(pw)
}
return empty, ctx.Err()
return empty, context.Cause(ctx)
case <-c.ready:
return c.result, c.err // shared not implemented yet
}
@ -262,7 +262,9 @@ func (sc *sharedContext[T]) checkDone() bool {
for _, ctx := range sc.ctxs {
select {
case <-ctx.Done():
err = ctx.Err()
// Cause can't be used here because this error is returned from Err() by a custom
// context implementation, and unfortunately the stdlib does not allow defining Cause() for custom contexts
err = ctx.Err() //nolint: forbidigo
default:
sc.mu.Unlock()
return false


@ -11,14 +11,15 @@ type MultiReader struct {
main Reader
initialized bool
done chan struct{}
writers map[*progressWriter]func()
doneCause error
writers map[*progressWriter]func(error)
sent []*Progress
}
func NewMultiReader(pr Reader) *MultiReader {
mr := &MultiReader{
main: pr,
writers: make(map[*progressWriter]func()),
writers: make(map[*progressWriter]func(error)),
done: make(chan struct{}),
}
return mr
@ -46,9 +47,9 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader {
go func() {
if isBehind {
close := func() {
close := func(err error) {
w.Close()
closeWriter()
closeWriter(err)
}
i := 0
for {
@ -58,11 +59,11 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader {
if count == 0 {
select {
case <-ctx.Done():
close()
close(context.Cause(ctx))
mr.mu.Unlock()
return
case <-mr.done:
close()
close(mr.doneCause)
mr.mu.Unlock()
return
default:
@ -77,7 +78,7 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader {
if i%100 == 0 {
select {
case <-ctx.Done():
close()
close(context.Cause(ctx))
return
default:
}
@ -110,10 +111,12 @@ func (mr *MultiReader) handle() error {
if err != nil {
if err == io.EOF {
mr.mu.Lock()
cancelErr := context.Canceled
for w, c := range mr.writers {
w.Close()
c()
c(cancelErr)
}
mr.doneCause = cancelErr
close(mr.done)
mr.mu.Unlock()
return nil


@ -56,7 +56,7 @@ type WriterOption func(Writer)
// NewContext returns a new context and a progress reader that captures all
// progress items written to this context. The last return value is a closer
// function that signals that no new writes will happen to this context.
func NewContext(ctx context.Context) (Reader, context.Context, func()) {
func NewContext(ctx context.Context) (Reader, context.Context, func(error)) {
pr, pw, cancel := pipe()
ctx = WithProgress(ctx, pw)
return pr, ctx, cancel
@ -141,7 +141,7 @@ func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) {
select {
case <-ctx.Done():
pr.mu.Unlock()
return nil, ctx.Err()
return nil, context.Cause(ctx)
default:
}
dmap := pr.dirty
@ -185,8 +185,8 @@ func (pr *progressReader) append(pw *progressWriter) {
}
}
func pipe() (*progressReader, *progressWriter, func()) {
ctx, cancel := context.WithCancel(context.Background())
func pipe() (*progressReader, *progressWriter, func(error)) {
ctx, cancel := context.WithCancelCause(context.Background())
pr := &progressReader{
ctx: ctx,
writers: make(map[*progressWriter]struct{}),


@ -99,7 +99,7 @@ func (d Display) UpdateFrom(ctx context.Context, ch chan *client.SolveStatus) ([
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
return nil, context.Cause(ctx)
case <-ticker.C:
d.disp.refresh()
case ss, ok := <-ch:


@ -67,8 +67,9 @@ http:
}
deferF.Append(stop)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, cancel := context.WithCancelCause(context.Background())
ctx, _ = context.WithTimeoutCause(ctx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
defer cancel(errors.WithStack(context.Canceled))
url, err = detectPort(ctx, rc)
if err != nil {
return "", nil, err


@ -423,3 +423,10 @@ func prepareValueMatrix(tc testConf) []matrixValue {
}
return m
}
// SkipOnPlatform skips the test when running on the given GOOS.
func SkipOnPlatform(t *testing.T, goos string) {
if runtime.GOOS == goos {
t.Skipf("Skipped on %s", goos)
}
}
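A usage sketch for the new helper; the test name is illustrative:

```go
package integration_test

import (
	"testing"

	"github.com/moby/buildkit/util/testutil/integration"
)

func TestUnixSocketsOnly(t *testing.T) {
	// Skipped when runtime.GOOS == "windows"; runs on all other platforms.
	integration.SkipOnPlatform(t, "windows")
}
```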


@ -99,7 +99,7 @@ func newSandbox(ctx context.Context, w Worker, mirror string, mv matrixValue) (s
b, closer, err := w.New(ctx, cfg)
if err != nil {
return nil, nil, err
return nil, nil, errors.Wrap(err, "creating worker")
}
deferF.Append(closer)


@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"net"
"os"
"os/exec"
"strings"
@ -100,13 +99,11 @@ func StartCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) (func() error, error
}, nil
}
func WaitUnix(address string, d time.Duration, cmd *exec.Cmd) error {
address = strings.TrimPrefix(address, "unix://")
addr, err := net.ResolveUnixAddr("unix", address)
if err != nil {
return errors.Wrapf(err, "failed resolving unix addr: %s", address)
}
// WaitSocket will dial a socket opened by a command passed in as cmd.
// On Linux this socket is typically a Unix socket,
// while on Windows this will be a named pipe.
func WaitSocket(address string, d time.Duration, cmd *exec.Cmd) error {
address = strings.TrimPrefix(address, socketScheme)
step := 50 * time.Millisecond
i := 0
for {
@ -114,7 +111,7 @@ func WaitUnix(address string, d time.Duration, cmd *exec.Cmd) error {
return errors.Errorf("process exited: %s", cmd.String())
}
if conn, err := net.DialUnix("unix", nil, addr); err == nil {
if conn, err := dialPipe(address); err == nil {
conn.Close()
break
}


@ -0,0 +1,23 @@
//go:build !windows
// +build !windows
package integration
import (
"net"
"github.com/pkg/errors"
)
var socketScheme = "unix://"
// dialPipe handles pipe dialing on Unix. As a simplification,
// the local address (laddr) is discarded and left as nil.
func dialPipe(address string) (net.Conn, error) {
addr, err := net.ResolveUnixAddr("unix", address)
if err != nil {
return nil, errors.Wrapf(err, "failed resolving unix addr: %s", address)
}
return net.DialUnix("unix", nil, addr)
}


@ -0,0 +1,15 @@
package integration
import (
"net"
"github.com/Microsoft/go-winio"
)
var socketScheme = "npipe://"
// dialPipe handles pipe dialing on Windows. As a simplification,
// the timeout parameter is discarded.
func dialPipe(address string) (net.Conn, error) {
return winio.DialPipe(address, nil)
}
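Both dialPipe variants are consumed through WaitSocket, which trims the platform socketScheme prefix ("unix://" or "npipe://") and polls until the daemon answers. A minimal sketch of the call site; the wrapper function is illustrative:

```go
package workers

import (
	"os/exec"
	"time"

	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/pkg/errors"
)

// waitDaemon polls the daemon's socket (a Unix socket on Linux, a named
// pipe on Windows) via the platform-specific dialPipe until it accepts a
// connection or the timeout elapses.
func waitDaemon(address string, cmd *exec.Cmd) error {
	if err := integration.WaitSocket(address, 10*time.Second, cmd); err != nil {
		return errors.Wrap(err, "daemon did not start up")
	}
	return nil
}
```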


@ -88,9 +88,11 @@ func (c *Containerd) New(ctx context.Context, cfg *integration.BackendConfig) (b
if err := integration.LookupBinary(c.Containerd); err != nil {
return nil, nil, err
}
if err := integration.LookupBinary("buildkitd"); err != nil {
return nil, nil, err
}
if err := requireRoot(); err != nil {
return nil, nil, err
}
@ -117,6 +119,7 @@ func (c *Containerd) New(ctx context.Context, cfg *integration.BackendConfig) (b
if err != nil {
return nil, nil, err
}
if rootless {
if err := os.Chown(tmpdir, c.UID, c.GID); err != nil {
return nil, nil, err
@ -125,7 +128,7 @@ func (c *Containerd) New(ctx context.Context, cfg *integration.BackendConfig) (b
deferF.Append(func() error { return os.RemoveAll(tmpdir) })
address := filepath.Join(tmpdir, "containerd.sock")
address := getContainerdSock(tmpdir)
config := fmt.Sprintf(`root = %q
state = %q
# CRI plugins listens on 10010/tcp for stream server.
@ -137,8 +140,11 @@ disabled_plugins = ["cri"]
[debug]
level = "debug"
address = %q
`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"))
address = %q`,
filepath.Join(tmpdir, "root"),
filepath.Join(tmpdir, "state"),
address, getContainerdDebugSock(tmpdir),
)
var snBuildkitdArgs []string
if c.Snapshotter != "" {
@ -185,19 +191,23 @@ disabled_plugins = ["cri"]
if err != nil {
return nil, nil, err
}
if err := integration.WaitUnix(address, 10*time.Second, cmd); err != nil {
if err := integration.WaitSocket(address, 10*time.Second, cmd); err != nil {
ctdStop()
return nil, nil, errors.Wrapf(err, "containerd did not start up: %s", integration.FormatLogs(cfg.Logs))
}
deferF.Append(ctdStop)
buildkitdArgs := append([]string{"buildkitd",
"--oci-worker=false",
// handles only the Windows case; no effect on Unix
address = normalizeAddress(address)
buildkitdArgs := []string{
"buildkitd",
"--containerd-worker-gc=false",
"--containerd-worker=true",
"--containerd-worker-addr", address,
"--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603
}, snBuildkitdArgs...)
}
buildkitdArgs = applyBuildkitdPlatformFlags(buildkitdArgs)
buildkitdArgs = append(buildkitdArgs, snBuildkitdArgs...)
if runtime.GOOS != "windows" && c.Snapshotter != "native" {
c.ExtraEnv = append(c.ExtraEnv, "BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF=true")
@ -266,7 +276,7 @@ func runStargzSnapshotter(cfg *integration.BackendConfig) (address string, cl fu
if err != nil {
return "", nil, err
}
if err = integration.WaitUnix(address, 10*time.Second, cmd); err != nil {
if err = integration.WaitSocket(address, 10*time.Second, cmd); err != nil {
snStop()
return "", nil, errors.Wrapf(err, "containerd-stargz-grpc did not start up: %s", integration.FormatLogs(cfg.Logs))
}


@ -159,7 +159,7 @@ func (c Moby) New(ctx context.Context, cfg *integration.BackendConfig) (b integr
}
deferF.Append(d.StopWithError)
if err := integration.WaitUnix(d.Sock(), 5*time.Second, nil); err != nil {
if err := integration.WaitSocket(d.Sock(), 5*time.Second, nil); err != nil {
return nil, nil, errors.Errorf("dockerd did not start up: %q, %s", err, integration.FormatLogs(cfg.Logs))
}


@ -4,31 +4,18 @@ import (
"context"
"fmt"
"log"
"os"
"runtime"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
// InitOCIWorker registers an integration test worker, which enables the --oci-worker
// flag in the test buildkitd instance and disables the --containerd-worker flag. This
// integration test worker is not supported on Windows.
func InitOCIWorker() {
integration.Register(&OCI{ID: "oci"})
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if integration.RootlessSupported(uid) {
integration.Register(&OCI{ID: "oci-rootless", UID: uid, GID: gid})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
integration.Register(&OCI{ID: "oci-snapshotter-" + s, Snapshotter: s})
}
// delegate to the platform-specific registration
initOCIWorker()
}
type OCI struct {


@ -0,0 +1,31 @@
//go:build !windows
// +build !windows
package workers
import (
"fmt"
"os"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/testutil/integration"
)
func initOCIWorker() {
integration.Register(&OCI{ID: "oci"})
// the rootless uid is defined in Dockerfile
if s := os.Getenv("BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR"); s != "" {
var uid, gid int
if _, err := fmt.Sscanf(s, "%d:%d", &uid, &gid); err != nil {
bklog.L.Fatalf("unexpected BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR: %q", s)
}
if integration.RootlessSupported(uid) {
integration.Register(&OCI{ID: "oci-rootless", UID: uid, GID: gid})
}
}
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
integration.Register(&OCI{ID: "oci-snapshotter-" + s, Snapshotter: s})
}
}


@ -0,0 +1,7 @@
package workers
import "github.com/moby/buildkit/util/bklog"
func initOCIWorker() {
bklog.L.Info("OCI Worker not supported on Windows.")
}


@ -1,23 +0,0 @@
//go:build !windows
// +build !windows
package workers
import (
"path/filepath"
"syscall"
)
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setsid: true, // stretch sudo needs this for sigterm
}
}
func getBuildkitdAddr(tmpdir string) string {
return "unix://" + filepath.Join(tmpdir, "buildkitd.sock")
}
func getTraceSocketPath(tmpdir string) string {
return filepath.Join(tmpdir, "otel-grpc.sock")
}


@ -1,21 +0,0 @@
//go:build windows
// +build windows
package workers
import (
"path/filepath"
"syscall"
)
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{}
}
func getBuildkitdAddr(tmpdir string) string {
return "//./pipe/buildkitd-" + filepath.Base(tmpdir)
}
func getTraceSocketPath(tmpdir string) string {
return `\\.\pipe\buildkit-otel-grpc-` + filepath.Base(tmpdir)
}


@ -1,98 +1,17 @@
package workers
import (
"bufio"
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
func requireRoot() error {
if os.Getuid() != 0 {
return errors.Wrap(integration.ErrRequirements, "requires root")
}
return nil
}
func runBuildkitd(ctx context.Context, conf *integration.BackendConfig, args []string, logs map[string]*bytes.Buffer, uid, gid int, extraEnv []string) (address string, cl func() error, err error) {
deferF := &integration.MultiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
tmpdir, err := os.MkdirTemp("", "bktest_buildkitd")
if err != nil {
return "", nil, err
}
if err := os.Chown(tmpdir, uid, gid); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Join(tmpdir, "tmp"), 0711); err != nil {
return "", nil, err
}
if err := os.Chown(filepath.Join(tmpdir, "tmp"), uid, gid); err != nil {
return "", nil, err
}
deferF.Append(func() error { return os.RemoveAll(tmpdir) })
cfgfile, err := integration.WriteConfig(append(conf.DaemonConfig, withOTELSocketPath(getTraceSocketPath(tmpdir))))
if err != nil {
return "", nil, err
}
deferF.Append(func() error {
return os.RemoveAll(filepath.Dir(cfgfile))
})
args = append(args, "--config="+cfgfile)
address = getBuildkitdAddr(tmpdir)
args = append(args, "--root", tmpdir, "--addr", address, "--debug")
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec // test utility
cmd.Env = append(os.Environ(), "BUILDKIT_DEBUG_EXEC_OUTPUT=1", "BUILDKIT_DEBUG_PANIC_ON_ERROR=1", "TMPDIR="+filepath.Join(tmpdir, "tmp"))
cmd.Env = append(cmd.Env, extraEnv...)
cmd.SysProcAttr = getSysProcAttr()
stop, err := integration.StartCmd(cmd, logs)
if err != nil {
return "", nil, err
}
deferF.Append(stop)
if err := integration.WaitUnix(address, 15*time.Second, cmd); err != nil {
return "", nil, err
}
deferF.Append(func() error {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return errors.Wrap(err, "failed to open mountinfo")
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), tmpdir) {
return errors.Errorf("leaked mountpoint for %s", tmpdir)
}
}
return s.Err()
})
return address, cl, err
}
func withOTELSocketPath(socketPath string) integration.ConfigUpdater {
return otelSocketPath(socketPath)
}
@ -106,3 +25,79 @@ func (osp otelSocketPath) UpdateConfigFile(in string) string {
socketPath = %q
`, in, osp)
}
func runBuildkitd(
ctx context.Context,
conf *integration.BackendConfig,
args []string,
logs map[string]*bytes.Buffer,
uid, gid int,
extraEnv []string,
) (address string, cl func() error, err error) {
deferF := &integration.MultiCloser{}
cl = deferF.F()
defer func() {
if err != nil {
deferF.F()()
cl = nil
}
}()
tmpdir, err := os.MkdirTemp("", "bktest_buildkitd")
if err != nil {
return "", nil, err
}
if err := chown(tmpdir, uid, gid); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Join(tmpdir, "tmp"), 0711); err != nil {
return "", nil, err
}
if err := chown(filepath.Join(tmpdir, "tmp"), uid, gid); err != nil {
return "", nil, err
}
deferF.Append(func() error { return os.RemoveAll(tmpdir) })
cfgfile, err := integration.WriteConfig(
append(conf.DaemonConfig, withOTELSocketPath(getTraceSocketPath(tmpdir))))
if err != nil {
return "", nil, err
}
deferF.Append(func() error {
return os.RemoveAll(filepath.Dir(cfgfile))
})
args = append(args, "--config="+cfgfile)
address = getBuildkitdAddr(tmpdir)
args = append(args, "--root", tmpdir, "--addr", address, "--debug")
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec // test utility
cmd.Env = append(
os.Environ(),
"BUILDKIT_DEBUG_EXEC_OUTPUT=1",
"BUILDKIT_DEBUG_PANIC_ON_ERROR=1",
"TMPDIR="+filepath.Join(tmpdir, "tmp"))
cmd.Env = append(cmd.Env, extraEnv...)
cmd.SysProcAttr = getSysProcAttr()
stop, err := integration.StartCmd(cmd, logs)
if err != nil {
return "", nil, err
}
deferF.Append(stop)
if err := integration.WaitSocket(address, 15*time.Second, cmd); err != nil {
return "", nil, err
}
// separated out since it's not required on Windows
deferF.Append(func() error {
return mountInfo(tmpdir)
})
return address, cl, err
}


@ -0,0 +1,74 @@
//go:build !windows
// +build !windows
package workers
import (
"bufio"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
)
func applyBuildkitdPlatformFlags(args []string) []string {
return append(args, "--oci-worker=false")
}
func requireRoot() error {
if os.Getuid() != 0 {
return errors.Wrap(integration.ErrRequirements, "requires root")
}
return nil
}
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setsid: true, // stretch sudo needs this for sigterm
}
}
func getBuildkitdAddr(tmpdir string) string {
return "unix://" + filepath.Join(tmpdir, "buildkitd.sock")
}
func getTraceSocketPath(tmpdir string) string {
return filepath.Join(tmpdir, "otel-grpc.sock")
}
func getContainerdSock(tmpdir string) string {
return filepath.Join(tmpdir, "containerd.sock")
}
func getContainerdDebugSock(tmpdir string) string {
return filepath.Join(tmpdir, "debug.sock")
}
func mountInfo(tmpdir string) error {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return errors.Wrap(err, "failed to open mountinfo")
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.Contains(s.Text(), tmpdir) {
return errors.Errorf("leaked mountpoint for %s", tmpdir)
}
}
return s.Err()
}
// moved here since os.Chown is not supported on Windows.
// see no-op counterpart in util_windows.go
func chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}
func normalizeAddress(address string) string {
// for parity with windows, no effect for unix
return address
}


@ -0,0 +1,53 @@
package workers
import (
"path/filepath"
"strings"
"syscall"
)
func applyBuildkitdPlatformFlags(args []string) []string {
return args
}
func requireRoot() error {
return nil
}
func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{}
}
func getBuildkitdAddr(tmpdir string) string {
return "npipe:////./pipe/buildkitd-" + filepath.Base(tmpdir)
}
func getTraceSocketPath(tmpdir string) string {
return `\\.\pipe\buildkit-otel-grpc-` + filepath.Base(tmpdir)
}
func getContainerdSock(tmpdir string) string {
return `\\.\pipe\containerd-` + filepath.Base(tmpdir)
}
func getContainerdDebugSock(tmpdir string) string {
return `\\.\pipe\containerd-` + filepath.Base(tmpdir) + `debug`
}
// no-op for parity with unix
func mountInfo(tmpdir string) error {
return nil
}
func chown(name string, uid, gid int) error {
// Chown not supported on Windows
return nil
}
func normalizeAddress(address string) string {
address = filepath.ToSlash(address)
if !strings.HasPrefix(address, "npipe://") {
address = "npipe://" + address
}
return address
}


@ -5,6 +5,7 @@ import (
"sync"
"github.com/moby/buildkit/util/tracing/detect"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
@ -13,8 +14,8 @@ const maxBuffer = 256
var exp = &Exporter{}
func init() {
detect.Register("delegated", func() (sdktrace.SpanExporter, error) {
return exp, nil
detect.Register("delegated", func() (sdktrace.SpanExporter, sdkmetric.Exporter, error) {
return exp, nil, nil
}, 100)
}


@ -10,13 +10,16 @@ import (
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/exporters/prometheus"
"go.opentelemetry.io/otel/metric"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
)
type ExporterDetector func() (sdktrace.SpanExporter, error)
type ExporterDetector func() (sdktrace.SpanExporter, sdkmetric.Exporter, error)
type detector struct {
f ExporterDetector
@ -26,10 +29,16 @@ type detector struct {
var ServiceName string
var Recorder *TraceRecorder
var Resource *resource.Resource
var detectors map[string]detector
var once sync.Once
var tp trace.TracerProvider
var exporter sdktrace.SpanExporter
var mp metric.MeterProvider
var exporter struct {
SpanExporter sdktrace.SpanExporter
MetricExporter sdkmetric.Exporter
}
var closers []func(context.Context) error
var err error
@ -43,14 +52,14 @@ func Register(name string, exp ExporterDetector, priority int) {
}
}
func detectExporter() (sdktrace.SpanExporter, error) {
func detectExporter() (texp sdktrace.SpanExporter, mexp sdkmetric.Exporter, err error) {
if n := os.Getenv("OTEL_TRACES_EXPORTER"); n != "" {
d, ok := detectors[n]
if !ok {
if n == "none" {
return nil, nil
return nil, nil, nil
}
return nil, errors.Errorf("unsupported opentelemetry tracer %v", n)
return nil, nil, errors.Errorf("unsupported opentelemetry tracer %v", n)
}
return d.f()
}
@ -61,84 +70,145 @@ func detectExporter() (sdktrace.SpanExporter, error) {
sort.Slice(arr, func(i, j int) bool {
return arr[i].priority < arr[j].priority
})
for _, d := range arr {
exp, err := d.f()
t, m, err := d.f()
if err != nil {
return nil, err
return nil, nil, err
}
if exp != nil {
return exp, nil
if texp == nil {
texp = t
}
if mexp == nil {
mexp = m
}
// Found a candidate for both exporters so just return now.
if texp != nil && mexp != nil {
return texp, mexp, nil
}
}
return nil, nil
return texp, mexp, nil
}
func getExporter() (sdktrace.SpanExporter, error) {
exp, err := detectExporter()
func getExporters() (sdktrace.SpanExporter, sdkmetric.Exporter, error) {
texp, mexp, err := detectExporter()
if err != nil {
return nil, err
return nil, nil, err
}
if Recorder != nil {
Recorder.SpanExporter = exp
exp = Recorder
Recorder.SpanExporter = texp
texp = Recorder
}
return exp, nil
return texp, mexp, nil
}
func detect() error {
tp = trace.NewNoopTracerProvider()
mp = sdkmetric.NewMeterProvider()
exp, err := getExporter()
if err != nil || exp == nil {
texp, mexp, err := getExporters()
if err != nil || (texp == nil && mexp == nil) {
return err
}
if Resource == nil {
res, err := resource.Detect(context.Background(), serviceNameDetector{})
if err != nil {
return err
}
res, err = resource.Merge(resource.Default(), res)
if err != nil {
return err
}
Resource = res
}
// enable log with traceID when valid exporter
bklog.EnableLogWithTraceID(true)
if texp != nil {
bklog.EnableLogWithTraceID(true)
res, err := resource.Detect(context.Background(), serviceNameDetector{})
if err != nil {
return err
}
res, err = resource.Merge(resource.Default(), res)
if err != nil {
return err
sp := sdktrace.NewBatchSpanProcessor(texp)
if Recorder != nil {
Recorder.flush = sp.ForceFlush
}
sdktp := sdktrace.NewTracerProvider(
sdktrace.WithSpanProcessor(sp),
sdktrace.WithResource(Resource),
)
closers = append(closers, sdktp.Shutdown)
exporter.SpanExporter = texp
tp = sdktp
}
sp := sdktrace.NewBatchSpanProcessor(exp)
if Recorder != nil {
Recorder.flush = sp.ForceFlush
var readers []sdkmetric.Reader
if mexp != nil {
// Create a new periodic reader using any configured metric exporter.
readers = append(readers, sdkmetric.NewPeriodicReader(mexp))
}
sdktp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sp), sdktrace.WithResource(res))
closers = append(closers, sdktp.Shutdown)
if r, err := prometheus.New(); err != nil {
// Log the error but do not fail if we could not configure the prometheus metrics.
bklog.G(context.Background()).
WithError(err).
Error("failed prometheus metrics configuration")
} else {
// Register the prometheus reader if there was no error.
readers = append(readers, r)
}
exporter = exp
tp = sdktp
if len(readers) > 0 {
opts := make([]sdkmetric.Option, 0, len(readers)+1)
opts = append(opts, sdkmetric.WithResource(Resource))
for _, r := range readers {
opts = append(opts, sdkmetric.WithReader(r))
}
sdkmp := sdkmetric.NewMeterProvider(opts...)
closers = append(closers, sdkmp.Shutdown)
exporter.MetricExporter = mexp
mp = sdkmp
}
return nil
}
func TracerProvider() (trace.TracerProvider, error) {
once.Do(func() {
if err1 := detect(); err1 != nil {
err = err1
}
})
b, _ := strconv.ParseBool(os.Getenv("OTEL_IGNORE_ERROR"))
if err != nil && !b {
if err := detectOnce(); err != nil {
return nil, err
}
return tp, nil
}
func Exporter() (sdktrace.SpanExporter, error) {
_, err := TracerProvider()
if err != nil {
func MeterProvider() (metric.MeterProvider, error) {
if err := detectOnce(); err != nil {
return nil, err
}
return exporter, nil
return mp, nil
}
func detectOnce() error {
once.Do(func() {
if err1 := detect(); err1 != nil {
b, _ := strconv.ParseBool(os.Getenv("OTEL_IGNORE_ERROR"))
if !b {
err = err1
}
}
})
return err
}
func Exporter() (sdktrace.SpanExporter, sdkmetric.Exporter, error) {
_, err := TracerProvider()
if err != nil {
return nil, nil, err
}
return exporter.SpanExporter, exporter.MetricExporter, nil
}
func Shutdown(ctx context.Context) error {


@ -5,9 +5,13 @@ import (
"os"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
@ -15,7 +19,20 @@ func init() {
Register("otlp", otlpExporter, 10)
}
func otlpExporter() (sdktrace.SpanExporter, sdkmetric.Exporter, error) {
texp, err := otlpSpanExporter()
if err != nil {
return nil, nil, err
}
mexp, err := otlpMetricExporter()
if err != nil {
return nil, nil, err
}
return texp, mexp, nil
}
func otlpSpanExporter() (sdktrace.SpanExporter, error) {
set := os.Getenv("OTEL_TRACES_EXPORTER") == "otlp" || os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != "" || os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT") != ""
if !set {
return nil, nil
@ -43,3 +60,36 @@ func otlpExporter() (sdktrace.SpanExporter, error) {
return otlptrace.New(context.Background(), c)
}
func otlpMetricExporter() (sdkmetric.Exporter, error) {
set := os.Getenv("OTEL_METRICS_EXPORTER") == "otlp" || os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != "" || os.Getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT") != ""
if !set {
return nil, nil
}
proto := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL")
if proto == "" {
proto = os.Getenv("OTEL_EXPORTER_OTLP_PROTOCOL")
}
if proto == "" {
proto = "grpc"
}
switch proto {
case "grpc":
return otlpmetricgrpc.New(context.Background(),
otlpmetricgrpc.WithTemporalitySelector(deltaTemporality),
)
case "http/protobuf":
return otlpmetrichttp.New(context.Background(),
otlpmetrichttp.WithTemporalitySelector(deltaTemporality),
)
// case "http/json": // unsupported by library
default:
return nil, errors.Errorf("unsupported otlp protocol %v", proto)
}
}
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
return metricdata.DeltaTemporality
}
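
Protocol selection is env-driven: OTEL_EXPORTER_OTLP_METRICS_PROTOCOL wins over OTEL_EXPORTER_OTLP_PROTOCOL, and an unset value means gRPC. A hedged sketch of the equivalent direct construction for the "http/protobuf" branch (endpoint, headers, and TLS still come from the OTEL_EXPORTER_OTLP_* env vars handled by the library):

package main

import (
    "context"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// Same selector as above: report deltas for every instrument kind.
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality {
    return metricdata.DeltaTemporality
}

func newHTTPMetricExporter(ctx context.Context) (sdkmetric.Exporter, error) {
    // Mirrors the "http/protobuf" case in the switch above.
    return otlpmetrichttp.New(ctx,
        otlpmetrichttp.WithTemporalitySelector(deltaTemporality),
    )
}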

View File

@ -70,9 +70,10 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
}
ctx, cancel := c.connection.ContextWithStop(ctx)
defer cancel(errors.WithStack(context.Canceled))
ctx, tCancel := context.WithCancelCause(ctx)
ctx, _ = context.WithTimeoutCause(ctx, 30*time.Second, errors.WithStack(context.DeadlineExceeded))
defer tCancel(errors.WithStack(context.Canceled))
ctx = c.connection.ContextWithMetadata(ctx)
err := func() error {

View File

@ -22,6 +22,7 @@ import (
"time"
"unsafe"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@ -185,7 +186,7 @@ func (c *Connection) Shutdown(ctx context.Context) error {
select {
case <-c.backgroundConnectionDoneCh:
case <-ctx.Done():
return ctx.Err()
return context.Cause(ctx)
}
c.mu.Lock()
@ -200,17 +201,17 @@ func (c *Connection) Shutdown(ctx context.Context) error {
return nil
}
func (c *Connection) ContextWithStop(ctx context.Context) (context.Context, context.CancelCauseFunc) {
// Unify the parent context Done signal with the Connection's
// stop channel.
ctx, cancel := context.WithCancelCause(ctx)
go func(ctx context.Context, cancel context.CancelCauseFunc) {
select {
case <-ctx.Done():
// Nothing to do, either cancelled or deadline
// happened.
case <-c.stopCh:
cancel(errors.WithStack(context.Canceled))
}
}(ctx, cancel)
return ctx, cancel

View File

@ -59,6 +59,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// CounterVecOpts bundles the options to create a CounterVec metric.
// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type CounterVecOpts struct {
CounterOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@ -174,16 +186,24 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
return V2.NewCounterVec(CounterVecOpts{
CounterOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
opts.VariableLabels,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.

View File

@ -14,20 +14,16 @@
package prometheus
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@ -54,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels and normalization function for
// which the metric maintains variable values.
variableLabels ConstrainedLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@ -80,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names and normalization functions. Their
// label values are variable and therefore not part of the Desc. (They are managed
// within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels.constrainedLabels(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, label := range d.variableLabels {
if !checkLabelName(label.Name) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
return d
}
labelNames = append(labelNames, "$"+label.Name)
labelNameSet[label.Name] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}

View File

@ -37,35 +37,35 @@
//
// type metrics struct {
// cpuTemp prometheus.Gauge
// hdFailures *prometheus.CounterVec
// }
//
// func NewMetrics(reg prometheus.Registerer) *metrics {
// m := &metrics{
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// }),
// hdFailures: prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// ),
// }
// reg.MustRegister(m.cpuTemp)
// reg.MustRegister(m.hdFailures)
// return m
// }
//
// func main() {
// // Create a non-global registry.
// reg := prometheus.NewRegistry()
//
// // Create new metrics and register them using the custom registry.
// m := NewMetrics(reg)
// // Set values for the newly created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//

View File

@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// GaugeVecOpts bundles the options to create a GaugeVec metric.
// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type GaugeVecOpts struct {
GaugeOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
return V2.NewGaugeVec(GaugeVecOpts{
GaugeOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.

View File

@ -23,11 +23,10 @@ import (
"strings"
"sync"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
)
const (

View File

@ -22,10 +22,9 @@ import (
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@ -402,7 +401,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see
// NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller
@ -433,7 +432,7 @@ type HistogramOpts struct {
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
// DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@ -469,6 +468,18 @@ type HistogramOpts struct {
NativeHistogramMaxZeroThreshold float64
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type HistogramVecOpts struct {
HistogramOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@ -632,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 {
key--
}
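// The shift below is a ceiling division by 2^-schema: since that divisor
// is a power of two, adding (1<<-schema)-1 first and then arithmetically
// shifting right rounds the quotient up for negative keys as well, which
// plain truncating division ((key+div-1)/div) can get wrong.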
offset := (1 << -schema) - 1
key = (key + offset) >> -schema
}
if isInf {
key++
@ -810,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
}
}
// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
@ -1034,15 +1041,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
return V2.NewHistogramVec(HistogramVecOpts{
HistogramOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}

View File

@ -32,6 +32,78 @@ import (
// create a Desc.
type Labels map[string]string
// ConstrainedLabel represents a label name and its constraint function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabel struct {
Name string
Constraint func(string) string
}
func (cl ConstrainedLabel) Constrain(v string) string {
if cl.Constraint == nil {
return v
}
return cl.Constraint(v)
}
// ConstrainableLabels is an interface that allows creating of labels that can
// be optionally constrained.
//
// prometheus.V2().NewCounterVec(CounterVecOpts{
// CounterOpts: {...}, // Usual CounterOpts fields
// VariableLabels: []ConstrainedLabels{
// {Name: "A"},
// {Name: "B", Constraint: func(v string) string { ... }},
// },
// })
type ConstrainableLabels interface {
constrainedLabels() ConstrainedLabels
labelNames() []string
}
// ConstrainedLabels represents a collection of label name -> constrain function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabels []ConstrainedLabel
func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
return cls
}
func (cls ConstrainedLabels) labelNames() []string {
names := make([]string, len(cls))
for i, label := range cls {
names[i] = label.Name
}
return names
}
// UnconstrainedLabels represents a collection of labels without any constraint on
// their value. Thus, it is simply a collection of label names.
//
// UnconstrainedLabels([]string{ "A", "B" })
//
// is equivalent to
//
// ConstrainedLabels {
// { Name: "A" },
// { Name: "B" },
// }
type UnconstrainedLabels []string
func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
constrainedLabels := make([]ConstrainedLabel, len(uls))
for i, l := range uls {
constrainedLabels[i] = ConstrainedLabel{Name: l}
}
return constrainedLabels
}
func (uls UnconstrainedLabels) labelNames() []string {
return uls
}
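
A short sketch of what these types enable through the experimental V2 constructors (metric and label names are illustrative):

package main

import (
    "strings"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Normalize the "method" label so "GET" and "get" share one series.
    requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
        CounterOpts: prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "HTTP requests by method.",
        },
        VariableLabels: prometheus.ConstrainedLabels{
            {Name: "method", Constraint: strings.ToLower},
        },
    })
    prometheus.MustRegister(requests)
    requests.WithLabelValues("GET").Inc() // stored as method="get"
}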
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"

View File

@ -20,11 +20,9 @@ import (
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.

View File

@ -37,6 +37,7 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
@ -47,9 +48,10 @@ import (
)
const (
contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
processStartTimeHeader = "Process-Start-Time-Unix"
)
var gzipPool = sync.Pool{
@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
}
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@ -366,6 +371,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
// ProcessStartTime allows setting a process start time value that will be exposed
// with the "Process-Start-Time-Unix" response header along with the metrics
// payload. This allows callers to have efficient transformations to cumulative
// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
// scrape target.
// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
// exposition format.
ProcessStartTime time.Time
}
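
A minimal sketch of opting into the header (registry, address, and start time are illustrative):

package main

import (
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    // Every scrape response now carries Process-Start-Time-Unix.
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
        ProcessStartTime: time.Now(),
    }))
    _ = http.ListenAndServe(":8080", nil)
}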
// gzipAccepted returns whether the client will accept gzip-encoded content.

View File

@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
o.apply(rtOpts)
}
// Curry the counter with dynamic labels before checking the remaining labels.
code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
for label, resolve := range rtOpts.extraLabelsFromCtx {
l[label] = resolve(resp.Request.Context())
}
addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
o.apply(rtOpts)
}
// Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
for label, resolve := range rtOpts.extraLabelsFromCtx {
l[label] = resolve(resp.Request.Context())
}
observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}

View File

@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
o.apply(hOpts)
}
// Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
o.apply(hOpts)
}
// Curry the counter with dynamic labels before checking the remaining labels.
code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
o.apply(hOpts)
}
// Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
l := labels(code, method, r.Method, status, hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
}
@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
o.apply(hOpts)
}
// Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
o.apply(hOpts)
}
// Curry the observer with dynamic labels before checking the remaining labels.
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
for label, resolve := range hOpts.extraLabelsFromCtx {
l[label] = resolve(r.Context())
}
observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}

View File

@ -24,14 +24,32 @@ type Option interface {
apply(*options)
}
// LabelValueFromCtx are used to compute the label value from request context.
// Context can be filled with values from request through middleware.
type LabelValueFromCtx func(ctx context.Context) string
// options store options for both a handler or round tripper.
type options struct {
extraMethods []string
getExemplarFn func(requestCtx context.Context) prometheus.Labels
extraLabelsFromCtx map[string]LabelValueFromCtx
}
func defaultOptions() *options {
return &options{
getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
extraLabelsFromCtx: map[string]LabelValueFromCtx{},
}
}
func (o *options) emptyDynamicLabels() prometheus.Labels {
labels := prometheus.Labels{}
for label := range o.extraLabelsFromCtx {
labels[label] = ""
}
return labels
}
type optionApplyFunc func(*options)
@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
})
}
// WithExemplarFromContext allows injecting a function that extracts an exemplar
// from the request context; the exemplar is attached to counter and histogram
// metrics. If the function returns nil labels or the metric does not support
// exemplars, no exemplar will be added (noop), but the metric will continue to
// observe/increment.
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
return optionApplyFunc(func(o *options) {
o.getExemplarFn = getExemplarFn
})
}
// WithLabelFromCtx registers a label for dynamic resolution with access to context.
// See ExampleInstrumentHandlerWithLabelResolver for example usage.
func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
return optionApplyFunc(func(o *options) {
o.extraLabelsFromCtx[name] = valueFn
})
}
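
A sketch of wiring a context-resolved label into a handler (label, metric, and context-key names are illustrative); the dynamic label must be declared on the vector so the curried empty value can be checked up front:

package main

import (
    "context"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{}

func main() {
    requests := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "api_requests_total",
        Help: "Requests by code, method and tenant.",
    }, []string{"code", "method", "tenant"})
    prometheus.MustRegister(requests)

    handler := promhttp.InstrumentHandlerCounter(requests,
        http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}),
        promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
            // Resolve the tenant from request context; "unknown" is a fallback.
            if v, ok := ctx.Value(tenantKey{}).(string); ok {
                return v
            }
            return "unknown"
        }),
    )
    http.Handle("/api", handler)
}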

View File

@ -21,18 +21,17 @@ import (
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"unicode/utf8"
"github.com/cespare/xxhash/v2"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
"github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"google.golang.org/protobuf/proto"
)
const (
@ -933,6 +932,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
if dtoMetric.TimestampMs != nil {
h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
h.Write(separatorByteSlice)
}
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@ -962,7 +965,7 @@ func checkDescConsistency(
copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l.Name),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {

View File

@ -22,11 +22,10 @@ import (
"sync/atomic"
"time"
"github.com/beorn7/perks/quantile"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/beorn7/perks/quantile"
"google.golang.org/protobuf/proto"
)
// quantileLabel is used for the label that defines the quantile in a
@ -148,6 +147,18 @@ type SummaryOpts struct {
BufCap uint32
}
// SummaryVecOpts bundles the options to create a SummaryVec metric.
// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type SummaryVecOpts struct {
SummaryOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
@ -530,20 +541,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
return V2.NewSummaryVec(SummaryVecOpts{
SummaryOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}

View File

@ -23,7 +23,9 @@ type Timer struct {
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. If the Observer implements ExemplarObserver, passing an
// exemplar later on will also be supported.
// Timer is usually used to time a function call in the
// following way:
//
// func TimeMe() {
@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration()
// // Do actual work.
// }
//
// or
//
// func TimeMeWithExemplar() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDurationWithExemplar(exemplar)
// // Do actual work.
// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
}
return d
}
// ObserveDurationWithExemplar is like ObserveDuration, but it will also
// observe the exemplar with the duration, unless the exemplar is nil or the
// provided Observer can't be cast to an ExemplarObserver.
func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
d := time.Since(t.begin)
eo, ok := t.observer.(ExemplarObserver)
if ok && exemplar != nil {
eo.ObserveWithExemplar(d.Seconds(), exemplar)
return d
}
if t.observer != nil {
t.observer.Observe(d.Seconds())
}
return d
}
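
A usage sketch (function, histogram, and label names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

// timeRequest assumes hist was created via prometheus.NewHistogram, which
// implements ExemplarObserver; otherwise the call falls back to plain Observe.
func timeRequest(hist prometheus.Histogram, traceID string) {
    timer := prometheus.NewTimer(hist)
    defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})
    // ... actual work ...
}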

View File

@ -19,13 +19,11 @@ import (
"time"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, l := range desc.variableLabels {
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(l.Name),
Value: proto.String(labelValues[i]),
})
}

View File

@ -20,6 +20,24 @@ import (
"github.com/prometheus/common/model"
)
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@ -72,6 +90,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -91,6 +110,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels)
if err != nil {
return false
@ -106,6 +128,9 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
return m.metricMap.deleteByLabels(labels, m.curry)
}
@ -145,10 +170,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
return nil, fmt.Errorf("label name %q is already curried", label.Name)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@ -156,7 +181,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -199,6 +224,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@ -224,6 +250,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@ -266,16 +295,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
return 0, fmt.Errorf("label name %q is already curried", label)
return 0, fmt.Errorf("label name %q is already curried", label.Name)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
return 0, fmt.Errorf("label name %q missing in label map", label.Name)
}
h = m.hashAdd(h, val)
}
@ -453,7 +482,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@ -605,7 +634,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++
continue
}
if values[i] != labels[k.Name] {
return false
}
}
@ -621,7 +650,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
iCurry++
continue
}
labelValues[i] = labels[k.Name]
}
return labelValues
}
@ -640,3 +669,35 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
return labelValues
}
func constrainLabels(desc *Desc, labels Labels) Labels {
constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
v = desc.variableLabels[i].Constrain(v)
}
constrainedLabels[l] = v
}
return constrainedLabels
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
iCurry++
continue
}
if i < len(desc.variableLabels) {
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
} else {
constrainedValues[iLVs] = lvs[iLVs]
}
iLVs++
}
return constrainedValues
}
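
To make the interplay with currying concrete, a worked example (label names hypothetical):

// With variableLabels = [method, code], a ToLower constraint on "method",
// and "code" curried at index 1:
//
//	constrainLabelValues(desc, []string{"GET"}, curry)
//
// visits positions 0..1, skips the curried index, and returns
// []string{"get"}; curried values were already constrained by CurryWith.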

View File

@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
type v2 struct{}
// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with slightly changed API. It is acceptable to use some pieces from v1
// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc`
// in the same codebase.
var V2 = v2{}

View File

@ -17,12 +17,10 @@ import (
"fmt"
"sort"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
"google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there was any. This will override any errer
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {

File diff suppressed because it is too large

View File

@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
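# '+' is not a valid character in a Docker image tag, so a semver build
# suffix such as "2.45.0+ds" (illustrative) is published as "2.45.0-ds".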
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)

View File

@ -21,6 +21,7 @@ import (
// kernel data structures.
type FS struct {
proc fs.FS
real bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil {
return FS{}, err
}
real, err := isRealProc(mountPoint)
if err != nil {
return FS{}, err
}
return FS{fs, real}, nil
}
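
A small consumer-side sketch (mount point is the package default; the field read at the end is illustrative):

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    // NewFS now also records (via statfs, where the platform supports it)
    // whether the mount is a real procfs.
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        panic(err)
    }
    stat, err := fs.Stat()
    if err != nil {
        panic(err)
    }
    fmt.Println("boot time:", stat.BootTime)
}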

View File

@ -0,0 +1,23 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows
// +build netbsd openbsd solaris windows
package procfs
// isRealProc returns true on architectures that don't have a Type argument
// in their Statfs_t struct
func isRealProc(mountPoint string) (bool, error) {
return true, nil
}

vendor/github.com/prometheus/procfs/fs_statfs_type.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows
// +build !netbsd,!openbsd,!solaris,!windows
package procfs
import (
"syscall"
)
// isRealProc determines whether supplied mountpoint is really a proc filesystem.
func isRealProc(mountPoint string) (bool, error) {
stat := syscall.Statfs_t{}
err := syscall.Statfs(mountPoint, &stat)
if err != nil {
return false, err
}
// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
return stat.Type == 0x9fa0, nil
}

View File

@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil
}
// ParseHexUint64s parses uint64 values from the given hex strings.
func ParseHexUint64s(ss []string) ([]*uint64, error) {
us := make([]*uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
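
This helper lives in procfs's internal util package, so it is only reachable from within the module; its behavior, illustrated (values are examples):

// Inside the procfs module:
vals, err := util.ParseHexUint64s([]string{"2a", "ff"})
// err == nil; *vals[0] == 42 and *vals[1] == 255 (fields are base-16).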
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)

View File

@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
// The average time from the point the client sends RPC requests until it receives the response.
AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
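// AverageRTTMilliseconds is derived rather than read from the file:
// cumulative total response time (ns[6]) divided by the request count
// (ns[0]), guarded against division by zero below; e.g. 100ms over 4
// requests yields 25ms.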
if ns[0] != 0 {
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
}
if len(ns) > 8 {
opStats.Errors = ns[8]

View File

@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
Searched uint64
Found uint64
New uint64
Invalid uint64
Ignore uint64
Delete uint64
DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
    entries, err := util.ParseHexUint64s(fields)
    if err != nil {
        return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
    }
    numEntries := len(entries)
    if numEntries < 16 || numEntries > 17 {
        return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
    }

    stats := &ConntrackStatEntry{
        Entries:      *entries[0],
        Searched:     *entries[1],
        Found:        *entries[2],
        New:          *entries[3],
        Invalid:      *entries[4],
        Ignore:       *entries[5],
        Delete:       *entries[6],
        DeleteList:   *entries[7],
        Insert:       *entries[8],
        InsertFailed: *entries[9],
        Drop:         *entries[10],
        EarlyDrop:    *entries[11],
    }

    // Ignore missing search_restart on Linux < 2.6.35.
    if numEntries == 17 {
        stats.SearchRestart = *entries[16]
    }

    return stats, nil
}

View File

@ -76,6 +76,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r)
var stats []SoftnetStat
cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat.SoftnetBacklogLen = us[0]
softnetStat.Index = us[1]
} else {
// For older kernels, create the Index based on the scan line number.
softnetStat.Index = uint32(cpuIndex)
}
softnetStat.Width = width
stats = append(stats, softnetStat)
cpuIndex++
}
return stats, nil
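The change above keeps a running cpuIndex so that rows from older kernels, which lack the trailing index column, still get a CPU index derived from their position in the file. A tiny sketch of that fallback over two fabricated softnet_stat rows:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	data := "0000272d 00000000 000000f2\n00001b34 00000000 000000a1\n"
	s := bufio.NewScanner(strings.NewReader(data))
	for cpuIndex := 0; s.Scan(); cpuIndex++ {
		// No index column in the row, so the scan line number stands in.
		fmt.Printf("CPU %d processed=%s\n", cpuIndex, strings.Fields(s.Text())[0])
	}
}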

vendor/github.com/prometheus/procfs/net_wireless.go generated vendored Normal file

@ -0,0 +1,182 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Wireless models the content of /proc/net/wireless.
type Wireless struct {
Name string
// Status is the current 4-digit hex value status of the interface.
Status uint64
// QualityLink is the link quality.
QualityLink int
// QualityLevel is the signal gain (dBm).
QualityLevel int
// QualityNoise is the signal noise baseline (dBm).
QualityNoise int
// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
DiscardedNwid int
// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
DiscardedCrypt int
// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
DiscardedFrag int
// DiscardedRetry is the number of discarded packets that reached max MAC retries.
DiscardedRetry int
// DiscardedMisc is the number of discarded packets for other reasons.
DiscardedMisc int
// MissedBeacon is the number of missed beacons/superframe.
MissedBeacon int
}
// Wireless returns kernel wireless statistics.
func (fs FS) Wireless() ([]*Wireless, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
if err != nil {
return nil, err
}
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse wireless: %w", err)
}
return m, nil
}
// parseWireless parses the contents of /proc/net/wireless.
/*
Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
 face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
 eth1: 0000    5.  -256.  -10.       0      1      0      3      0        0
 eth2: 0000    5.  -256.  -20.       0      2      0      4      0        0
*/
func parseWireless(r io.Reader) ([]*Wireless, error) {
var (
interfaces []*Wireless
scanner = bufio.NewScanner(r)
)
for n := 0; scanner.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line := scanner.Text()
parts := strings.Split(line, ":")
if len(parts) != 2 {
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
}
name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1])
if len(stats) < 10 {
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
}
status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil {
return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
}
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
}
w := &Wireless{
Name: name,
Status: status,
QualityLink: qlink,
QualityLevel: qlevel,
QualityNoise: qnoise,
DiscardedNwid: dnwid,
DiscardedCrypt: dcrypt,
DiscardedFrag: dfrag,
DiscardedRetry: dretry,
DiscardedMisc: dmisc,
MissedBeacon: mbeacon,
}
interfaces = append(interfaces, w)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
}
return interfaces, nil
}
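The new file exposes the parsed data through FS.Wireless. A hedged usage sketch, assuming a Linux host with /proc mounted (the slice is simply empty when no wireless interfaces exist):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // uses /proc
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless() // parses /proc/net/wireless as above
	if err != nil {
		panic(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%d dBm noise=%d dBm missed beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}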


@ -15,7 +15,6 @@ package procfs
import (
"bufio"
"io"
"os"
"path/filepath"
"strconv"
@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat
for _, filePath := range statFiles {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
procNetstat, err := parseNetstat(file)
procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) {
// parseNetstat parses the metrics from `/proc/net/stat/` file
// and returns a NetStat structure.
func parseNetstat(r io.Reader) (NetStat, error) {
var (
scanner = bufio.NewScanner(r)
netStat = NetStat{
Stats: make(map[string][]uint64),
}
)
func parseNetstat(filePath string) (NetStat, error) {
netStat := NetStat{
Stats: make(map[string][]uint64),
}
file, err := os.Open(filePath)
if err != nil {
return netStat, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats
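The refactor moves os.Open into parseNetstat so each file is closed by a defer before the loop moves on; previously the opened files were never closed, leaking a descriptor per stat file. The pattern in isolation:

package main

import (
	"bufio"
	"fmt"
	"os"
)

// parseOne opens, scans, and always closes its file, so a loop over many
// paths never accumulates open descriptors.
func parseOne(path string) (int, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	lines := 0
	s := bufio.NewScanner(f)
	for s.Scan() {
		lines++
	}
	return lines, s.Err()
}

func main() {
	n, err := parseOne("/proc/net/stat/nf_conntrack") // any readable file works
	fmt.Println(n, err)
}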


@ -21,7 +21,6 @@ import (
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@ -30,7 +29,7 @@ type Proc struct {
// The process ID.
PID int
fs fs.FS
fs FS
}
// Procs represents a list of Proc structs.
@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs.proc}, nil
return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
p = append(p, Proc{PID: int(pid), fs: fs.proc})
p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
if p.fs.real {
stat, err := os.Stat(p.path("fd"))
if err != nil {
return 0, err
}
size := stat.Size()
if size > 0 {
return int(size), nil
}
}
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
}
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
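On Linux v6.2 and later, a stat of /proc/<pid>/fd reports the number of open descriptors as the directory size, so one stat call replaces reading the whole directory; the code falls back to enumeration when the reported size is zero. A standalone sketch of both paths:

package main

import (
	"fmt"
	"os"
)

func fdCount(pid int) (int, error) {
	dir := fmt.Sprintf("/proc/%d/fd", pid)
	if st, err := os.Stat(dir); err == nil && st.Size() > 0 {
		return int(st.Size()), nil // fast path (Linux v6.2+)
	}
	ents, err := os.ReadDir(dir) // slow path: enumerate the entries
	if err != nil {
		return 0, err
	}
	return len(ents), nil
}

func main() {
	fmt.Println(fdCount(os.Getpid()))
}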


@ -18,7 +18,6 @@ import (
"fmt"
"os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@ -112,7 +111,7 @@ type ProcStat struct {
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
proc fs.FS
proc FS
}
// NewStat returns the current status information of the process.
@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
fs := FS{proc: s.proc}
stat, err := fs.Stat()
stat, err := s.proc.Stat()
if err != nil {
return 0, err
}


@ -15,6 +15,7 @@ package procfs
import (
"bytes"
"sort"
"strconv"
"strings"
@ -76,6 +77,9 @@ type ProcStatus struct {
UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
// CpusAllowedList: List of CPU cores the process is allowed to run on.
CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
case "Cpus_allowed_list":
s.CpusAllowedList = calcCpusAllowedList(vString)
}
}
// TotalCtxtSwitches returns the total number of context switches.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
func calcCpusAllowedList(cpuString string) []uint64 {
s := strings.Split(cpuString, ",")
var g []uint64
for _, cpu := range s {
// parse cpu ranges, example: 1-3=[1,2,3]
if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
startCPU, _ := strconv.ParseUint(l[0], 10, 64)
endCPU, _ := strconv.ParseUint(l[1], 10, 64)
for i := startCPU; i <= endCPU; i++ {
g = append(g, i)
}
} else if len(l) == 1 {
cpu, _ := strconv.ParseUint(l[0], 10, 64)
g = append(g, cpu)
}
}
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
return g
}
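calcCpusAllowedList expands comma-separated entries and dash ranges into a sorted slice of core numbers. A quick worked check of the same grammar, reimplemented standalone: "0-2,7" expands to [0 1 2 7]:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func expand(cpuString string) []uint64 {
	var g []uint64
	for _, cpu := range strings.Split(cpuString, ",") {
		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
			start, _ := strconv.ParseUint(l[0], 10, 64) // range like "0-2"
			end, _ := strconv.ParseUint(l[1], 10, 64)
			for i := start; i <= end; i++ {
				g = append(g, i)
			}
		} else {
			v, _ := strconv.ParseUint(l[0], 10, 64) // single core like "7"
			g = append(g, v)
		}
	}
	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
	return g
}

func main() {
	fmt.Println(expand("0-2,7")) // [0 1 2 7]
}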


@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
if err != nil {
continue
}
t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
}
return t, nil
@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
if _, err := os.Stat(taskPath); err != nil {
return Proc{}, err
}
return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
}
// Thread returns a process for a given TID of Proc.
func (proc Proc) Thread(tid int) (Proc, error) {
tfs := fsi.FS(proc.path("task"))
if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
return Proc{}, err
}
return Proc{PID: tid, fs: tfs}, nil


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package otlpmetric provides an OpenTelemetry metric Exporter that can be
// used with PeriodicReader. It transforms metricdata into OTLP and transmits
// the transformed data to OTLP receivers. The Exporter is configurable to use
// different Clients, each using a distinct transport protocol to communicate
// to an OTLP receiving endpoint.
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"

@ -0,0 +1,201 @@
[Apache License, Version 2.0 — verbatim duplicate of the license text above]


@ -0,0 +1,199 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
import (
"context"
"time"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
type client struct {
metadata metadata.MD
exportTimeout time.Duration
requestFunc retry.RequestFunc
// ourConn keeps track of where conn was created: true if created here in
// NewClient, or false if passed with an option. This is important on
// Shutdown as the conn should only be closed if we created it. Otherwise,
// it is up to the processes that passed the conn to close it.
ourConn bool
conn *grpc.ClientConn
msc colmetricpb.MetricsServiceClient
}
// newClient creates a new gRPC metric client.
func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
c := &client{
exportTimeout: cfg.Metrics.Timeout,
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
conn: cfg.GRPCConn,
}
if len(cfg.Metrics.Headers) > 0 {
c.metadata = metadata.New(cfg.Metrics.Headers)
}
if c.conn == nil {
// If the caller did not provide a ClientConn when the client was
// created, create one using the configuration they did provide.
conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
if err != nil {
return nil, err
}
// Keep track that we own the lifecycle of this conn and need to close
// it on Shutdown.
c.ourConn = true
c.conn = conn
}
c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
return c, nil
}
// Shutdown shuts down the client, freeing all resources.
//
// Any active connections to a remote endpoint are closed if they were created
// by the client. Any gRPC connection passed during creation using
// WithGRPCConn will not be closed. It is the caller's responsibility to
// handle cleanup of that resource.
func (c *client) Shutdown(ctx context.Context) error {
// The otlpmetric.Exporter synchronizes access to client methods and
// ensures this is called only once. The only thing that needs to be done
// here is to release any computational resources the client holds.
c.metadata = nil
c.requestFunc = nil
c.msc = nil
err := ctx.Err()
if c.ourConn {
closeErr := c.conn.Close()
// A context timeout error takes precedence over this error.
if err == nil && closeErr != nil {
err = closeErr
}
}
c.conn = nil
return err
}
// UploadMetrics sends protoMetrics to the connected endpoint.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
// The otlpmetric.Exporter synchronizes access to client methods, and
// ensures this is not called after the Exporter is shut down. The only thing
// to do here is send data.
select {
case <-ctx.Done():
// Do not upload if the context is already expired.
return ctx.Err()
default:
}
ctx, cancel := c.exportContext(ctx)
defer cancel()
return c.requestFunc(ctx, func(iCtx context.Context) error {
resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
})
if resp != nil && resp.PartialSuccess != nil {
msg := resp.PartialSuccess.GetErrorMessage()
n := resp.PartialSuccess.GetRejectedDataPoints()
if n != 0 || msg != "" {
err := internal.MetricPartialSuccessError(n, msg)
otel.Handle(err)
}
}
// nil is converted to OK.
if status.Code(err) == codes.OK {
// Success.
return nil
}
return err
})
}
// exportContext returns a copy of parent with an appropriate deadline and
// cancellation function based on the client's configured export timeout.
//
// It is the caller's responsibility to cancel the returned context once its
// use is complete, via the parent or directly with the returned CancelFunc, to
// ensure all resources are correctly released.
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
var (
ctx context.Context
cancel context.CancelFunc
)
if c.exportTimeout > 0 {
ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
} else {
ctx, cancel = context.WithCancel(parent)
}
if c.metadata.Len() > 0 {
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
}
return ctx, cancel
}
// retryable returns whether err identifies a request that can be retried and a
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
s := status.Convert(err)
switch s.Code() {
case codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
return true, throttleDelay(s)
}
// Not a retry-able error.
return false, 0
}
// throttleDelay returns a duration to wait for if an explicit throttle time
// is included in the response status.
func throttleDelay(s *status.Status) time.Duration {
for _, detail := range s.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
return t.RetryDelay.AsDuration()
}
}
return 0
}
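retryable treats a fixed set of gRPC codes as transient, and throttleDelay honors an explicit RetryInfo detail when the server attaches one. A standalone sketch that fabricates such a status and extracts the hint (the status construction mirrors grpc-go's errdetails usage):

package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A ResourceExhausted status carrying an explicit 3s throttle hint.
	s, err := status.New(codes.ResourceExhausted, "slow down").
		WithDetails(&errdetails.RetryInfo{RetryDelay: durationpb.New(3 * time.Second)})
	if err != nil {
		panic(err)
	}
	var delay time.Duration
	for _, d := range s.Details() { // same walk as throttleDelay above
		if t, ok := d.(*errdetails.RetryInfo); ok {
			delay = t.RetryDelay.AsDuration()
		}
	}
	fmt.Println("code:", s.Code(), "throttle:", delay) // code: ResourceExhausted throttle: 3s
}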


@ -0,0 +1,256 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
import (
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
"go.opentelemetry.io/otel/sdk/metric"
)
// Option applies a configuration option to the Exporter.
type Option interface {
applyGRPCOption(oconf.Config) oconf.Config
}
func asGRPCOptions(opts []Option) []oconf.GRPCOption {
converted := make([]oconf.GRPCOption, len(opts))
for i, o := range opts {
converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
}
return converted
}
// RetryConfig defines configuration for retrying the export of metric data
// that failed.
//
// This configuration does not define any network retry strategy. That is
// entirely handled by the gRPC ClientConn.
type RetryConfig retry.Config
type wrappedOption struct {
oconf.GRPCOption
}
func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
return w.ApplyGRPCOption(cfg)
}
// WithInsecure disables client transport security for the Exporter's gRPC
// connection, just like grpc.WithInsecure()
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used to determine client security. If the endpoint has a
// scheme of "http" or "unix" client security will be disabled. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, client security will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithInsecure() Option {
return wrappedOption{oconf.WithInsecure()}
}
// WithEndpoint sets the target endpoint the Exporter will connect to.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, "localhost:4317" will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
return wrappedOption{oconf.WithEndpoint(endpoint)}
}
// WithReconnectionPeriod sets the minimum amount of time between connection
// attempts to the target endpoint.
//
// This option has no effect if WithGRPCConn is used.
func WithReconnectionPeriod(rp time.Duration) Option {
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
cfg.ReconnectionPeriod = rp
return cfg
})}
}
func compressorToCompression(compressor string) oconf.Compression {
if compressor == "gzip" {
return oconf.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return oconf.NoCompression
}
// WithCompressor sets the compressor the gRPC client uses.
//
// It is the responsibility of the caller to ensure that the compressor set
// has been registered with google.golang.org/grpc/encoding (see
// encoding.RegisterCompressor for more information). For example, to register
// the gzip compressor import the package:
//
// import _ "google.golang.org/grpc/encoding/gzip"
//
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
// this option is not passed, that variable value will be used. That value can
// be either "none" or "gzip". If both are set,
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no compressor will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithCompressor(compressor string) Option {
return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
}
// WithHeaders will send the provided headers with each gRPC request.
//
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as a list of key value pairs.
// These pairs are expected to be in the W3C Correlation-Context format
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no user headers will be set.
func WithHeaders(headers map[string]string) Option {
return wrappedOption{oconf.WithHeaders(headers)}
}
// WithTLSCredentials sets the gRPC connection to use creds.
//
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
// this option is not passed, that variable value will be used. The value will
// be parsed as the filepath of the TLS certificate chain to use. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no TLS credentials will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
cfg.Metrics.GRPCCredentials = creds
return cfg
})}
}
// WithServiceConfig defines the default gRPC service config used.
//
// This option has no effect if WithGRPCConn is used.
func WithServiceConfig(serviceConfig string) Option {
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
cfg.ServiceConfig = serviceConfig
return cfg
})}
}
// WithDialOption sets explicit grpc.DialOptions to use when establishing a
// gRPC connection. The options here are appended to the internal grpc.DialOptions
// used so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
cfg.DialOptions = opts
return cfg
})}
}
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
//
// This option takes precedence over any other option that relates to
// establishing or persisting a gRPC connection to a target endpoint. Any
// other option of those types passed will be ignored.
//
// It is the caller's responsibility to close the passed conn. The Exporter
// Shutdown method will not close this connection.
func WithGRPCConn(conn *grpc.ClientConn) Option {
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
cfg.GRPCConn = conn
return cfg
})}
}
// WithTimeout sets the max amount of time an Exporter will attempt an export.
//
// This takes precedence over any retry settings defined by WithRetry. Once
// this time limit has been reached the export is abandoned and the metric
// data is dropped.
//
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as an integer representing the
// timeout in milliseconds. If both are set,
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, a timeout of 10 seconds will be used.
func WithTimeout(duration time.Duration) Option {
return wrappedOption{oconf.WithTimeout(duration)}
}
// WithRetry sets the retry policy for transient retryable errors that are
// returned by the target endpoint.
//
// If the target endpoint responds with not only a retryable error, but
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
// after each error for no more than a total time of 1 minute.
func WithRetry(settings RetryConfig) Option {
return wrappedOption{oconf.WithRetry(retry.Config(settings))}
}
// WithTemporalitySelector sets the TemporalitySelector the client will use to
// determine the Temporality of an instrument based on its kind. If this option
// is not used, the client will use the DefaultTemporalitySelector from the
// go.opentelemetry.io/otel/sdk/metric package.
func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
return wrappedOption{oconf.WithTemporalitySelector(selector)}
}
// WithAggregationSelector sets the AggregationSelector the client will use to
// determine the aggregation to use for an instrument based on its kind. If
// this option is not used, the reader will use the DefaultAggregationSelector
// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
// explicitly passed for a view matching an instrument.
func WithAggregationSelector(selector metric.AggregationSelector) Option {
return wrappedOption{oconf.WithAggregationSelector(selector)}
}
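Taken together, the options compose at construction time. A hedged configuration sketch (the endpoint and header values are made up; without a reachable collector the client simply dials lazily):

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	ctx := context.Background()
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("collector.example.com:4317"), // hypothetical collector
		otlpmetricgrpc.WithInsecure(),
		otlpmetricgrpc.WithHeaders(map[string]string{"x-team": "build"}), // hypothetical header
		otlpmetricgrpc.WithTimeout(10*time.Second),
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}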


@ -0,0 +1,17 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates
// with an OTLP receiving endpoint using gRPC.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"


@ -0,0 +1,167 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
import (
"context"
"fmt"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
// Exporter is an OpenTelemetry metric Exporter using gRPC.
type Exporter struct {
// Ensure synchronous access to the client across all functionality.
clientMu sync.Mutex
client interface {
UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
Shutdown(context.Context) error
}
temporalitySelector metric.TemporalitySelector
aggregationSelector metric.AggregationSelector
shutdownOnce sync.Once
}
func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
ts := cfg.Metrics.TemporalitySelector
if ts == nil {
ts = func(metric.InstrumentKind) metricdata.Temporality {
return metricdata.CumulativeTemporality
}
}
as := cfg.Metrics.AggregationSelector
if as == nil {
as = metric.DefaultAggregationSelector
}
return &Exporter{
client: c,
temporalitySelector: ts,
aggregationSelector: as,
}, nil
}
// Temporality returns the Temporality to use for an instrument kind.
func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
return e.temporalitySelector(k)
}
// Aggregation returns the Aggregation to use for an instrument kind.
func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
return e.aggregationSelector(k)
}
// Export transforms and transmits metric data to an OTLP receiver.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
defer global.Debug("OTLP/gRPC exporter export", "Data", rm)
otlpRm, err := transform.ResourceMetrics(rm)
// Best effort upload of transformable metrics.
e.clientMu.Lock()
upErr := e.client.UploadMetrics(ctx, otlpRm)
e.clientMu.Unlock()
if upErr != nil {
if err == nil {
return fmt.Errorf("failed to upload metrics: %w", upErr)
}
// Merge the two errors.
return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
}
return err
}
// ForceFlush flushes any metric data held by an exporter.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) ForceFlush(ctx context.Context) error {
// The exporter and client hold no state, nothing to flush.
return ctx.Err()
}
// Shutdown flushes all metric data held by an exporter and releases any held
// computational resources.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) Shutdown(ctx context.Context) error {
err := errShutdown
e.shutdownOnce.Do(func() {
e.clientMu.Lock()
client := e.client
e.client = shutdownClient{}
e.clientMu.Unlock()
err = client.Shutdown(ctx)
})
return err
}
var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
type shutdownClient struct{}
func (c shutdownClient) err(ctx context.Context) error {
if err := ctx.Err(); err != nil {
return err
}
return errShutdown
}
func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
return c.err(ctx)
}
func (c shutdownClient) Shutdown(ctx context.Context) error {
return c.err(ctx)
}
// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
return struct{ Type string }{Type: "OTLP/gRPC"}
}
// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
// endpoint using gRPC.
//
// If an already established gRPC ClientConn is not passed in options using
// WithGRPCConn, a connection to the OTLP endpoint will be established based
// on options. If a connection cannot be established in the lifetime of ctx,
// an error will be returned.
func New(ctx context.Context, options ...Option) (*Exporter, error) {
cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
c, err := newClient(ctx, cfg)
if err != nil {
return nil, err
}
return newExporter(c, cfg)
}
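New is designed to feed a PeriodicReader. A minimal end-to-end sketch wiring the exporter into an SDK MeterProvider (the instrument name is arbitrary; endpoint configuration is left to the OTEL_EXPORTER_OTLP_* environment variables):

package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()
	exp, err := otlpmetricgrpc.New(ctx) // honors OTEL_EXPORTER_OTLP_* env vars
	if err != nil {
		panic(err)
	}
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	counter, err := provider.Meter("example").Int64Counter("builds")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1) // exported on the reader's next cycle
}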


@ -0,0 +1,202 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/internal/global"
)
// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)
// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(string) ([]byte, error)
Namespace string
}
// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
for _, o := range opts {
o(e)
}
}
// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
return v, v != ""
}
// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(v)
}
}
}
// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
func WithBool(n string, fn func(bool)) ConfigFn {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
b := strings.ToLower(v) == "true"
fn(b)
}
}
}
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
d, err := strconv.Atoi(v)
if err != nil {
global.Error(err, "parse duration", "input", v)
return
}
fn(time.Duration(d) * time.Millisecond)
}
}
}
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
fn(stringToHeader(v))
}
}
}
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
u, err := url.Parse(v)
if err != nil {
global.Error(err, "parse url", "input", v)
return
}
fn(u)
}
}
}
// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, the file is parsed as a crypto/x509.CertPool, which is then passed to fn.
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
return func(e *EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
b, err := e.ReadFile(v)
if err != nil {
global.Error(err, "read tls ca cert file", "file", v)
return
}
c, err := createCertPool(b)
if err != nil {
global.Error(err, "create tls cert pool")
return
}
fn(c)
}
}
}
// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, the files are parsed as a crypto/tls.Certificate, which is then passed to fn.
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
return func(e *EnvOptionsReader) {
vc, okc := e.GetEnvValue(nc)
vk, okk := e.GetEnvValue(nk)
if !okc || !okk {
return
}
cert, err := e.ReadFile(vc)
if err != nil {
global.Error(err, "read tls client cert", "file", vc)
return
}
key, err := e.ReadFile(vk)
if err != nil {
global.Error(err, "read tls client key", "file", vk)
return
}
crt, err := tls.X509KeyPair(cert, key)
if err != nil {
global.Error(err, "create tls client key pair")
return
}
fn(crt)
}
}
func keyWithNamespace(ns, key string) string {
if ns == "" {
return key
}
return fmt.Sprintf("%s_%s", ns, key)
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
n, v, found := strings.Cut(header, "=")
if !found {
global.Error(errors.New("missing '='"), "parse headers", "input", header)
continue
}
name, err := url.QueryUnescape(n)
if err != nil {
global.Error(err, "escape header key", "key", n)
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(v)
if err != nil {
global.Error(err, "escape header value", "value", v)
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
func createCertPool(certBytes []byte) (*x509.CertPool, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return cp, nil
}
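A minimal sketch of how the reader and helpers above compose, written as if it lived in this (internal) package since it cannot be imported externally; the fake environment values are assumptions:

func exampleApply() {
	fakeEnv := map[string]string{
		"OTEL_EXPORTER_OTLP_TIMEOUT": "5000",                    // WithDuration reads integer milliseconds
		"OTEL_EXPORTER_OTLP_HEADERS": "api-key=secret,team=obs", // stringToHeader parses name=value pairs
	}
	r := EnvOptionsReader{
		GetEnv:    func(k string) string { return fakeEnv[k] },
		ReadFile:  func(string) ([]byte, error) { return nil, errors.New("no files in this sketch") },
		Namespace: "OTEL_EXPORTER_OTLP",
	}
	r.Apply(
		// GetEnvValue prepends the namespace, so "TIMEOUT" looks up
		// OTEL_EXPORTER_OTLP_TIMEOUT.
		WithDuration("TIMEOUT", func(d time.Duration) { fmt.Println(d) }), // prints 5s
		WithHeaders("HEADERS", func(h map[string]string) { fmt.Println(h) }),
	)
}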

View File

@ -0,0 +1,42 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go

View File

@ -0,0 +1,221 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
import (
"crypto/tls"
"crypto/x509"
"net/url"
"os"
"path"
"strings"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// DefaultEnvOptionsReader is the default reader of OTLP environment variables.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: os.ReadFile,
Namespace: "OTEL_EXPORTER_OTLP",
}
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
func ApplyGRPCEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
return cfg
}
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
func ApplyHTTPEnvConfigs(cfg Config) Config {
opts := getOptionsFromEnv()
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
return cfg
}
func getOptionsFromEnv() []GenericOption {
opts := []GenericOption{}
tlsConf := &tls.Config{}
DefaultEnvOptionsReader.Apply(
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Metrics.Endpoint = u.Host
// For OTLP/HTTP endpoint URLs without a per-signal
// configuration, the passed endpoint is used as a base URL
// and the signals are sent to these paths relative to that.
cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
opts = append(opts, withEndpointScheme(u))
opts = append(opts, newSplitOption(func(cfg Config) Config {
cfg.Metrics.Endpoint = u.Host
// For endpoint URLs for OTLP/HTTP per-signal variables, the
// URL MUST be used as-is without any modification. The only
// exception is that if a URL contains no path part, the root
// path / MUST be used.
path := u.Path
if path == "" {
path = "/"
}
cfg.Metrics.URLPath = path
return cfg
}, withEndpointForGRPC(u)))
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
)
return opts
}
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
return func(cfg Config) Config {
// For OTLP/gRPC endpoints, this is the target to which the
// exporter is going to send telemetry.
cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
return cfg
}
}
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if v, ok := e.GetEnvValue(n); ok {
cp := NoCompression
if v == "gzip" {
cp = GzipCompression
}
fn(cp)
}
}
}
func withEndpointScheme(u *url.URL) GenericOption {
switch strings.ToLower(u.Scheme) {
case "http", "unix":
return WithInsecure()
default:
return WithSecure()
}
}
// revive:disable-next-line:flag-parameter
func withInsecure(b bool) GenericOption {
if b {
return WithInsecure()
}
return WithSecure()
}
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if c.RootCAs != nil || len(c.Certificates) > 0 {
fn(c)
}
}
}
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if s, ok := e.GetEnvValue(n); ok {
switch strings.ToLower(s) {
case "cumulative":
fn(cumulativeTemporality)
case "delta":
fn(deltaTemporality)
case "lowmemory":
fn(lowMemory)
default:
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
}
}
}
}
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
return metricdata.CumulativeTemporality
}
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
switch ik {
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
return metricdata.DeltaTemporality
default:
return metricdata.CumulativeTemporality
}
}
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
switch ik {
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
return metricdata.DeltaTemporality
default:
return metricdata.CumulativeTemporality
}
}
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if s, ok := e.GetEnvValue(n); ok {
switch strings.ToLower(s) {
case "explicit_bucket_histogram":
fn(metric.DefaultAggregationSelector)
case "base2_exponential_bucket_histogram":
fn(func(kind metric.InstrumentKind) metric.Aggregation {
if kind == metric.InstrumentKindHistogram {
return metric.AggregationBase2ExponentialHistogram{
MaxSize: 160,
MaxScale: 20,
NoMinMax: false,
}
}
return metric.DefaultAggregationSelector(kind)
})
default:
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
}
}
}
}
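A hedged sketch of the environment-driven path above: with these two variables set (values taken from the switch statements), getOptionsFromEnv appends a delta temporality selector and a base-2 exponential histogram aggregation before any programmatic options are applied.

func preferDeltaViaEnv() {
	_ = os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE", "delta")
	_ = os.Setenv("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
		"base2_exponential_bucket_histogram")
	// deltaTemporality (above) then selects, per instrument kind:
	//   Counter, Histogram, ObservableCounter -> DeltaTemporality
	//   everything else                       -> CumulativeTemporality
}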

View File

@ -0,0 +1,359 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
import (
"crypto/tls"
"fmt"
"path"
"strings"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/encoding/gzip"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
"go.opentelemetry.io/otel/sdk/metric"
)
const (
// DefaultMaxAttempts describes how many times the driver
// should retry the sending of the payload in case of a
// retryable error.
DefaultMaxAttempts int = 5
// DefaultMetricsPath is the default URL path for the endpoint that
// receives metrics.
DefaultMetricsPath string = "/v1/metrics"
// DefaultBackoff is a default base backoff time used in the
// exponential backoff strategy.
DefaultBackoff time.Duration = 300 * time.Millisecond
// DefaultTimeout is a default max waiting time for the backend to process
// each span or metrics batch.
DefaultTimeout time.Duration = 10 * time.Second
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
TemporalitySelector metric.TemporalitySelector
AggregationSelector metric.AggregationSelector
}
Config struct {
// Signal specific configurations
Metrics SignalConfig
RetryConfig retry.Config
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
GRPCConn *grpc.ClientConn
}
)
// NewHTTPConfig returns a new Config with all settings applied from opts and
// any unset setting using the default HTTP config values.
func NewHTTPConfig(opts ...HTTPOption) Config {
cfg := Config{
Metrics: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
URLPath: DefaultMetricsPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
TemporalitySelector: metric.DefaultTemporalitySelector,
AggregationSelector: metric.DefaultAggregationSelector,
},
RetryConfig: retry.DefaultConfig,
}
cfg = ApplyHTTPEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyHTTPOption(cfg)
}
cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
return cfg
}
// cleanPath returns a path with surrounding whitespace trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
tmp := path.Clean(strings.TrimSpace(urlPath))
if tmp == "." {
return defaultPath
}
if !path.IsAbs(tmp) {
tmp = fmt.Sprintf("/%s", tmp)
}
return tmp
}
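Worked examples for cleanPath, with illustrative inputs:

//   cleanPath("", "/v1/metrics")                -> "/v1/metrics"  (empty, so the default is returned)
//   cleanPath("  /v1/metrics/ ", "/v1/metrics") -> "/v1/metrics"  (trimmed and cleaned)
//   cleanPath("v1//metrics", "/v1/metrics")     -> "/v1/metrics"  (cleaned and made absolute)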
// NewGRPCConfig returns a new Config with all settings applied from opts and
// any unset setting using the default gRPC config values.
func NewGRPCConfig(opts ...GRPCOption) Config {
userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
cfg := Config{
Metrics: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
URLPath: DefaultMetricsPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
TemporalitySelector: metric.DefaultTemporalitySelector,
AggregationSelector: metric.DefaultAggregationSelector,
},
RetryConfig: retry.DefaultConfig,
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
}
cfg = ApplyGRPCEnvConfigs(cfg)
for _, opt := range opts {
cfg = opt.ApplyGRPCOption(cfg)
}
if cfg.ServiceConfig != "" {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
}
// Prioritize GRPCCredentials over Insecure (passing both is an error).
if cfg.Metrics.GRPCCredentials != nil {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
} else if cfg.Metrics.Insecure {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
} else {
// Default to using the host's root CA.
creds := credentials.NewTLS(nil)
cfg.Metrics.GRPCCredentials = creds
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
}
if cfg.Metrics.Compression == GzipCompression {
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if cfg.ReconnectionPeriod != 0 {
p := grpc.ConnectParams{
Backoff: backoff.DefaultConfig,
MinConnectTimeout: cfg.ReconnectionPeriod,
}
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
}
return cfg
}
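An in-package sketch of the layering in NewGRPCConfig: hard-coded defaults first, then ApplyGRPCEnvConfigs, then explicit options, so programmatic options win. The endpoint and timeout values are assumptions for illustration:

func exampleGRPCConfig() Config {
	return NewGRPCConfig(
		WithEndpoint("collector.internal:4317"), // overrides any env-derived endpoint
		WithTimeout(5*time.Second),              // overrides the 10s DefaultTimeout
	)
}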
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(Config) Config
ApplyGRPCOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(Config) Config
// A private method to prevent users from implementing the
// interface, so that future additions to it will not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(Config) Config
}
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
return g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
return g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg Config) Config) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logic
// for gRPC and HTTP.
type splitOption struct {
httpFn func(Config) Config
grpcFn func(Config) Config
}
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
return g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
return g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(Config) Config
}
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
return h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(Config) Config
}
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
return h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Endpoint = endpoint
return cfg
})
}
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Compression = compression
return cfg
})
}
func WithURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.URLPath = urlPath
return cfg
})
}
func WithRetry(rc retry.Config) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.RetryConfig = rc
return cfg
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg Config) Config {
cfg.Metrics.TLSCfg = tlsCfg.Clone()
return cfg
}, func(cfg Config) Config {
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
return cfg
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Insecure = true
return cfg
})
}
func WithSecure() GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Insecure = false
return cfg
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Headers = headers
return cfg
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.Timeout = duration
return cfg
})
}
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.TemporalitySelector = selector
return cfg
})
}
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
return newGenericOption(func(cfg Config) Config {
cfg.Metrics.AggregationSelector = selector
return cfg
})
}
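An in-package sketch of the split-option mechanics defined above: the same GenericOption applies per-driver logic, so WithTLSClientConfig clones the tls.Config on the HTTP side but wraps it in gRPC transport credentials on the gRPC side.

func exampleSplit() {
	opt := WithTLSClientConfig(&tls.Config{MinVersion: tls.VersionTLS12})
	httpCfg := opt.ApplyHTTPOption(Config{}) // populates Metrics.TLSCfg
	grpcCfg := opt.ApplyGRPCOption(Config{}) // populates Metrics.GRPCCredentials
	_, _ = httpCfg, grpcCfg
}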

View File

@ -0,0 +1,58 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
import "time"
const (
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
DefaultCollectorGRPCPort uint16 = 4317
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
DefaultCollectorHTTPPort uint16 = 4318
// DefaultCollectorHost is the host address the Exporter will attempt
// to connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// Compression describes the compression used for payloads sent to the
// collector.
type Compression int
const (
// NoCompression tells the driver to send payloads without
// compression.
NoCompression Compression = iota
// GzipCompression tells the driver to send payloads after
// compressing them with gzip.
GzipCompression
)
// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type RetrySettings struct {
// Enabled indicates whether to retry sending batches in case of export failure.
Enabled bool
// InitialInterval is the time to wait after the first failure before retrying.
InitialInterval time.Duration
// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
// consecutive retries will always be `MaxInterval`.
MaxInterval time.Duration
// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
// Once this value is reached, the data is discarded.
MaxElapsedTime time.Duration
}

View File

@ -0,0 +1,49 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
import (
"crypto/tls"
"crypto/x509"
"errors"
"os"
)
// ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
b, err := os.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
// CreateTLSConfig creates a tls.Config from raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
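An in-package sketch combining the helper above with the split TLS option from options.go; the CA path is an assumption:

func exampleTLS() (GenericOption, error) {
	tlsCfg, err := ReadTLSConfigFromFile("/etc/otel/ca.pem")
	if err != nil {
		return nil, err
	}
	return WithTLSClientConfig(tlsCfg), nil
}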

View File

@ -0,0 +1,67 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
import "fmt"
// PartialSuccess represents the underlying error for all handling of
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
ErrorMessage string
RejectedItems int64
RejectedKind string
}
var _ error = PartialSuccess{}
// Error implements the error interface.
func (ps PartialSuccess) Error() string {
msg := ps.ErrorMessage
if msg == "" {
msg = "empty message"
}
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}
// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
_, ok := err.(PartialSuccess)
return ok
}
// TracePartialSuccessError returns an error describing a partial success
// response for the trace signal.
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
return PartialSuccess{
ErrorMessage: errorMessage,
RejectedItems: itemsRejected,
RejectedKind: "spans",
}
}
// MetricPartialSuccessError returns an error describing a partial success
// response for the metric signal.
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
return PartialSuccess{
ErrorMessage: errorMessage,
RejectedItems: itemsRejected,
RejectedKind: "metric data points",
}
}
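An in-package sketch of the errors.Is pattern recommended in the doc comment above (it assumes the standard errors package is imported, which this file currently does not do):

func classify(err error) string {
	if errors.Is(err, PartialSuccess{}) {
		// e.g. "OTLP partial success: empty message (5 metric data points rejected)"
		return "partial success"
	}
	return "other export error"
}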

View File

@ -0,0 +1,156 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
import (
"context"
"fmt"
"time"
"github.com/cenkalti/backoff/v4"
)
// DefaultConfig holds the recommended defaults to use.
var DefaultConfig = Config{
Enabled: true,
InitialInterval: 5 * time.Second,
MaxInterval: 30 * time.Second,
MaxElapsedTime: time.Minute,
}
// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
// Enabled indicates whether to retry sending batches in case of
// export failure.
Enabled bool
// InitialInterval is the time to wait after the first failure before
// retrying.
InitialInterval time.Duration
// MaxInterval is the upper bound on backoff interval. Once this value is
// reached the delay between consecutive retries will always be
// `MaxInterval`.
MaxInterval time.Duration
// MaxElapsedTime is the maximum amount of time (including retries) spent
// trying to send a request/batch. Once this value is reached, the data
// is discarded.
MaxElapsedTime time.Duration
}
// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error
// EvaluateFunc returns whether an error is retryable and whether an explicit
// throttle duration included in the error should be honored.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
// RequestFunc returns a RequestFunc that uses the evaluate function to
// determine whether requests can be retried, following the exponential
// backoff configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
if !c.Enabled {
return func(ctx context.Context, fn func(context.Context) error) error {
return fn(ctx)
}
}
return func(ctx context.Context, fn func(context.Context) error) error {
// Do not use NewExponentialBackOff since it calls Reset and the code here
// must call Reset after changing the InitialInterval (this saves an
// unnecessary call to Now).
b := &backoff.ExponentialBackOff{
InitialInterval: c.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
MaxElapsedTime: c.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
b.Reset()
for {
err := fn(ctx)
if err == nil {
return nil
}
retryable, throttle := evaluate(err)
if !retryable {
return err
}
bOff := b.NextBackOff()
if bOff == backoff.Stop {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
var delay time.Duration
if bOff > throttle {
delay = bOff
} else {
elapsed := b.GetElapsedTime()
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
return fmt.Errorf("max retry time would elapse: %w", err)
}
delay = throttle
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
return fmt.Errorf("%w: %s", ctxErr, err)
}
}
}
}
// Allow override for testing.
var waitFunc = wait
// wait takes the caller's context, and the amount of time to wait. It will
// return nil if the timer fires before or at the same time as the context's
// deadline. This indicates that the call can be retried.
func wait(ctx context.Context, delay time.Duration) error {
timer := time.NewTimer(delay)
defer timer.Stop()
select {
case <-ctx.Done():
// Handle the case where the timer and context deadline end
// simultaneously by prioritizing the timer expiration nil value
// response.
select {
case <-timer.C:
default:
return ctx.Err()
}
case <-timer.C:
}
return nil
}
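An in-package sketch of driving one export attempt through the retry wrapper; the evaluate policy shown (retry everything, no throttle hint) is purely illustrative, not the exporter's real policy:

func exportWithRetry(ctx context.Context, do func(context.Context) error) error {
	send := DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
		return true, 0 // retryable, honor no explicit throttle
	})
	return send(ctx, do)
}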

Some files were not shown because too many files have changed in this diff.