mirror of https://github.com/docker/buildx.git
Merge pull request #2577 from tonistiigi/vendor-buildkit-v0.15.0-rc1
vendor: update buildkit to v0.15.0-rc1
Commit b96ad59f64

go.mod (22 lines changed)
@@ -1,6 +1,6 @@
 module github.com/docker/buildx

-go 1.21
+go 1.21.0

 require (
 	github.com/Masterminds/semver/v3 v3.2.1
@@ -20,7 +20,7 @@ require (
 	github.com/docker/cli-docs-tool v0.8.0
 	github.com/docker/docker v27.0.3+incompatible
 	github.com/docker/go-units v0.5.0
-	github.com/gofrs/flock v0.8.1
+	github.com/gofrs/flock v0.12.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/protobuf v1.5.4
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
@@ -29,7 +29,7 @@ require (
 	github.com/hashicorp/hcl/v2 v2.20.1
 	github.com/in-toto/in-toto-golang v0.5.0
 	github.com/mitchellh/hashstructure/v2 v2.0.2
-	github.com/moby/buildkit v0.14.1-0.20240703051140-f7bda278b7e2
+	github.com/moby/buildkit v0.15.0-rc1
 	github.com/moby/sys/mountinfo v0.7.1
 	github.com/moby/sys/signal v0.7.0
 	github.com/morikuni/aec v1.0.0
@@ -50,10 +50,10 @@ require (
 	go.opentelemetry.io/otel/sdk v1.21.0
 	go.opentelemetry.io/otel/trace v1.21.0
 	golang.org/x/mod v0.17.0
-	golang.org/x/sync v0.6.0
-	golang.org/x/sys v0.20.0
-	golang.org/x/term v0.18.0
-	golang.org/x/text v0.14.0
+	golang.org/x/sync v0.7.0
+	golang.org/x/sys v0.21.0
+	golang.org/x/term v0.20.0
+	golang.org/x/text v0.15.0
 	google.golang.org/grpc v1.59.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.29.2
@@ -95,7 +95,7 @@ require (
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fvbommel/sortorder v1.0.1 // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
@@ -138,7 +138,7 @@ require (
 	github.com/prometheus/client_golang v1.17.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
@@ -160,9 +160,9 @@ require (
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
 	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/crypto v0.23.0 // indirect
 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
-	golang.org/x/net v0.23.0 // indirect
+	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/oauth2 v0.11.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.17.0 // indirect

go.sum (43 lines changed)
@@ -164,8 +164,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -183,8 +184,8 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc=
 github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.0 h1:xHW8t8GPAiGtqz7KxiSqfOEXwpOaqhpYZrTE2MQBgXY=
+github.com/gofrs/flock v0.12.0/go.mod h1:FirDy1Ing0mI2+kB6wk+vyyAH+e6xiE+EYA0jnzV9jc=
 github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
 github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
 github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -217,8 +218,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
-github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -305,8 +306,8 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z
 github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.14.1-0.20240703051140-f7bda278b7e2 h1:Kdn7g1KHhnk0g7m2I2UAptHY2Ro6bR7pK7nriGqYs6Y=
-github.com/moby/buildkit v0.14.1-0.20240703051140-f7bda278b7e2/go.mod h1:9GdI+26B4zv3BP2enl8kiupiSsX/01cu9mnhc2UAJ4g=
+github.com/moby/buildkit v0.15.0-rc1 h1:74cW+CaRhCX8b/0sTEwWM6zPxgHJb90imPZMf+E42TY=
+github.com/moby/buildkit v0.15.0-rc1/go.mod h1:ik25J3PkZrQc2dDquClV6jXMjjtoXDCLFySlfZDk5j0=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -389,8 +390,8 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -506,8 +507,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -522,8 +523,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
 golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -532,8 +533,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -548,16 +549,16 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
 golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
 | Adding a name to a logger | `WithName` | no API |
 | Modify verbosity of log entries in a call chain | `V` | no API |
 | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |

 The high-level slog API is explicitly meant to be one of many different APIs
 that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.

 ### Inspiration

@@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
 ## slog interoperability

 Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
 As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
+slog API.

-## Using a `logr.Sink` as backend for slog
+### Using a `logr.LogSink` as backend for slog

 Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
+implementing both the normal logr interface(s) and `SlogSink`. Because
 of a conflict in the parameters of the common `Enabled` method, it is [not
 possible to implement both slog.Handler and logr.Sink in the same
 type](https://github.com/golang/go/issues/59110).

 If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
 convert back and forth without adding additional wrappers, with one exception:
 when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
 log calls.

 Such an implementation should also support values that implement specific
@@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
 These drawbacks are severe enough that applications using a mixture of slog and
 logr should switch to a different backend.

-## Using a `slog.Handler` as backend for logr
+### Using a `slog.Handler` as backend for logr

 Using a plain `slog.Handler` without support for logr works better than the
 other direction:
 - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
   by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+- Stack unwinding is done by the `SlogSink` and the resulting program
   counter is passed to the `slog.Handler`.
 - Names added via `Logger.WithName` are gathered and recorded in an additional
   attribute with `logger` as key and the names separated by slash as value.
|
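For illustration only (this sketch is not part of the vendored README; it assumes Go 1.21 and go-logr/logr v1.4, and picks the stdr sink and the standard JSON handler arbitrarily), the two directions described above look like this in practice:

    package main

    import (
        "log"
        "log/slog"
        "os"

        "github.com/go-logr/logr"
        "github.com/go-logr/stdr"
    )

    func main() {
        // Direction 1: a logr.LogSink as backend for slog.
        logrLogger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
        slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
        slogLogger.Info("written through the logr sink", "key", "value")

        // Direction 2: a plain slog.Handler as backend for logr. V levels are
        // negated into slog levels, and WithName values end up in a "logger"
        // attribute, as described above.
        handler := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(-2)})
        logger := logr.FromSlogHandler(handler)
        logger.WithName("sub").V(2).Info("verbose message", "key", "value")
    }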
@@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
 with logr implementations without slog support is not important, then
 `slog.Valuer` is sufficient.

-## Context support for slog
+### Context support for slog

 Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
-
-    func HandlerFromContext(ctx context.Context) slog.Handler {
-        logger, err := logr.FromContext(ctx)
-        if err == nil {
-            return slogr.NewSlogHandler(logger)
-        }
-        return slog.Default().Handler()
-    }
-
-    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
-        return logr.NewContext(ctx, slogr.NewLogr(handler))
-    }
-
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.

 ## FAQ

|
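A minimal sketch of the context round-trip described above (illustration only, assuming Go 1.21 and logr v1.4; slog.Default() stands in for any *slog.Logger):

    package main

    import (
        "context"
        "log/slog"

        "github.com/go-logr/logr"
    )

    func main() {
        // Store a *slog.Logger under logr's context key.
        ctx := logr.NewContextWithSlogLogger(context.Background(), slog.Default())

        // Retrieve it as a logr.Logger; FromContext converts automatically.
        if l, err := logr.FromContext(ctx); err == nil {
            l.Info("via the logr API", "key", "value")
        }

        // Or retrieve it as a *slog.Logger, with no conversion needed.
        if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
            sl.Info("via the slog API", "key", "value")
        }
    }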
@ -0,0 +1,33 @@
|
|||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
|
||||
// the value is always a Logger value. With Go >= 1.21, the value can be a
|
||||
// Logger value or a slog.Logger pointer.
|
||||
type contextKey struct{}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
//go:build !go1.21
|
||||
// +build !go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return v, nil
|
||||
case *slog.Logger:
|
||||
return FromSlogHandler(v.Handler()), nil
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
|
||||
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
|
||||
v := ctx.Value(contextKey{})
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v := v.(type) {
|
||||
case Logger:
|
||||
return slog.New(ToSlogHandler(v))
|
||||
case *slog.Logger:
|
||||
return v
|
||||
default:
|
||||
// Not reached.
|
||||
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if logger, err := FromContext(ctx); err == nil {
|
||||
return logger
|
||||
}
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
|
||||
// provided slog.Logger.
|
||||
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
|
@ -100,6 +100,11 @@ type Options struct {
|
|||
// details, see docs for Go's time.Layout.
|
||||
TimestampFormat string
|
||||
|
||||
// LogInfoLevel tells funcr what key to use to log the info level.
|
||||
// If not specified, the info level will be logged as "level".
|
||||
// If this is set to "", the info level will not be logged at all.
|
||||
LogInfoLevel *string
|
||||
|
||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||
// more logs. Info logs at or below this level will be written, while logs
|
||||
// above this level will be discarded.
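As a usage sketch for the new option (not part of the diff; the key name "v" and the print function are arbitrary choices):

    package main

    import (
        "fmt"

        "github.com/go-logr/logr/funcr"
    )

    func main() {
        // Emit the info level under "v" instead of the default "level" key.
        // A pointer to an empty string would drop the level key entirely;
        // leaving LogInfoLevel nil keeps the default.
        key := "v"
        logger := funcr.New(func(prefix, args string) {
            fmt.Println(prefix, args)
        }, funcr.Options{LogInfoLevel: &key, Verbosity: 1})

        logger.V(1).Info("hello", "count", 42)
    }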
|
||||
|
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
|||
if opts.MaxLogDepth == 0 {
|
||||
opts.MaxLogDepth = defaultMaxLogDepth
|
||||
}
|
||||
if opts.LogInfoLevel == nil {
|
||||
opts.LogInfoLevel = new(string)
|
||||
*opts.LogInfoLevel = "level"
|
||||
}
|
||||
f := Formatter{
|
||||
outputFormat: outfmt,
|
||||
prefix: "",
|
||||
|
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
|||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
parentValuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
group string // for slog groups
|
||||
groupDepth int
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
|
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
|
|||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{')
|
||||
buf.WriteByte('{') // for the whole line
|
||||
}
|
||||
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
if len(f.valuesStr) > 0 {
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
if continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
} else {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
groupDepth := f.groupDepth
|
||||
if f.group != "" {
|
||||
if f.valuesStr != "" || len(args) != 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
} else {
|
||||
// The group was empty
|
||||
groupDepth--
|
||||
}
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}')
|
||||
|
||||
for i := 0; i < groupDepth; i++ {
|
||||
buf.WriteByte('}') // for the groups
|
||||
}
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}') // for the whole line
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
|
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
copied := false
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
if !copied {
|
||||
newList := make([]any, len(kvList))
|
||||
copy(newList, kvList)
|
||||
kvList = newList
|
||||
copied = true
|
||||
}
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
|
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
|
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
|
|||
}
|
||||
}
|
||||
|
||||
if escapeKeys {
|
||||
buf.WriteString(prettyString(k))
|
||||
} else {
|
||||
// this is faster
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(k)
|
||||
buf.WriteByte('"')
|
||||
}
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(':')
|
||||
} else {
|
||||
buf.WriteByte('=')
|
||||
}
|
||||
buf.WriteString(f.quoted(k, escapeKeys))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) quoted(str string, escape bool) string {
|
||||
if escape {
|
||||
return prettyString(str)
|
||||
}
|
||||
// this is faster
|
||||
return `"` + str + `"`
|
||||
}
|
||||
|
||||
func (f Formatter) comma() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ','
|
||||
}
|
||||
return ' '
|
||||
}
|
||||
|
||||
func (f Formatter) colon() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ':'
|
||||
}
|
||||
return '='
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value any) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
|
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
|
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
continue
|
||||
}
|
||||
if printComma {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
printComma = true // if we got here, we are rendering a field
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
|
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(name)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(f.quoted(name, false))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
|
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
|
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(',')
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
|
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
|||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
|
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
|
|||
return kvList
|
||||
}
|
||||
|
||||
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||
// the current saved values and starts them anew. This is needed to satisfy
|
||||
// slog.
|
||||
func (f *Formatter) startGroup(group string) {
|
||||
// Unnamed groups are just inlined.
|
||||
if group == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Any saved values can no longer be changed.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
continuing := false
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if f.group != "" && f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||
// is actually rendered (because we have N scopes to close).
|
||||
|
||||
f.parentValuesStr = buf.String()
|
||||
|
||||
// Start collecting new values.
|
||||
f.group = group
|
||||
f.groupDepth++
|
||||
f.valuesStr = ""
|
||||
f.values = nil
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
|
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
|
|||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "level", level, "msg", msg)
|
||||
if key := *f.opts.LogInfoLevel; key != "" {
|
||||
args = append(args, key, level)
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
var _ logr.SlogSink = &fnlogger{}
|
||||
|
||||
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
|
||||
|
||||
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
return true
|
||||
})
|
||||
|
||||
if record.Level >= slog.LevelError {
|
||||
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
}
|
||||
l.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithGroup(name string) logr.SlogSink {
|
||||
l.startGroup(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, grpKVs)
|
||||
}
|
||||
if attr.Key == "" {
|
||||
// slog says we have to inline these
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else {
|
||||
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
|
||||
}
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, attr.Key, attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l fnlogger) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
|
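A usage sketch for this new SlogSink implementation (illustration only, assuming Go 1.21; funcr.NewJSON is used as an arbitrary sink):

    package main

    import (
        "fmt"
        "log/slog"

        "github.com/go-logr/logr"
        "github.com/go-logr/logr/funcr"
    )

    func main() {
        // Because fnlogger implements logr.SlogSink, a funcr logger can back
        // the slog API directly, including groups added via WithGroup.
        fl := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
        sl := slog.New(logr.ToSlogHandler(fl))
        sl.WithGroup("req").Info("handled", "method", "GET", "status", 200)
    }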
@ -207,10 +207,6 @@ limitations under the License.
|
|||
// those.
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users. Passing a nil sink will create
|
||||
// a Logger which discards all log lines.
|
||||
|
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
|
|||
return l.sink == nil
|
||||
}
|
||||
|
||||
// contextKey is how we find Loggers in a context.Context.
|
||||
type contextKey struct{}
|
||||
|
||||
// FromContext returns a Logger from ctx or an error if no Logger is found.
|
||||
func FromContext(ctx context.Context) (Logger, error) {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return Logger{}, notFoundError{}
|
||||
}
|
||||
|
||||
// notFoundError exists to carry an IsNotFound method.
|
||||
type notFoundError struct{}
|
||||
|
||||
func (notFoundError) Error() string {
|
||||
return "no logr.Logger was present"
|
||||
}
|
||||
|
||||
func (notFoundError) IsNotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
|
||||
// returns a Logger that discards all log messages.
|
||||
func FromContextOrDiscard(ctx context.Context) Logger {
|
||||
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
return Discard()
|
||||
}
|
||||
|
||||
// NewContext returns a new Context, derived from ctx, which carries the
|
||||
// provided Logger.
|
||||
func NewContext(ctx context.Context, logger Logger) context.Context {
|
||||
return context.WithValue(ctx, contextKey{}, logger)
|
||||
}
|
||||
|
||||
// RuntimeInfo holds information that the logr "core" library knows which
|
||||
// LogSinks might want to know.
|
||||
type RuntimeInfo struct {
|
||||
|
|
|
@ -17,18 +17,16 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package slogr
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
type slogHandler struct {
|
||||
// May be nil, in which case all logs get discarded.
|
||||
sink logr.LogSink
|
||||
sink LogSink
|
||||
// Non-nil if sink is non-nil and implements SlogSink.
|
||||
slogSink SlogSink
|
||||
|
||||
|
@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level {
|
|||
return l.levelBias
|
||||
}
|
||||
|
||||
func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool {
|
||||
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
|
||||
}
|
||||
|
||||
|
@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
|||
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
if attr.Key != "" {
|
||||
kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
|
||||
}
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
return true
|
||||
})
|
||||
if record.Level >= slog.LevelError {
|
||||
|
@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
|||
// are called by Handle, code in slog gets skipped.
|
||||
//
|
||||
// This offset currently (Go 1.21.0) works for calls through
|
||||
// slog.New(NewSlogHandler(...)). There's no guarantee that the call
|
||||
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
|
||||
// chain won't change. Wrapping the handler will also break unwinding. It's
|
||||
// still better than not adjusting at all....
|
||||
//
|
||||
// This cannot be done when constructing the handler because NewLogr needs
|
||||
// This cannot be done when constructing the handler because FromSlogHandler needs
|
||||
// access to the original sink without this adjustment. A second copy would
|
||||
// work, but then WithAttrs would have to be called for both of them.
|
||||
func (l *slogHandler) sinkWithCallDepth() logr.LogSink {
|
||||
if sink, ok := l.sink.(logr.CallDepthLogSink); ok {
|
||||
func (l *slogHandler) sinkWithCallDepth() LogSink {
|
||||
if sink, ok := l.sink.(CallDepthLogSink); ok {
|
||||
return sink.WithCallDepth(2)
|
||||
}
|
||||
return l.sink
|
||||
|
@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
|||
return l
|
||||
}
|
||||
|
||||
copy := *l
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
copy.slogSink = l.slogSink.WithAttrs(attrs)
|
||||
copy.sink = copy.slogSink
|
||||
clone.slogSink = l.slogSink.WithAttrs(attrs)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
if attr.Key != "" {
|
||||
kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any())
|
||||
}
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
}
|
||||
copy.sink = l.sink.WithValues(kvList...)
|
||||
clone.sink = l.sink.WithValues(kvList...)
|
||||
}
|
||||
return &copy
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithGroup(name string) slog.Handler {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
copy := *l
|
||||
if l.slogSink != nil {
|
||||
copy.slogSink = l.slogSink.WithGroup(name)
|
||||
copy.sink = l.slogSink
|
||||
} else {
|
||||
copy.groupPrefix = copy.addGroupPrefix(name)
|
||||
if name == "" {
|
||||
// slog says to inline empty groups
|
||||
return l
|
||||
}
|
||||
return &copy
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithGroup(name)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (l *slogHandler) addGroupPrefix(name string) string {
|
||||
if l.groupPrefix == "" {
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
prefix := groupPrefix
|
||||
if attr.Key != "" {
|
||||
prefix = addPrefix(groupPrefix, attr.Key)
|
||||
}
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, prefix, grpKVs)
|
||||
}
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
func addPrefix(prefix, name string) string {
|
||||
if prefix == "" {
|
||||
return name
|
||||
}
|
||||
return l.groupPrefix + groupSeparator + name
|
||||
if name == "" {
|
||||
return prefix
|
||||
}
|
||||
return prefix + groupSeparator + name
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a logr.LogSink and that API did not historically document whether
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(slogr.NewSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l *slogHandler) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
result += l.levelBias // in case the original logr.Logger had a V level
|
||||
result += l.levelBias // in case the original Logger had a V level
|
||||
if result < 0 {
|
||||
result = 0 // because logr.LogSink doesn't expect negative V levels
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
|
|
@ -0,0 +1,100 @@
|
|||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromSlogHandler returns a Logger which writes to the slog.Handler.
|
||||
//
|
||||
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||
func FromSlogHandler(handler slog.Handler) Logger {
|
||||
if handler, ok := handler.(*slogHandler); ok {
|
||||
if handler.sink == nil {
|
||||
return Discard()
|
||||
}
|
||||
return New(handler.sink).V(int(handler.levelBias))
|
||||
}
|
||||
return New(&slogSink{handler: handler})
|
||||
}
|
||||
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
|
||||
//
|
||||
// The returned logger writes all records with level >= slog.LevelError as
|
||||
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||
// the Logger:
|
||||
//
|
||||
// logger := <some Logger with 0 as verbosity level>
|
||||
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||
//
|
||||
// The level of all other records gets reduced by the verbosity
|
||||
// level of the Logger and the result is negated. If it happens
|
||||
// to be negative, then it gets replaced by zero because a LogSink
|
||||
// is not expected to handled negative levels:
|
||||
//
|
||||
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||
func ToSlogHandler(logger Logger) slog.Handler {
|
||||
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||
return sink.handler
|
||||
}
|
||||
|
||||
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||
handler.slogSink = slogSink
|
||||
}
|
||||
return handler
|
||||
}
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||
// also support special slog values like slog.Group. When used as a
|
||||
// slog.Handler, the advantages are:
|
||||
//
|
||||
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||
// as intended by slog
|
||||
// - proper grouping of key/value pairs via WithGroup
|
||||
// - verbosity levels > slog.LevelInfo can be recorded
|
||||
// - less overhead
|
||||
//
|
||||
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
|
||||
// well. Developers can pick whatever API suits them better and/or mix
|
||||
// packages which use either API in the same binary with a common logging
|
||||
// implementation.
|
||||
//
|
||||
// This interface is necessary because the type implementing the LogSink
|
||||
// interface cannot also implement the slog.Handler interface due to the
|
||||
// different prototype of the common Enabled method.
|
||||
//
|
||||
// An implementation could support both interfaces in two different types, but then
|
||||
// additional interfaces would be needed to convert between those types in FromSlogHandler
|
||||
// and ToSlogHandler.
|
||||
type SlogSink interface {
|
||||
LogSink
|
||||
|
||||
Handle(ctx context.Context, record slog.Record) error
|
||||
WithAttrs(attrs []slog.Attr) SlogSink
|
||||
WithGroup(name string) SlogSink
|
||||
}
|
|
@ -23,10 +23,11 @@ limitations under the License.
|
|||
//
|
||||
// See the README in the top-level [./logr] package for a discussion of
|
||||
// interoperability.
|
||||
//
|
||||
// Deprecated: use the main logr package instead.
|
||||
package slogr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
@ -34,75 +35,27 @@ import (
|
|||
|
||||
// NewLogr returns a logr.Logger which writes to the slog.Handler.
|
||||
//
|
||||
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||
// Deprecated: use [logr.FromSlogHandler] instead.
|
||||
func NewLogr(handler slog.Handler) logr.Logger {
|
||||
if handler, ok := handler.(*slogHandler); ok {
|
||||
if handler.sink == nil {
|
||||
return logr.Discard()
|
||||
}
|
||||
return logr.New(handler.sink).V(int(handler.levelBias))
|
||||
}
|
||||
return logr.New(&slogSink{handler: handler})
|
||||
return logr.FromSlogHandler(handler)
|
||||
}
|
||||
|
||||
// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
|
||||
//
|
||||
// The returned logger writes all records with level >= slog.LevelError as
|
||||
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||
// the logr.Logger:
|
||||
//
|
||||
// logger := <some logr.Logger with 0 as verbosity level>
|
||||
// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||
//
|
||||
// The level of all other records gets reduced by the verbosity
|
||||
// level of the logr.Logger and the result is negated. If it happens
|
||||
// to be negative, then it gets replaced by zero because a LogSink
|
||||
// is not expected to handled negative levels:
|
||||
//
|
||||
// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// Deprecated: use [logr.ToSlogHandler] instead.
|
||||
func NewSlogHandler(logger logr.Logger) slog.Handler {
|
||||
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||
return sink.handler
|
||||
}
|
||||
return logr.ToSlogHandler(logger)
|
||||
}
|
||||
|
||||
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||
handler.slogSink = slogSink
|
||||
}
|
||||
return handler
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
|
||||
//
|
||||
// Deprecated: use [logr.ToSlogHandler] instead.
|
||||
func ToSlogHandler(logger logr.Logger) slog.Handler {
|
||||
return logr.ToSlogHandler(logger)
|
||||
}
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||
// also support special slog values like slog.Group. When used as a
|
||||
// slog.Handler, the advantages are:
|
||||
// logging through the slog.Logger or slog.Handler APIs better.
|
||||
//
|
||||
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||
// as intended by slog
|
||||
// - proper grouping of key/value pairs via WithGroup
|
||||
// - verbosity levels > slog.LevelInfo can be recorded
|
||||
// - less overhead
|
||||
//
|
||||
// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally
|
||||
// well. Developers can pick whatever API suits them better and/or mix
|
||||
// packages which use either API in the same binary with a common logging
|
||||
// implementation.
|
||||
//
|
||||
// This interface is necessary because the type implementing the LogSink
|
||||
// interface cannot also implement the slog.Handler interface due to the
|
||||
// different prototype of the common Enabled method.
|
||||
//
|
||||
// An implementation could support both interfaces in two different types, but then
|
||||
// additional interfaces would be needed to convert between those types in NewLogr
|
||||
// and NewSlogHandler.
|
||||
type SlogSink interface {
|
||||
logr.LogSink
|
||||
|
||||
Handle(ctx context.Context, record slog.Record) error
|
||||
WithAttrs(attrs []slog.Attr) SlogSink
|
||||
WithGroup(name string) SlogSink
|
||||
}
|
||||
// Deprecated: use [logr.SlogSink] instead.
|
||||
type SlogSink = logr.SlogSink
|
||||
|
|
|
@ -17,24 +17,22 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package slogr
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
var (
|
||||
_ logr.LogSink = &slogSink{}
|
||||
_ logr.CallDepthLogSink = &slogSink{}
|
||||
_ Underlier = &slogSink{}
|
||||
_ LogSink = &slogSink{}
|
||||
_ CallDepthLogSink = &slogSink{}
|
||||
_ Underlier = &slogSink{}
|
||||
)
|
||||
|
||||
// Underlier is implemented by the LogSink returned by NewLogr.
|
||||
// Underlier is implemented by the LogSink returned by NewFromLogHandler.
|
||||
type Underlier interface {
|
||||
// GetUnderlying returns the Handler used by the LogSink.
|
||||
GetUnderlying() slog.Handler
|
||||
|
@ -54,7 +52,7 @@ type slogSink struct {
|
|||
handler slog.Handler
|
||||
}
|
||||
|
||||
func (l *slogSink) Init(info logr.RuntimeInfo) {
|
||||
func (l *slogSink) Init(info RuntimeInfo) {
|
||||
l.callDepth = info.CallDepth
|
||||
}
|
||||
|
||||
|
@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler {
|
|||
return l.handler
|
||||
}
|
||||
|
||||
func (l *slogSink) WithCallDepth(depth int) logr.LogSink {
|
||||
func (l *slogSink) WithCallDepth(depth int) LogSink {
|
||||
newLogger := *l
|
||||
newLogger.callDepth += depth
|
||||
return &newLogger
|
||||
|
@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf
|
|||
record.AddAttrs(slog.Any(errKey, err))
|
||||
}
|
||||
record.Add(kvList...)
|
||||
l.handler.Handle(context.Background(), record)
|
||||
_ = l.handler.Handle(context.Background(), record)
|
||||
}
|
||||
|
||||
func (l slogSink) WithName(name string) logr.LogSink {
|
||||
func (l slogSink) WithName(name string) LogSink {
|
||||
if l.name != "" {
|
||||
l.name = l.name + "/"
|
||||
l.name += "/"
|
||||
}
|
||||
l.name += name
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink {
|
||||
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
|
||||
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
|
||||
return &l
|
||||
}
|
|
@ -0,0 +1,114 @@
|
|||
run:
|
||||
timeout: 10m
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- asasalint
|
||||
- bidichk
|
||||
- dogsled
|
||||
- dupword
|
||||
- durationcheck
|
||||
- err113
|
||||
- errname
|
||||
- errorlint
|
||||
- fatcontext
|
||||
- forbidigo
|
||||
- gocheckcompilerdirectives
|
||||
- gochecknoinits
|
||||
- gocritic
|
||||
- godot
|
||||
- godox
|
||||
- gofumpt
|
||||
- goheader
|
||||
- goimports
|
||||
- gomoddirectives
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- inamedparam
|
||||
- interfacebloat
|
||||
- ireturn
|
||||
- mirror
|
||||
- misspell
|
||||
- nolintlint
|
||||
- revive
|
||||
- stylecheck
|
||||
- tenv
|
||||
- testifylint
|
||||
- thelper
|
||||
- unconvert
|
||||
- unparam
|
||||
- usestdlibvars
|
||||
- whitespace
|
||||
|
||||
linters-settings:
|
||||
misspell:
|
||||
locale: US
|
||||
godox:
|
||||
keywords:
|
||||
- FIXME
|
||||
goheader:
|
||||
template: |-
|
||||
Copyright 2015 Tim Heckman. All rights reserved.
|
||||
Copyright 2018-{{ YEAR }} The Gofrs. All rights reserved.
|
||||
Use of this source code is governed by the BSD 3-Clause
|
||||
license that can be found in the LICENSE file.
|
||||
gofumpt:
|
||||
extra-rules: true
|
||||
gocritic:
|
||||
enabled-tags:
|
||||
- diagnostic
|
||||
- style
|
||||
- performance
|
||||
disabled-checks:
|
||||
- paramTypeCombine # already handle by gofumpt.extra-rules
|
||||
- whyNoLint # already handle by nonolint
|
||||
- unnamedResult
|
||||
- hugeParam
|
||||
- sloppyReassign
|
||||
- rangeValCopy
|
||||
- octalLiteral
|
||||
- ptrToRefParam
|
||||
- appendAssign
|
||||
- ruleguard
|
||||
- httpNoBody
|
||||
- exposedSyncMutex
|
||||
|
||||
revive:
|
||||
rules:
|
||||
- name: struct-tag
|
||||
- name: blank-imports
|
||||
- name: context-as-argument
|
||||
- name: context-keys-type
|
||||
- name: dot-imports
|
||||
- name: error-return
|
||||
- name: error-strings
|
||||
- name: error-naming
|
||||
- name: exported
|
||||
- name: if-return
|
||||
- name: increment-decrement
|
||||
- name: var-naming
|
||||
- name: var-declaration
|
||||
- name: package-comments
|
||||
- name: range
|
||||
- name: receiver-naming
|
||||
- name: time-naming
|
||||
- name: unexported-return
|
||||
- name: indent-error-flow
|
||||
- name: errorf
|
||||
- name: empty-block
|
||||
- name: superfluous-else
|
||||
- name: unused-parameter
|
||||
- name: unreachable-code
|
||||
- name: redefines-builtin-id
|
||||
|
||||
issues:
|
||||
exclude-use-default: true
|
||||
max-issues-per-linter: 0
|
||||
max-same-issues: 0
|
||||
|
||||
output:
|
||||
show-stats: true
|
||||
sort-results: true
|
||||
sort-order:
|
||||
- linter
|
||||
- file
|
|
@ -1,10 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
script: go test -v -check.vv -race ./...
|
||||
sudo: false
|
||||
notifications:
|
||||
email:
|
||||
on_success: never
|
||||
on_failure: always
|
|
@ -1,3 +1,4 @@
|
|||
Copyright (c) 2018-2024, The Gofrs
|
||||
Copyright (c) 2015-2020, Tim Heckman
|
||||
All rights reserved.
|
||||
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
.PHONY: lint test test_race build_cross_os
|
||||
|
||||
default: lint test build_cross_os
|
||||
|
||||
test:
|
||||
go test -v -cover ./...
|
||||
|
||||
test_race:
|
||||
CGO_ENABLED=1 go test -v -race ./...
|
||||
|
||||
lint:
|
||||
golangci-lint run
|
||||
|
||||
build_cross_os:
|
||||
./build.sh
|
|
@ -1,26 +1,22 @@
# flock
[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock)
[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock)

[![Go Reference](https://pkg.go.dev/badge/github.com/gofrs/flock.svg)](https://pkg.go.dev/github.com/gofrs/flock)
[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE)
[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock)

`flock` implements a thread-safe sync.Locker interface for file locking. It also
includes a non-blocking TryLock() function to allow locking without blocking execution.
`flock` implements a thread-safe file lock.

## License
`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.

## Go Compatibility
This package makes use of the `context` package that was introduced in Go 1.7. As such, this
package has an implicit dependency on Go 1.7+.
It also includes a non-blocking `TryLock()` function to allow locking without blocking execution.

## Installation
```

```bash
go get -u github.com/gofrs/flock
```

## Usage
```Go

```go
import "github.com/gofrs/flock"

fileLock := flock.New("/var/lock/go-lock.lock")

@ -38,4 +34,12 @@ if locked {
```

For more detailed usage information take a look at the package API docs on
[GoDoc](https://godoc.org/github.com/gofrs/flock).
[GoDoc](https://pkg.go.dev/github.com/gofrs/flock).
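For readers skimming the diff, a self-contained version of the usage shown in the README follows; the lock path and log output are illustrative assumptions.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gofrs/flock"
)

func main() {
	fileLock := flock.New("/var/lock/go-lock.lock")

	// TryLock is non-blocking: it reports false instead of waiting
	// when another process already holds the lock.
	locked, err := fileLock.TryLock()
	if err != nil {
		log.Fatal(err)
	}

	if locked {
		fmt.Println("lock acquired")
		defer fileLock.Unlock()
		// ... do work that requires exclusive access ...
	}
}
```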

## License

`flock` is released under the BSD 3-Clause License. See the [`LICENSE`](./LICENSE) file for more details.

## Project History

This project was originally `github.com/theckman/go-flock`, it was transferred to Gofrs by the original author [Tim Heckman](https://github.com/theckman).

@ -0,0 +1,21 @@
# Security Policy

## Supported Versions

We support the latest version of this library.
We do not guarantee support of previous versions.

If a defect is reported, it will generally be fixed on the latest version (provided it exists) irrespective of whether it was introduced in a prior version.

## Reporting a Vulnerability

To report a potential security vulnerability, please create a [security advisory](https://github.com/gofrs/flock/security/advisories/new).

For us to respond to your report most effectively, please include any of the following:

- Steps to reproduce or a proof-of-concept
- Any relevant information, including the versions used

## Security Scorecard

This project submits security [results](https://scorecard.dev/viewer/?uri=github.com/gofrs/flock) to the [OpenSSF Scorecard](https://securityscorecards.dev/).

@ -1,25 +0,0 @@
|
|||
version: '{build}'
|
||||
|
||||
build: false
|
||||
deploy: false
|
||||
|
||||
clone_folder: 'c:\gopath\src\github.com\gofrs\flock'
|
||||
|
||||
environment:
|
||||
GOPATH: 'c:\gopath'
|
||||
GOVERSION: '1.15'
|
||||
|
||||
init:
|
||||
- git config --global core.autocrlf input
|
||||
|
||||
install:
|
||||
- rmdir c:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
|
||||
- msiexec /i go%GOVERSION%.windows-amd64.msi /q
|
||||
- set Path=c:\go\bin;c:\gopath\bin;%Path%
|
||||
- go version
|
||||
- go env
|
||||
|
||||
test_script:
|
||||
- go get -t ./...
|
||||
- go test -race -v ./...
|
|
@ -0,0 +1,18 @@
#!/bin/bash -e

# Not supported by flock:
# - plan9/*
# - js/wasm
# - wasip1/wasm

for row in $(go tool dist list -json | jq -r '.[] | @base64'); do
	_jq() {
		echo ${row} | base64 --decode | jq -r ${1}
	}

	GOOS=$(_jq '.GOOS')
	GOARCH=$(_jq '.GOARCH')

	echo "$GOOS/$GOARCH"
	GOOS=$GOOS GOARCH=$GOARCH go build
done

@ -1,4 +1,5 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Copyright 2018-2024 The Gofrs. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.

@ -18,12 +19,29 @@ package flock

import (
	"context"
	"io/fs"
	"os"
	"runtime"
	"sync"
	"time"
)

type Option func(f *Flock)

// SetFlag sets the flag used to create/open the file.
func SetFlag(flag int) Option {
	return func(f *Flock) {
		f.flag = flag
	}
}

// SetPermissions sets the OS permissions to set on the file.
func SetPermissions(perm fs.FileMode) Option {
	return func(f *Flock) {
		f.perm = perm
	}
}

// Flock is the struct type to handle file locking. All fields are unexported,
// with access to some of the fields provided by getter methods (Path() and Locked()).
type Flock struct {

@ -32,12 +50,37 @@ type Flock struct {
	fh   *os.File
	l    bool
	r    bool

	// flag is the flag used to create/open the file.
	flag int
	// perm is the OS permissions to set on the file.
	perm fs.FileMode
}

// New returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
func New(path string) *Flock {
	return &Flock{path: path}
func New(path string, opts ...Option) *Flock {
	// create it if it doesn't exist, and open the file read-only.
	flags := os.O_CREATE
	switch runtime.GOOS {
	case "aix", "solaris", "illumos":
		// AIX cannot perform write-lock (i.e. exclusive) on a read-only file.
		flags |= os.O_RDWR
	default:
		flags |= os.O_RDONLY
	}

	f := &Flock{
		path: path,
		flag: flags,
		perm: fs.FileMode(0o600),
	}

	for _, opt := range opts {
		opt(f)
	}

	return f
}

// NewFlock returns a new instance of *Flock. The only parameter

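The new functional options are applied at construction time; a small sketch under assumed values follows (the path, flag, and permission choices are illustrative, not taken from the diff).

```go
package main

import (
	"log"
	"os"

	"github.com/gofrs/flock"
)

func main() {
	// Open the lock file read-write and group/world-readable instead of the
	// defaults (read-only on most platforms, 0o600).
	f := flock.New("/tmp/app.lock",
		flock.SetFlag(os.O_CREATE|os.O_RDWR),
		flock.SetPermissions(0o644),
	)

	if err := f.Lock(); err != nil {
		log.Fatal(err)
	}
	defer f.Unlock()
	// ... critical section ...
}
```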
@ -67,6 +110,7 @@ func (f *Flock) Path() string {
func (f *Flock) Locked() bool {
	f.m.RLock()
	defer f.m.RUnlock()

	return f.l
}

@ -76,6 +120,7 @@ func (f *Flock) Locked() bool {
func (f *Flock) RLocked() bool {
	f.m.RLock()
	defer f.m.RUnlock()

	return f.r
}

@ -83,16 +128,18 @@ func (f *Flock) String() string {
	return f.path
}

// TryLockContext repeatedly tries to take an exclusive lock until one of the
// conditions is met: TryLock succeeds, TryLock fails with error, or Context
// Done channel is closed.
// TryLockContext repeatedly tries to take an exclusive lock until one of the conditions is met:
//   - TryLock succeeds
//   - TryLock fails with error
//   - Context Done channel is closed.
func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
	return tryCtx(ctx, f.TryLock, retryDelay)
}

// TryRLockContext repeatedly tries to take a shared lock until one of the
// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
// Done channel is closed.
// TryRLockContext repeatedly tries to take a shared lock until one of the conditions is met:
//   - TryRLock succeeds
//   - TryRLock fails with error
//   - Context Done channel is closed.
func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
	return tryCtx(ctx, f.TryRLock, retryDelay)
}

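A hedged sketch of how the context-aware variants are typically driven, assuming a caller that polls every 250ms and gives up after five seconds:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	f := flock.New("/tmp/app.lock")

	// Keep retrying every 250ms, but give up after 5 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	locked, err := f.TryLockContext(ctx, 250*time.Millisecond)
	if err != nil {
		log.Fatalf("could not acquire lock: %v", err) // ctx.Err() on timeout
	}
	if locked {
		defer f.Unlock()
		// ... critical section ...
	}
}
```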
@ -101,10 +148,12 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati
	if ctx.Err() != nil {
		return false, ctx.Err()
	}

	for {
		if ok, err := fn(); ok || err != nil {
			return ok, err
		}

		select {
		case <-ctx.Done():
			return false, ctx.Err()

@ -116,29 +165,33 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati

func (f *Flock) setFh() error {
	// open a new os.File instance
	// create it if it doesn't exist, and open the file read-only.
	flags := os.O_CREATE
	if runtime.GOOS == "aix" {
		// AIX cannot preform write-lock (ie exclusive) on a
		// read-only file.
		flags |= os.O_RDWR
	} else {
		flags |= os.O_RDONLY
	}
	fh, err := os.OpenFile(f.path, flags, os.FileMode(0600))
	fh, err := os.OpenFile(f.path, f.flag, f.perm)
	if err != nil {
		return err
	}

	// set the filehandle on the struct
	// set the file handle on the struct
	f.fh = fh

	return nil
}

// ensure the file handle is closed if no lock is held
// ensure the file handle is closed if no lock is held.
func (f *Flock) ensureFhState() {
	if !f.l && !f.r && f.fh != nil {
		f.fh.Close()
		f.fh = nil
	if f.l || f.r || f.fh == nil {
		return
	}

	_ = f.fh.Close()

	f.fh = nil
}

func (f *Flock) reset() {
	f.l = false
	f.r = false

	_ = f.fh.Close()

	f.fh = nil
}
|
|
|
@ -1,281 +0,0 @@
|
|||
// Copyright 2019 Tim Heckman. All rights reserved. Use of this source code is
|
||||
// governed by the BSD 3-Clause license that can be found in the LICENSE file.
|
||||
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code implements the filelock API using POSIX 'fcntl' locks, which attach
|
||||
// to an (inode, process) pair rather than a file descriptor. To avoid unlocking
|
||||
// files prematurely when the same file is opened through different descriptors,
|
||||
// we allow only one read-lock at a time.
|
||||
//
|
||||
// This code is adapted from the Go package:
|
||||
// cmd/go/internal/lockedfile/internal/filelock
|
||||
|
||||
//+build aix
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type lockType int16
|
||||
|
||||
const (
|
||||
readLock lockType = unix.F_RDLCK
|
||||
writeLock lockType = unix.F_WRLCK
|
||||
)
|
||||
|
||||
type cmdType int
|
||||
|
||||
const (
|
||||
tryLock cmdType = unix.F_SETLK
|
||||
waitLock cmdType = unix.F_SETLKW
|
||||
)
|
||||
|
||||
type inode = uint64
|
||||
|
||||
type inodeLock struct {
|
||||
owner *Flock
|
||||
queue []<-chan *Flock
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
inodes = map[*Flock]inode{}
|
||||
locks = map[inode]inodeLock{}
|
||||
)
|
||||
|
||||
// Lock is a blocking call to try and take an exclusive file lock. It will wait
|
||||
// until it is able to obtain the exclusive file lock. It's recommended that
|
||||
// TryLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already exclusive-locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
//
|
||||
// If the *Flock has a shared lock (RLock), this may transparently replace the
|
||||
// shared lock with an exclusive lock on some UNIX-like operating systems. Be
|
||||
// careful when using exclusive locks in conjunction with shared locks
|
||||
// (RLock()), because calling Unlock() may accidentally release the exclusive
|
||||
// lock that was once a shared lock.
|
||||
func (f *Flock) Lock() error {
|
||||
return f.lock(&f.l, writeLock)
|
||||
}
|
||||
|
||||
// RLock is a blocking call to try and take a shared file lock. It will wait
|
||||
// until it is able to obtain the shared file lock. It's recommended that
|
||||
// TryRLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already shared-locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
func (f *Flock) RLock() error {
|
||||
return f.lock(&f.r, readLock)
|
||||
}
|
||||
|
||||
func (f *Flock) lock(locked *bool, flag lockType) error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
if *locked {
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.fh == nil {
|
||||
if err := f.setFh(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
if _, err := f.doLock(waitLock, flag, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*locked = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
|
||||
// POSIX locks apply per inode and process, and the lock for an inode is
|
||||
// released when *any* descriptor for that inode is closed. So we need to
|
||||
// synchronize access to each inode internally, and must serialize lock and
|
||||
// unlock calls that refer to the same inode through different descriptors.
|
||||
fi, err := f.fh.Stat()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ino := inode(fi.Sys().(*syscall.Stat_t).Ino)
|
||||
|
||||
mu.Lock()
|
||||
if i, dup := inodes[f]; dup && i != ino {
|
||||
mu.Unlock()
|
||||
return false, &os.PathError{
|
||||
Path: f.Path(),
|
||||
Err: errors.New("inode for file changed since last Lock or RLock"),
|
||||
}
|
||||
}
|
||||
|
||||
inodes[f] = ino
|
||||
|
||||
var wait chan *Flock
|
||||
l := locks[ino]
|
||||
if l.owner == f {
|
||||
// This file already owns the lock, but the call may change its lock type.
|
||||
} else if l.owner == nil {
|
||||
// No owner: it's ours now.
|
||||
l.owner = f
|
||||
} else if !blocking {
|
||||
// Already owned: cannot take the lock.
|
||||
mu.Unlock()
|
||||
return false, nil
|
||||
} else {
|
||||
// Already owned: add a channel to wait on.
|
||||
wait = make(chan *Flock)
|
||||
l.queue = append(l.queue, wait)
|
||||
}
|
||||
locks[ino] = l
|
||||
mu.Unlock()
|
||||
|
||||
if wait != nil {
|
||||
wait <- f
|
||||
}
|
||||
|
||||
err = setlkw(f.fh.Fd(), cmd, lt)
|
||||
|
||||
if err != nil {
|
||||
f.doUnlock()
|
||||
if cmd == tryLock && err == unix.EACCES {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (f *Flock) Unlock() error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
// if we aren't locked or if the lockfile instance is nil
|
||||
// just return a nil error because we are unlocked
|
||||
if (!f.l && !f.r) || f.fh == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := f.doUnlock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.fh.Close()
|
||||
|
||||
f.l = false
|
||||
f.r = false
|
||||
f.fh = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Flock) doUnlock() (err error) {
|
||||
var owner *Flock
|
||||
mu.Lock()
|
||||
ino, ok := inodes[f]
|
||||
if ok {
|
||||
owner = locks[ino].owner
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
if owner == f {
|
||||
err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
l := locks[ino]
|
||||
if len(l.queue) == 0 {
|
||||
// No waiters: remove the map entry.
|
||||
delete(locks, ino)
|
||||
} else {
|
||||
// The first waiter is sending us their file now.
|
||||
// Receive it and update the queue.
|
||||
l.owner = <-l.queue[0]
|
||||
l.queue = l.queue[1:]
|
||||
locks[ino] = l
|
||||
}
|
||||
delete(inodes, f)
|
||||
mu.Unlock()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// TryLock is the preferred function for taking an exclusive file lock. This
|
||||
// function takes an RW-mutex lock before it tries to lock the file, so there is
|
||||
// the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the exclusive
|
||||
// file lock, the function will return false instead of waiting for the lock. If
|
||||
// we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
func (f *Flock) TryLock() (bool, error) {
|
||||
return f.try(&f.l, writeLock)
|
||||
}
|
||||
|
||||
// TryRLock is the preferred function for taking a shared file lock. This
|
||||
// function takes an RW-mutex lock before it tries to lock the file, so there is
|
||||
// the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the shared file
|
||||
// lock, the function will return false instead of waiting for the lock. If we
|
||||
// get the lock, we also set the *Flock instance as being share-locked.
|
||||
func (f *Flock) TryRLock() (bool, error) {
|
||||
return f.try(&f.r, readLock)
|
||||
}
|
||||
|
||||
func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
if *locked {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if f.fh == nil {
|
||||
if err := f.setFh(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
haslock, err := f.doLock(tryLock, flag, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
*locked = haslock
|
||||
return haslock, nil
|
||||
}
|
||||
|
||||
// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
|
||||
func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
|
||||
for {
|
||||
err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
|
||||
Type: int16(lt),
|
||||
Whence: io.SeekStart,
|
||||
Start: 0,
|
||||
Len: 0, // All bytes.
|
||||
})
|
||||
if err != unix.EINTR {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
//go:build (!unix && !windows) || plan9
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
func (f *Flock) Lock() error {
|
||||
return &fs.PathError{
|
||||
Op: "Lock",
|
||||
Path: f.Path(),
|
||||
Err: errors.ErrUnsupported,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Flock) RLock() error {
|
||||
return &fs.PathError{
|
||||
Op: "RLock",
|
||||
Path: f.Path(),
|
||||
Err: errors.ErrUnsupported,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Flock) Unlock() error {
|
||||
return &fs.PathError{
|
||||
Op: "Unlock",
|
||||
Path: f.Path(),
|
||||
Err: errors.ErrUnsupported,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Flock) TryLock() (bool, error) {
|
||||
return false, f.Lock()
|
||||
}
|
||||
|
||||
func (f *Flock) TryRLock() (bool, error) {
|
||||
return false, f.RLock()
|
||||
}
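A short sketch of how a caller might branch on this fallback's unsupported-platform error; the path and log output are assumptions.

```go
package main

import (
	"errors"
	"log"

	"github.com/gofrs/flock"
)

func main() {
	lock := flock.New("/tmp/app.lock")

	// On the platforms covered by this file (e.g. plan9), Lock returns an
	// *fs.PathError wrapping errors.ErrUnsupported, which callers can detect.
	if err := lock.Lock(); err != nil {
		if errors.Is(err, errors.ErrUnsupported) {
			log.Println("file locking is not supported here; continuing without it")
			return
		}
		log.Fatal(err)
	}
	defer lock.Unlock()
}
```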
|
|
@ -1,42 +1,44 @@
|
|||
// Copyright 2015 Tim Heckman. All rights reserved.
|
||||
// Copyright 2018-2024 The Gofrs. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 3-Clause
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !aix,!windows
|
||||
//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Lock is a blocking call to try and take an exclusive file lock. It will wait
|
||||
// until it is able to obtain the exclusive file lock. It's recommended that
|
||||
// TryLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
// Lock is a blocking call to try and take an exclusive file lock.
|
||||
// It will wait until it is able to obtain the exclusive file lock.
|
||||
// It's recommended that TryLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already exclusive-locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
// If we are already exclusive-locked,
|
||||
// this function short-circuits and returns immediately assuming it can take the mutex lock.
|
||||
//
|
||||
// If the *Flock has a shared lock (RLock), this may transparently replace the
|
||||
// shared lock with an exclusive lock on some UNIX-like operating systems. Be
|
||||
// careful when using exclusive locks in conjunction with shared locks
|
||||
// (RLock()), because calling Unlock() may accidentally release the exclusive
|
||||
// lock that was once a shared lock.
|
||||
// If the *Flock has a shared lock (RLock),
|
||||
// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
|
||||
// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
|
||||
// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
|
||||
func (f *Flock) Lock() error {
|
||||
return f.lock(&f.l, syscall.LOCK_EX)
|
||||
return f.lock(&f.l, unix.LOCK_EX)
|
||||
}
|
||||
|
||||
// RLock is a blocking call to try and take a shared file lock. It will wait
|
||||
// until it is able to obtain the shared file lock. It's recommended that
|
||||
// TryRLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
// RLock is a blocking call to try and take a shared file lock.
|
||||
// It will wait until it is able to obtain the shared file lock.
|
||||
// It's recommended that TryRLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already shared-locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
// If we are already shared-locked,
|
||||
// this function short-circuits and returns immediately assuming it can take the mutex lock.
|
||||
func (f *Flock) RLock() error {
|
||||
return f.lock(&f.r, syscall.LOCK_SH)
|
||||
return f.lock(&f.r, unix.LOCK_SH)
|
||||
}
|
||||
|
||||
func (f *Flock) lock(locked *bool, flag int) error {
|
||||
|
@ -51,10 +53,12 @@ func (f *Flock) lock(locked *bool, flag int) error {
|
|||
if err := f.setFh(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
|
||||
err := unix.Flock(int(f.fh.Fd()), flag)
|
||||
if err != nil {
|
||||
shouldRetry, reopenErr := f.reopenFDOnError(err)
|
||||
if reopenErr != nil {
|
||||
return reopenErr
|
||||
|
@ -64,71 +68,74 @@ func (f *Flock) lock(locked *bool, flag int) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
|
||||
err = unix.Flock(int(f.fh.Fd()), flag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
*locked = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
|
||||
// while it is running the Locked() and RLocked() functions will be blocked.
|
||||
// Unlock is a function to unlock the file.
|
||||
// This file takes a RW-mutex lock,
|
||||
// so while it is running the Locked() and RLocked() functions will be blocked.
|
||||
//
|
||||
// This function short-circuits if we are unlocked already. If not, it calls
|
||||
// syscall.LOCK_UN on the file and closes the file descriptor. It does not
|
||||
// remove the file from disk. It's up to your application to do.
|
||||
// This function short-circuits if we are unlocked already.
|
||||
// If not, it calls unix.LOCK_UN on the file and closes the file descriptor.
|
||||
// It does not remove the file from disk. It's up to your application to do.
|
||||
//
|
||||
// Please note, if your shared lock became an exclusive lock this may
|
||||
// unintentionally drop the exclusive lock if called by the consumer that
|
||||
// believes they have a shared lock. Please see Lock() for more details.
|
||||
// Please note,
|
||||
// if your shared lock became an exclusive lock,
|
||||
// this may unintentionally drop the exclusive lock if called by the consumer that believes they have a shared lock.
|
||||
// Please see Lock() for more details.
|
||||
func (f *Flock) Unlock() error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
// if we aren't locked or if the lockfile instance is nil
|
||||
// just return a nil error because we are unlocked
|
||||
// If we aren't locked or if the lockfile instance is nil
|
||||
// just return a nil error because we are unlocked.
|
||||
if (!f.l && !f.r) || f.fh == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mark the file as unlocked
|
||||
if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
|
||||
// Mark the file as unlocked.
|
||||
err := unix.Flock(int(f.fh.Fd()), unix.LOCK_UN)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.fh.Close()
|
||||
|
||||
f.l = false
|
||||
f.r = false
|
||||
f.fh = nil
|
||||
f.reset()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TryLock is the preferred function for taking an exclusive file lock. This
|
||||
// function takes an RW-mutex lock before it tries to lock the file, so there is
|
||||
// the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
// TryLock is the preferred function for taking an exclusive file lock.
|
||||
// This function takes an RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time
|
||||
// if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the exclusive
|
||||
// file lock, the function will return false instead of waiting for the lock. If
|
||||
// we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the exclusive file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
func (f *Flock) TryLock() (bool, error) {
|
||||
return f.try(&f.l, syscall.LOCK_EX)
|
||||
return f.try(&f.l, unix.LOCK_EX)
|
||||
}
|
||||
|
||||
// TryRLock is the preferred function for taking a shared file lock. This
|
||||
// function takes an RW-mutex lock before it tries to lock the file, so there is
|
||||
// the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
// TryRLock is the preferred function for taking a shared file lock.
|
||||
// This function takes an RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time
|
||||
// if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the shared file
|
||||
// lock, the function will return false instead of waiting for the lock. If we
|
||||
// get the lock, we also set the *Flock instance as being share-locked.
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the shared file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being share-locked.
|
||||
func (f *Flock) TryRLock() (bool, error) {
|
||||
return f.try(&f.r, syscall.LOCK_SH)
|
||||
return f.try(&f.r, unix.LOCK_SH)
|
||||
}
|
||||
|
||||
func (f *Flock) try(locked *bool, flag int) (bool, error) {
|
||||
|
@ -143,22 +150,25 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) {
|
|||
if err := f.setFh(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
var retried bool
|
||||
retry:
|
||||
err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
|
||||
err := unix.Flock(int(f.fh.Fd()), flag|unix.LOCK_NB)
|
||||
|
||||
switch err {
|
||||
case syscall.EWOULDBLOCK:
|
||||
switch {
|
||||
case errors.Is(err, unix.EWOULDBLOCK):
|
||||
return false, nil
|
||||
case nil:
|
||||
case err == nil:
|
||||
*locked = true
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if !retried {
|
||||
if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
|
||||
shouldRetry, reopenErr := f.reopenFDOnError(err)
|
||||
if reopenErr != nil {
|
||||
return false, reopenErr
|
||||
} else if shouldRetry {
|
||||
retried = true
|
||||
|
@ -169,29 +179,34 @@ retry:
|
|||
return false, err
|
||||
}
|
||||
|
||||
// reopenFDOnError determines whether we should reopen the file handle
|
||||
// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
|
||||
// Since Linux 3.4 (commit 55725513)
|
||||
// Probably NFSv4 where flock() is emulated by fcntl().
|
||||
// reopenFDOnError determines whether we should reopen the file handle in readwrite mode and try again.
|
||||
// This comes from `util-linux/sys-utils/flock.c`:
|
||||
// > Since Linux 3.4 (commit 55725513)
|
||||
// > Probably NFSv4 where flock() is emulated by fcntl().
|
||||
func (f *Flock) reopenFDOnError(err error) (bool, error) {
|
||||
if err != syscall.EIO && err != syscall.EBADF {
|
||||
if !errors.Is(err, unix.EIO) && !errors.Is(err, unix.EBADF) {
|
||||
return false, nil
|
||||
}
|
||||
if st, err := f.fh.Stat(); err == nil {
|
||||
// if the file is able to be read and written
|
||||
if st.Mode()&0600 == 0600 {
|
||||
f.fh.Close()
|
||||
f.fh = nil
|
||||
|
||||
// reopen in read-write mode and set the filehandle
|
||||
fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
f.fh = fh
|
||||
return true, nil
|
||||
}
|
||||
st, err := f.fh.Stat()
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
if st.Mode()&f.perm != f.perm {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
_ = f.fh.Close()
|
||||
f.fh = nil
|
||||
|
||||
// reopen in read-write mode and set the file handle
|
||||
fh, err := os.OpenFile(f.path, f.flag, f.perm)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
f.fh = fh
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,393 @@
|
|||
// Copyright 2015 Tim Heckman. All rights reserved.
|
||||
// Copyright 2018-2024 The Gofrs. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 3-Clause
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code implements the filelock API using POSIX 'fcntl' locks,
|
||||
// which attach to an (inode, process) pair rather than a file descriptor.
|
||||
// To avoid unlocking files prematurely when the same file is opened through different descriptors,
|
||||
// we allow only one read-lock at a time.
|
||||
//
|
||||
// This code is adapted from the Go package (go.22):
|
||||
// https://github.com/golang/go/blob/release-branch.go1.22/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go
|
||||
|
||||
//go:build aix || (solaris && !illumos)
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L28
|
||||
type lockType int16
|
||||
|
||||
// String returns the name of the function corresponding to lt
|
||||
// (Lock, RLock, or Unlock).
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go#L67
|
||||
func (lt lockType) String() string {
|
||||
switch lt {
|
||||
case readLock:
|
||||
return "RLock"
|
||||
case writeLock:
|
||||
return "Lock"
|
||||
default:
|
||||
return "Unlock"
|
||||
}
|
||||
}
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L30-L33
|
||||
const (
|
||||
readLock lockType = unix.F_RDLCK
|
||||
writeLock lockType = unix.F_WRLCK
|
||||
)
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L35
|
||||
type inode = uint64
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L37-L40
|
||||
type inodeLock struct {
|
||||
owner *Flock
|
||||
queue []<-chan *Flock
|
||||
}
|
||||
|
||||
type cmdType int
|
||||
|
||||
const (
|
||||
tryLock cmdType = unix.F_SETLK
|
||||
waitLock cmdType = unix.F_SETLKW
|
||||
)
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
inodes = map[*Flock]inode{}
|
||||
locks = map[inode]inodeLock{}
|
||||
)
|
||||
|
||||
// Lock is a blocking call to try and take an exclusive file lock.
|
||||
// It will wait until it is able to obtain the exclusive file lock.
|
||||
// It's recommended that TryLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already exclusive-locked, this function short-circuits and
|
||||
// returns immediately assuming it can take the mutex lock.
|
||||
//
|
||||
// If the *Flock has a shared lock (RLock),
|
||||
// this may transparently replace the shared lock with an exclusive lock on some UNIX-like operating systems.
|
||||
// Be careful when using exclusive locks in conjunction with shared locks (RLock()),
|
||||
// because calling Unlock() may accidentally release the exclusive lock that was once a shared lock.
|
||||
func (f *Flock) Lock() error {
|
||||
return f.lock(&f.l, writeLock)
|
||||
}
|
||||
|
||||
// RLock is a blocking call to try and take a shared file lock.
|
||||
// It will wait until it is able to obtain the shared file lock.
|
||||
// It's recommended that TryRLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already shared-locked, this function short-circuits and
|
||||
// returns immediately assuming it can take the mutex lock.
|
||||
func (f *Flock) RLock() error {
|
||||
return f.lock(&f.r, readLock)
|
||||
}
|
||||
|
||||
func (f *Flock) lock(locked *bool, flag lockType) error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
if *locked {
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.fh == nil {
|
||||
if err := f.setFh(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
_, err := f.doLock(waitLock, flag, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*locked = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L48
|
||||
func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) {
|
||||
// POSIX locks apply per inode and process,
|
||||
// and the lock for an inode is released when *any* descriptor for that inode is closed.
|
||||
// So we need to synchronize access to each inode internally,
|
||||
// and must serialize lock and unlock calls that refer to the same inode through different descriptors.
|
||||
fi, err := f.fh.Stat()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Note(ldez): don't replace `syscall.Stat_t` by `unix.Stat_t` because `FileInfo.Sys()` returns `syscall.Stat_t`
|
||||
ino := fi.Sys().(*syscall.Stat_t).Ino
|
||||
|
||||
mu.Lock()
|
||||
|
||||
if i, dup := inodes[f]; dup && i != ino {
|
||||
mu.Unlock()
|
||||
return false, &fs.PathError{
|
||||
Op: lt.String(),
|
||||
Path: f.Path(),
|
||||
Err: errors.New("inode for file changed since last Lock or RLock"),
|
||||
}
|
||||
}
|
||||
|
||||
inodes[f] = ino
|
||||
|
||||
var wait chan *Flock
|
||||
|
||||
l := locks[ino]
|
||||
|
||||
switch {
|
||||
case l.owner == f:
|
||||
// This file already owns the lock, but the call may change its lock type.
|
||||
case l.owner == nil:
|
||||
// No owner: it's ours now.
|
||||
l.owner = f
|
||||
|
||||
case !blocking:
|
||||
// Already owned: cannot take the lock.
|
||||
mu.Unlock()
|
||||
return false, nil
|
||||
|
||||
default:
|
||||
// Already owned: add a channel to wait on.
|
||||
wait = make(chan *Flock)
|
||||
l.queue = append(l.queue, wait)
|
||||
}
|
||||
|
||||
locks[ino] = l
|
||||
|
||||
mu.Unlock()
|
||||
|
||||
if wait != nil {
|
||||
wait <- f
|
||||
}
|
||||
|
||||
// Spurious EDEADLK errors arise on platforms that compute deadlock graphs at
|
||||
// the process, rather than thread, level. Consider processes P and Q, with
|
||||
// threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be
|
||||
// reported as a deadlock on systems that consider only process granularity:
|
||||
//
|
||||
// P.1 locks file A.
|
||||
// Q.3 locks file B.
|
||||
// Q.3 blocks on file A.
|
||||
// P.2 blocks on file B. (This is erroneously reported as a deadlock.)
|
||||
// P.1 unlocks file A.
|
||||
// Q.3 unblocks and locks file A.
|
||||
// Q.3 unlocks files A and B.
|
||||
// P.2 unblocks and locks file B.
|
||||
// P.2 unlocks file B.
|
||||
//
|
||||
// These spurious errors were observed in practice on AIX and Solaris in
|
||||
// cmd/go: see https://golang.org/issue/32817.
|
||||
//
|
||||
// We work around this bug by treating EDEADLK as always spurious. If there
|
||||
// really is a lock-ordering bug between the interacting processes, it will
|
||||
// become a livelock instead, but that's not appreciably worse than if we had
|
||||
// a proper flock implementation (which generally does not even attempt to
|
||||
// diagnose deadlocks).
|
||||
//
|
||||
// In the above example, that changes the trace to:
|
||||
//
|
||||
// P.1 locks file A.
|
||||
// Q.3 locks file B.
|
||||
// Q.3 blocks on file A.
|
||||
// P.2 spuriously fails to lock file B and goes to sleep.
|
||||
// P.1 unlocks file A.
|
||||
// Q.3 unblocks and locks file A.
|
||||
// Q.3 unlocks files A and B.
|
||||
// P.2 wakes up and locks file B.
|
||||
// P.2 unlocks file B.
|
||||
//
|
||||
// We know that the retry loop will not introduce a *spurious* livelock
|
||||
// because, according to the POSIX specification, EDEADLK is only to be
|
||||
// returned when “the lock is blocked by a lock from another process”.
|
||||
// If that process is blocked on some lock that we are holding, then the
|
||||
// resulting livelock is due to a real deadlock (and would manifest as such
|
||||
// when using, for example, the flock implementation of this package).
|
||||
// If the other process is *not* blocked on some other lock that we are
|
||||
// holding, then it will eventually release the requested lock.
|
||||
|
||||
nextSleep := 1 * time.Millisecond
|
||||
const maxSleep = 500 * time.Millisecond
|
||||
for {
|
||||
err = setlkw(f.fh.Fd(), cmd, lt)
|
||||
if !errors.Is(err, unix.EDEADLK) {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(nextSleep)
|
||||
|
||||
nextSleep += nextSleep
|
||||
if nextSleep > maxSleep {
|
||||
nextSleep = maxSleep
|
||||
}
|
||||
// Apply 10% jitter to avoid synchronizing collisions when we finally unblock.
|
||||
nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
f.doUnlock()
|
||||
|
||||
if cmd == tryLock && errors.Is(err, unix.EACCES) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, &fs.PathError{
|
||||
Op: lt.String(),
|
||||
Path: f.Path(),
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
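As a standalone illustration of the EDEADLK retry schedule described in the comments above (not part of the vendored file): the delay doubles from 1ms, is capped at 500ms, and gets roughly ±5% jitter each round.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	nextSleep := 1 * time.Millisecond
	const maxSleep = 500 * time.Millisecond

	for i := 0; i < 12; i++ {
		fmt.Println(nextSleep) // delay that would be slept before the next retry

		nextSleep += nextSleep // exponential growth
		if nextSleep > maxSleep {
			nextSleep = maxSleep
		}
		// Jitter in the range [-5%, +5%] of the current delay, matching the
		// 0.1*rand.Float64() - 0.05 expression used by the vendored code.
		nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep))
	}
}
```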
|
||||
|
||||
func (f *Flock) Unlock() error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
// If we aren't locked or if the lockfile instance is nil
|
||||
// just return a nil error because we are unlocked.
|
||||
if (!f.l && !f.r) || f.fh == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := f.doUnlock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.reset()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L163
|
||||
func (f *Flock) doUnlock() (err error) {
|
||||
var owner *Flock
|
||||
|
||||
mu.Lock()
|
||||
|
||||
ino, ok := inodes[f]
|
||||
if ok {
|
||||
owner = locks[ino].owner
|
||||
}
|
||||
|
||||
mu.Unlock()
|
||||
|
||||
if owner == f {
|
||||
err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
|
||||
l := locks[ino]
|
||||
|
||||
if len(l.queue) == 0 {
|
||||
// No waiters: remove the map entry.
|
||||
delete(locks, ino)
|
||||
} else {
|
||||
// The first waiter is sending us their file now.
|
||||
// Receive it and update the queue.
|
||||
l.owner = <-l.queue[0]
|
||||
l.queue = l.queue[1:]
|
||||
locks[ino] = l
|
||||
}
|
||||
|
||||
delete(inodes, f)
|
||||
|
||||
mu.Unlock()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// TryLock is the preferred function for taking an exclusive file lock.
|
||||
// This function takes an RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time
|
||||
// if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the exclusive file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
func (f *Flock) TryLock() (bool, error) {
|
||||
return f.try(&f.l, writeLock)
|
||||
}
|
||||
|
||||
// TryRLock is the preferred function for taking a shared file lock.
|
||||
// This function takes an RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time
|
||||
// if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the shared file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being share-locked.
|
||||
func (f *Flock) TryRLock() (bool, error) {
|
||||
return f.try(&f.r, readLock)
|
||||
}
|
||||
|
||||
func (f *Flock) try(locked *bool, flag lockType) (bool, error) {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
||||
if *locked {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if f.fh == nil {
|
||||
if err := f.setFh(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
hasLock, err := f.doLock(tryLock, flag, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
*locked = hasLock
|
||||
|
||||
return hasLock, nil
|
||||
}
|
||||
|
||||
// setlkw calls FcntlFlock with cmd for the entire file indicated by fd.
|
||||
// https://github.com/golang/go/blob/09aeb6e33ab426eff4676a3baf694d5a3019e9fc/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go#L198
|
||||
func setlkw(fd uintptr, cmd cmdType, lt lockType) error {
|
||||
for {
|
||||
err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{
|
||||
Type: int16(lt),
|
||||
Whence: io.SeekStart,
|
||||
Start: 0,
|
||||
Len: 0, // All bytes.
|
||||
})
|
||||
if !errors.Is(err, unix.EINTR) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,76 +0,0 @@
|
|||
// Copyright 2015 Tim Heckman. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 3-Clause
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32, _ = syscall.LoadLibrary("kernel32.dll")
|
||||
procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx")
|
||||
procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
|
||||
)
|
||||
|
||||
const (
|
||||
winLockfileFailImmediately = 0x00000001
|
||||
winLockfileExclusiveLock = 0x00000002
|
||||
winLockfileSharedLock = 0x00000000
|
||||
)
|
||||
|
||||
// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows
|
||||
// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
|
||||
//
|
||||
// > The function requests an exclusive lock. Otherwise, it requests a shared
|
||||
// > lock.
|
||||
//
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
|
||||
|
||||
func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
|
||||
r1, _, errNo := syscall.Syscall6(
|
||||
uintptr(procLockFileEx),
|
||||
6,
|
||||
uintptr(handle),
|
||||
uintptr(flags),
|
||||
uintptr(reserved),
|
||||
uintptr(numberOfBytesToLockLow),
|
||||
uintptr(numberOfBytesToLockHigh),
|
||||
uintptr(unsafe.Pointer(offset)))
|
||||
|
||||
if r1 != 1 {
|
||||
if errNo == 0 {
|
||||
return false, syscall.EINVAL
|
||||
}
|
||||
|
||||
return false, errNo
|
||||
}
|
||||
|
||||
return true, 0
|
||||
}
|
||||
|
||||
func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
|
||||
r1, _, errNo := syscall.Syscall6(
|
||||
uintptr(procUnlockFileEx),
|
||||
5,
|
||||
uintptr(handle),
|
||||
uintptr(reserved),
|
||||
uintptr(numberOfBytesToLockLow),
|
||||
uintptr(numberOfBytesToLockHigh),
|
||||
uintptr(unsafe.Pointer(offset)),
|
||||
0)
|
||||
|
||||
if r1 != 1 {
|
||||
if errNo == 0 {
|
||||
return false, syscall.EINVAL
|
||||
}
|
||||
|
||||
return false, errNo
|
||||
}
|
||||
|
||||
return true, 0
|
||||
}
|
|
@ -1,35 +1,48 @@
|
|||
// Copyright 2015 Tim Heckman. All rights reserved.
|
||||
// Copyright 2018-2024 The Gofrs. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 3-Clause
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build windows
|
||||
|
||||
package flock
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// ErrorLockViolation is the error code returned from the Windows syscall when a
|
||||
// lock would block and you ask to fail immediately.
|
||||
const ErrorLockViolation syscall.Errno = 0x21 // 33
|
||||
|
||||
// Lock is a blocking call to try and take an exclusive file lock. It will wait
|
||||
// until it is able to obtain the exclusive file lock. It's recommended that
|
||||
// TryLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows `LockFileEX` docs,
|
||||
// which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
|
||||
//
|
||||
// If we are already locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
// > The function requests an exclusive lock. Otherwise, it requests a shared lock.
|
||||
//
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
|
||||
const winLockfileSharedLock = 0x00000000
|
||||
|
||||
// ErrorLockViolation is the error code returned from the Windows syscall when a lock would block,
|
||||
// and you ask to fail immediately.
|
||||
const ErrorLockViolation windows.Errno = 0x21 // 33
|
||||
|
||||
// Lock is a blocking call to try and take an exclusive file lock.
|
||||
// It will wait until it is able to obtain the exclusive file lock.
|
||||
// It's recommended that TryLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already locked, this function short-circuits and
|
||||
// returns immediately assuming it can take the mutex lock.
|
||||
func (f *Flock) Lock() error {
|
||||
return f.lock(&f.l, winLockfileExclusiveLock)
|
||||
return f.lock(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
|
||||
}
|
||||
|
||||
// RLock is a blocking call to try and take a shared file lock. It will wait
|
||||
// until it is able to obtain the shared file lock. It's recommended that
|
||||
// TryRLock() be used over this function. This function may block the ability to
|
||||
// query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
// RLock is a blocking call to try and take a shared file lock.
|
||||
// It will wait until it is able to obtain the shared file lock.
|
||||
// It's recommended that TryRLock() be used over this function.
|
||||
// This function may block the ability to query the current Locked() or RLocked() status due to a RW-mutex lock.
|
||||
//
|
||||
// If we are already locked, this function short-circuits and returns
|
||||
// immediately assuming it can take the mutex lock.
|
||||
// If we are already locked, this function short-circuits and
|
||||
// returns immediately assuming it can take the mutex lock.
|
||||
func (f *Flock) RLock() error {
|
||||
return f.lock(&f.r, winLockfileSharedLock)
|
||||
}
|
||||
|
@ -46,23 +59,28 @@ func (f *Flock) lock(locked *bool, flag uint32) error {
|
|||
if err := f.setFh(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
|
||||
return errNo
|
||||
err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag, 0, 1, 0, &windows.Overlapped{})
|
||||
if err != nil && !errors.Is(err, windows.Errno(0)) {
|
||||
return err
|
||||
}
|
||||
|
||||
*locked = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
|
||||
// while it is running the Locked() and RLocked() functions will be blocked.
|
||||
// Unlock is a function to unlock the file.
|
||||
// This file takes a RW-mutex lock,
|
||||
// so while it is running the Locked() and RLocked() functions will be blocked.
|
||||
//
|
||||
// This function short-circuits if we are unlocked already. If not, it calls
|
||||
// UnlockFileEx() on the file and closes the file descriptor. It does not remove
|
||||
// the file from disk. It's up to your application to do.
|
||||
// This function short-circuits if we are unlocked already.
|
||||
// If not, it calls UnlockFileEx() on the file and closes the file descriptor.
|
||||
// It does not remove the file from disk.
|
||||
// It's up to your application to do.
|
||||
func (f *Flock) Unlock() error {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
|
@ -74,39 +92,37 @@ func (f *Flock) Unlock() error {
|
|||
}
|
||||
|
||||
// mark the file as unlocked
|
||||
if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
|
||||
return errNo
|
||||
err := windows.UnlockFileEx(windows.Handle(f.fh.Fd()), 0, 1, 0, &windows.Overlapped{})
|
||||
if err != nil && !errors.Is(err, windows.Errno(0)) {
|
||||
return err
|
||||
}
|
||||
|
||||
f.fh.Close()
|
||||
|
||||
f.l = false
|
||||
f.r = false
|
||||
f.fh = nil
|
||||
f.reset()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TryLock is the preferred function for taking an exclusive file lock. This
|
||||
// function does take a RW-mutex lock before it tries to lock the file, so there
|
||||
// is the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
// TryLock is the preferred function for taking an exclusive file lock.
|
||||
// This function does take a RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time
|
||||
// if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the exclusive
|
||||
// file lock, the function will return false instead of waiting for the lock. If
|
||||
// we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the exclusive file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being exclusive-locked.
|
||||
func (f *Flock) TryLock() (bool, error) {
|
||||
return f.try(&f.l, winLockfileExclusiveLock)
|
||||
return f.try(&f.l, windows.LOCKFILE_EXCLUSIVE_LOCK)
|
||||
}
|
||||
|
||||
// TryRLock is the preferred function for taking a shared file lock. This
|
||||
// function does take a RW-mutex lock before it tries to lock the file, so there
|
||||
// is the possibility that this function may block for a short time if another
|
||||
// goroutine is trying to take any action.
|
||||
// TryRLock is the preferred function for taking a shared file lock.
|
||||
// This function does take a RW-mutex lock before it tries to lock the file,
|
||||
// so there is the possibility that this function may block for a short time if another goroutine is trying to take any action.
|
||||
//
|
||||
// The actual file lock is non-blocking. If we are unable to get the shared file
|
||||
// lock, the function will return false instead of waiting for the lock. If we
|
||||
// get the lock, we also set the *Flock instance as being shared-locked.
|
||||
// The actual file lock is non-blocking.
|
||||
// If we are unable to get the shared file lock,
|
||||
// the function will return false instead of waiting for the lock.
|
||||
// If we get the lock, we also set the *Flock instance as being shared-locked.
|
||||
func (f *Flock) TryRLock() (bool, error) {
|
||||
return f.try(&f.r, winLockfileSharedLock)
|
||||
}
|
||||
|
@ -123,17 +139,17 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
|
|||
if err := f.setFh(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
defer f.ensureFhState()
|
||||
}
|
||||
|
||||
_, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
|
||||
|
||||
if errNo > 0 {
|
||||
if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
|
||||
err := windows.LockFileEx(windows.Handle(f.fh.Fd()), flag|windows.LOCKFILE_FAIL_IMMEDIATELY, 0, 1, 0, &windows.Overlapped{})
|
||||
if err != nil && !errors.Is(err, windows.Errno(0)) {
|
||||
if errors.Is(err, ErrorLockViolation) || errors.Is(err, windows.ERROR_IO_PENDING) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return false, errNo
|
||||
return false, err
|
||||
}
|
||||
|
||||
*locked = true
|
||||
|
|
|
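The doc comments above contrast the blocking Lock()/RLock() calls with the non-blocking TryLock()/TryRLock() variants that this rewrite ports to golang.org/x/sys/windows. A minimal usage sketch of the gofrs/flock API follows; the lock-file path is illustrative only, not taken from the patch.

package main

import (
	"fmt"
	"log"

	"github.com/gofrs/flock"
)

func main() {
	f := flock.New(`C:\Temp\buildx.lock`) // illustrative path, not from the patch

	// TryLock is non-blocking: false with a nil error means another process
	// already holds the exclusive lock.
	locked, err := f.TryLock()
	if err != nil {
		log.Fatal(err)
	}
	if !locked {
		fmt.Println("lock is busy, skipping")
		return
	}
	defer f.Unlock()

	fmt.Println("exclusive lock held:", f.Locked())
}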
@ -132,4 +132,20 @@ var (
			return fmt.Sprintf("Setting platform to predefined %s in FROM is redundant as this is the default behavior", platformVar)
		},
	}
	RuleSecretsUsedInArgOrEnv = LinterRule[func(string, string) string]{
		Name: "SecretsUsedInArgOrEnv",
		Description: "Sensitive data should not be used in the ARG or ENV commands",
		URL: "https://docs.docker.com/go/dockerfile/rule/secrets-used-in-arg-or-env/",
		Format: func(instruction, secretKey string) string {
			return fmt.Sprintf("Do not use ARG or ENV instructions for sensitive data (%s %q)", instruction, secretKey)
		},
	}
	RuleInvalidDefaultArgInFrom = LinterRule[func(string) string]{
		Name: "InvalidDefaultArgInFrom",
		Description: "Default value for global ARG results in an empty or invalid base image name",
		URL: "https://docs.docker.com/go/dockerfile/rule/invalid-default-arg-in-from/",
		Format: func(baseName string) string {
			return fmt.Sprintf("Default value for ARG %v results in empty or invalid base image name", baseName)
		},
	}
)

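The Format callbacks above are what turn an offending Dockerfile instruction into the linter warning text. A self-contained sketch of that call shape, using a trimmed-down stand-in for buildkit's LinterRule type rather than the real one:

package main

import "fmt"

// LinterRule here is a simplified stand-in, just enough to show how the
// Format callbacks shown in the diff above build the user-facing message.
type LinterRule[F any] struct {
	Name   string
	Format F
}

var ruleSecretsUsedInArgOrEnv = LinterRule[func(string, string) string]{
	Name: "SecretsUsedInArgOrEnv",
	Format: func(instruction, secretKey string) string {
		return fmt.Sprintf("Do not use ARG or ENV instructions for sensitive data (%s %q)", instruction, secretKey)
	},
}

func main() {
	// e.g. a Dockerfile containing: ARG AWS_SECRET_ACCESS_KEY
	fmt.Println(ruleSecretsUsedInArgOrEnv.Name + ": " +
		ruleSecretsUsedInArgOrEnv.Format("ARG", "AWS_SECRET_ACCESS_KEY"))
}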
@ -119,20 +119,13 @@ func NewGitCLI(opts ...Option) *GitCLI {
// New returns a new git client with the same config as the current one, but
// with the given options applied on top.
func (cli *GitCLI) New(opts ...Option) *GitCLI {
	c := &GitCLI{
		git: cli.git,
		dir: cli.dir,
		workTree: cli.workTree,
		gitDir: cli.gitDir,
		args: append([]string{}, cli.args...),
		streams: cli.streams,
		sshAuthSock: cli.sshAuthSock,
		sshKnownHosts: cli.sshKnownHosts,
	}
	clone := *cli
	clone.args = append([]string{}, cli.args...)

	for _, opt := range opts {
		opt(c)
		opt(&clone)
	}
	return c
	return &clone
}

// Run executes a git command with the given args.

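The New() rewrite above replaces the field-by-field copy with a value copy plus an explicit re-slice of args. A small sketch of why the slice still has to be copied separately (hypothetical type, not the real GitCLI):

package main

import "fmt"

type gitCLI struct {
	args []string
}

func main() {
	orig := gitCLI{args: []string{"--no-pager"}}

	clone := orig                                 // value copy: clone.args still aliases orig.args
	clone.args = append([]string{}, orig.args...) // give the clone its own backing array
	clone.args = append(clone.args, "log")

	fmt.Println(orig.args)  // [--no-pager]
	fmt.Println(clone.args) // [--no-pager log]
}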
@ -1,9 +1,16 @@
---
linters:
  enable:
    - errcheck
    - godot
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - testifylint
    - unused

linter-settings:
  godot:

@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Paul Gier <pgier@redhat.com> @pgier
* Paul Gier <paulgier@gmail.com> @pgier
* Ben Kochie <superq@gmail.com> @SuperQ

@ -49,23 +49,23 @@ endif
|
|||
GOTEST := $(GO) test
|
||||
GOTEST_DIR :=
|
||||
ifneq ($(CIRCLE_JOB),)
|
||||
ifneq ($(shell command -v gotestsum > /dev/null),)
|
||||
ifneq ($(shell command -v gotestsum 2> /dev/null),)
|
||||
GOTEST_DIR := test-results
|
||||
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
|
||||
endif
|
||||
endif
|
||||
|
||||
PROMU_VERSION ?= 0.15.0
|
||||
PROMU_VERSION ?= 0.17.0
|
||||
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||
|
||||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.54.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||
GOLANGCI_LINT_VERSION ?= v1.59.0
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
|
||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
|
||||
# If we're in CI and there is an Actions file, that means the linter
|
||||
# is being run in Actions, so we don't need to run it here.
|
||||
ifneq (,$(SKIP_GOLANGCI_LINT))
|
||||
|
@ -169,16 +169,20 @@ common-vet:
|
|||
common-lint: $(GOLANGCI_LINT)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> running golangci-lint"
|
||||
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
|
||||
# Otherwise staticcheck might fail randomly for some reason not yet explained.
|
||||
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
|
||||
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||
endif
|
||||
|
||||
.PHONY: common-lint-fix
|
||||
common-lint-fix: $(GOLANGCI_LINT)
|
||||
ifdef GOLANGCI_LINT
|
||||
@echo ">> running golangci-lint fix"
|
||||
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||
endif
|
||||
|
||||
.PHONY: common-yamllint
|
||||
common-yamllint:
|
||||
@echo ">> running yamllint on all YAML files in the repository"
|
||||
ifeq (, $(shell command -v yamllint > /dev/null))
|
||||
ifeq (, $(shell command -v yamllint 2> /dev/null))
|
||||
@echo "yamllint not installed so skipping"
|
||||
else
|
||||
yamllint .
|
||||
|
@ -204,6 +208,10 @@ common-tarball: promu
|
|||
@echo ">> building release tarball"
|
||||
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||
|
||||
.PHONY: common-docker-repo-name
|
||||
common-docker-repo-name:
|
||||
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
|
||||
|
||||
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
||||
common-docker: $(BUILD_DOCKER_ARCHS)
|
||||
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
||||
|
|
|
@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
	data, err := os.ReadFile(fs.proc.Path("net/arp"))
	if err != nil {
		return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
		return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
	}

	return parseARPEntries(data)

@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
		} else if width == expectedDataWidth {
			entry, err := parseARPEntry(columns)
			if err != nil {
				return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
			}
			entries = append(entries, entry)
		} else {
			return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
		}

	}

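Many of the procfs hunks in this update swap %s for %w when wrapping sentinel errors such as ErrFileParse and ErrFileRead. A short sketch of the practical difference, with a stand-in sentinel error:

package main

import (
	"errors"
	"fmt"
)

var errFileParse = errors.New("file parse error") // stand-in for procfs.ErrFileParse

func main() {
	opaque := fmt.Errorf("%s: bad line", errFileParse)  // only formats the message text
	wrapped := fmt.Errorf("%w: bad line", errFileParse) // keeps the error in the chain

	fmt.Println(errors.Is(opaque, errFileParse))  // false
	fmt.Println(errors.Is(wrapped, errFileParse)) // true
}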
@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
|||
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
|
||||
}
|
||||
|
||||
node := strings.TrimRight(parts[1], ",")
|
||||
zone := strings.TrimRight(parts[3], ",")
|
||||
node := strings.TrimSuffix(parts[1], ",")
|
||||
zone := strings.TrimSuffix(parts[3], ",")
|
||||
arraySize := len(parts[4:])
|
||||
|
||||
if bucketCount == -1 {
|
||||
|
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
|||
for i := 0; i < arraySize; i++ {
|
||||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
|
||||
return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
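The buddyinfo hunk above swaps strings.TrimRight for strings.TrimSuffix when stripping the trailing comma from the node and zone columns. A quick illustration of how the two calls differ (sample values are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// TrimRight strips any run of the listed characters...
	fmt.Println(strings.TrimRight("Normal,,,", ","))  // "Normal"
	// ...while TrimSuffix removes at most one exact suffix.
	fmt.Println(strings.TrimSuffix("Normal,,,", ",")) // "Normal,,"
	fmt.Println(strings.TrimSuffix("Normal,", ","))   // "Normal"
}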
@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
|
|||
firstLine := firstNonEmptyLine(scanner)
|
||||
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
|
||||
if !match || !strings.Contains(firstLine, ":") {
|
||||
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
|
||||
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
|
@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
|
|||
// find the first "processor" line
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
cpuinfo := []CPUInfo{}
|
||||
|
|
|
@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
|
|||
path := fs.proc.Path("crypto")
|
||||
b, err := util.ReadFileNoStat(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
|
||||
return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
|
||||
|
||||
}
|
||||
|
||||
crypto, err := parseCrypto(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
|
||||
}
|
||||
|
||||
return crypto, nil
|
||||
|
@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
|
|||
|
||||
kv := strings.Split(text, ":")
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
|
||||
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
|
||||
}
|
||||
|
||||
k := strings.TrimSpace(kv[0])
|
||||
|
|
|
@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
|||
|
||||
m, err := parseFscacheinfo(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
|
||||
return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
|
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
|||
func setFSCacheFields(fields []string, setFields ...*uint64) error {
|
||||
var err error
|
||||
if len(fields) < len(setFields) {
|
||||
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
|
||||
return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
|
||||
}
|
||||
|
||||
for i := range setFields {
|
||||
|
|
|
@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
|
|||
case 46:
|
||||
ip = net.ParseIP(s[1:40])
|
||||
if ip == nil {
|
||||
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
|
||||
return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
|
||||
}
|
||||
default:
|
||||
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
|
||||
return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
|
||||
}
|
||||
|
||||
portString := s[len(s)-4:]
|
||||
if len(portString) != 4 {
|
||||
return nil, 0,
|
||||
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
|
||||
fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
|
||||
}
|
||||
port, err := strconv.ParseUint(portString, 16, 16)
|
||||
if err != nil {
|
||||
|
|
|
@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
|
|||
for i, load := range parts[0:3] {
|
||||
loads[i], err = strconv.ParseFloat(load, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
|
||||
}
|
||||
}
|
||||
return &LoadAvg{
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
|
||||
var (
|
||||
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
|
||||
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
|
||||
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
|
||||
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
|
||||
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
|
||||
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
|
||||
|
@ -50,6 +50,8 @@ type MDStat struct {
|
|||
BlocksTotal int64
|
||||
// Number of blocks on the device that are in sync.
|
||||
BlocksSynced int64
|
||||
// Number of blocks on the device that need to be synced.
|
||||
BlocksToBeSynced int64
|
||||
// progress percentage of current sync
|
||||
BlocksSyncedPct float64
|
||||
// estimated finishing time for current sync (in minutes)
|
||||
|
@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
|
|||
}
|
||||
mdstat, err := parseMDStat(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
|
||||
}
|
||||
return mdstat, nil
|
||||
}
|
||||
|
@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
|
||||
deviceFields := strings.Fields(line)
|
||||
if len(deviceFields) < 3 {
|
||||
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
|
||||
return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
|
||||
}
|
||||
mdName := deviceFields[0] // mdx
|
||||
state := deviceFields[2] // active or inactive
|
||||
|
@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
|
||||
}
|
||||
|
||||
syncLineIdx := i + 2
|
||||
|
@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
|
||||
// If device is syncing at the moment, get the number of currently
|
||||
// synced bytes, otherwise that number equals the size of the device.
|
||||
syncedBlocks := size
|
||||
blocksSynced := size
|
||||
blocksToBeSynced := size
|
||||
speed := float64(0)
|
||||
finish := float64(0)
|
||||
pct := float64(0)
|
||||
|
@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
// Handle case when resync=PENDING or resync=DELAYED.
|
||||
if strings.Contains(lines[syncLineIdx], "PENDING") ||
|
||||
strings.Contains(lines[syncLineIdx], "DELAYED") {
|
||||
syncedBlocks = 0
|
||||
blocksSynced = 0
|
||||
} else {
|
||||
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
|
||||
blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
DisksSpare: spare,
|
||||
DisksTotal: total,
|
||||
BlocksTotal: size,
|
||||
BlocksSynced: syncedBlocks,
|
||||
BlocksSynced: blocksSynced,
|
||||
BlocksToBeSynced: blocksToBeSynced,
|
||||
BlocksSyncedPct: pct,
|
||||
BlocksSyncedFinishTime: finish,
|
||||
BlocksSyncedSpeed: speed,
|
||||
|
@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
|
||||
statusFields := strings.Fields(statusLine)
|
||||
if len(statusFields) < 1 {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
}
|
||||
|
||||
sizeStr := statusFields[0]
|
||||
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
}
|
||||
|
||||
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
|
||||
|
@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
|
|||
|
||||
matches := statusLineRE.FindStringSubmatch(statusLine)
|
||||
if len(matches) != 5 {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
|
||||
return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
|
||||
}
|
||||
|
||||
total, err = strconv.ParseInt(matches[2], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||
}
|
||||
|
||||
active, err = strconv.ParseInt(matches[3], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
|
||||
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
|
||||
}
|
||||
down = int64(strings.Count(matches[4], "_"))
|
||||
|
||||
return active, total, down, size, nil
|
||||
}
|
||||
|
||||
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
|
||||
func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
|
||||
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
|
||||
if len(matches) != 2 {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
|
||||
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
|
||||
}
|
||||
|
||||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
|
||||
blocks := strings.Split(matches[1], "/")
|
||||
blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
|
||||
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
|
||||
}
|
||||
|
||||
blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
|
||||
if err != nil {
|
||||
return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
|
||||
}
|
||||
|
||||
// Get percentage complete
|
||||
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
|
||||
if len(matches) != 2 {
|
||||
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
|
||||
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
|
||||
}
|
||||
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
|
||||
if err != nil {
|
||||
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
|
||||
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
|
||||
}
|
||||
|
||||
// Get time expected left to complete
|
||||
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
|
||||
if len(matches) != 2 {
|
||||
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
|
||||
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
|
||||
}
|
||||
finish, err = strconv.ParseFloat(matches[1], 64)
|
||||
if err != nil {
|
||||
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
|
||||
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
|
||||
}
|
||||
|
||||
// Get recovery speed
|
||||
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
|
||||
if len(matches) != 2 {
|
||||
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
|
||||
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
|
||||
}
|
||||
speed, err = strconv.ParseFloat(matches[1], 64)
|
||||
if err != nil {
|
||||
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
|
||||
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
|
||||
}
|
||||
|
||||
return syncedBlocks, pct, finish, speed, nil
|
||||
return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
|
||||
}
|
||||
|
||||
func evalComponentDevices(deviceFields []string) []string {
|
||||
|
|
|
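The evalRecoveryLine change above captures the whole "(synced/total)" group and splits it, so both block counts survive instead of only the synced count. A toy parse of a made-up mdstat recovery line under that scheme:

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)

func main() {
	// Shaped like a /proc/mdstat recovery line; the numbers are invented.
	line := "[===>.............]  recovery = 21.4% (2185596/10190592) finish=6.7min speed=19777K/sec"

	m := recoveryLineBlocksRE.FindStringSubmatch(line)
	blocks := strings.Split(m[1], "/")
	synced, _ := strconv.ParseInt(blocks[0], 10, 64)
	total, _ := strconv.ParseInt(blocks[1], 10, 64)

	fmt.Println(synced, total) // 2185596 10190592
}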
@ -126,6 +126,7 @@ type Meminfo struct {
|
|||
VmallocUsed *uint64
|
||||
// largest contiguous block of vmalloc area which is free
|
||||
VmallocChunk *uint64
|
||||
Percpu *uint64
|
||||
HardwareCorrupted *uint64
|
||||
AnonHugePages *uint64
|
||||
ShmemHugePages *uint64
|
||||
|
@ -140,6 +141,55 @@ type Meminfo struct {
|
|||
DirectMap4k *uint64
|
||||
DirectMap2M *uint64
|
||||
DirectMap1G *uint64
|
||||
|
||||
// The struct fields below are the byte-normalized counterparts to the
|
||||
// existing struct fields. Values are normalized using the optional
|
||||
// unit field in the meminfo line.
|
||||
MemTotalBytes *uint64
|
||||
MemFreeBytes *uint64
|
||||
MemAvailableBytes *uint64
|
||||
BuffersBytes *uint64
|
||||
CachedBytes *uint64
|
||||
SwapCachedBytes *uint64
|
||||
ActiveBytes *uint64
|
||||
InactiveBytes *uint64
|
||||
ActiveAnonBytes *uint64
|
||||
InactiveAnonBytes *uint64
|
||||
ActiveFileBytes *uint64
|
||||
InactiveFileBytes *uint64
|
||||
UnevictableBytes *uint64
|
||||
MlockedBytes *uint64
|
||||
SwapTotalBytes *uint64
|
||||
SwapFreeBytes *uint64
|
||||
DirtyBytes *uint64
|
||||
WritebackBytes *uint64
|
||||
AnonPagesBytes *uint64
|
||||
MappedBytes *uint64
|
||||
ShmemBytes *uint64
|
||||
SlabBytes *uint64
|
||||
SReclaimableBytes *uint64
|
||||
SUnreclaimBytes *uint64
|
||||
KernelStackBytes *uint64
|
||||
PageTablesBytes *uint64
|
||||
NFSUnstableBytes *uint64
|
||||
BounceBytes *uint64
|
||||
WritebackTmpBytes *uint64
|
||||
CommitLimitBytes *uint64
|
||||
CommittedASBytes *uint64
|
||||
VmallocTotalBytes *uint64
|
||||
VmallocUsedBytes *uint64
|
||||
VmallocChunkBytes *uint64
|
||||
PercpuBytes *uint64
|
||||
HardwareCorruptedBytes *uint64
|
||||
AnonHugePagesBytes *uint64
|
||||
ShmemHugePagesBytes *uint64
|
||||
ShmemPmdMappedBytes *uint64
|
||||
CmaTotalBytes *uint64
|
||||
CmaFreeBytes *uint64
|
||||
HugepagesizeBytes *uint64
|
||||
DirectMap4kBytes *uint64
|
||||
DirectMap2MBytes *uint64
|
||||
DirectMap1GBytes *uint64
|
||||
}
|
||||
|
||||
// Meminfo returns an information about current kernel/system memory statistics.
|
||||
|
@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
|
|||
|
||||
m, err := parseMemInfo(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
|
||||
return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
|
@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||
var m Meminfo
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Each line has at least a name and value; we ignore the unit.
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
||||
}
|
||||
var val, valBytes uint64
|
||||
|
||||
v, err := strconv.ParseUint(fields[1], 0, 64)
|
||||
val, err := strconv.ParseUint(fields[1], 0, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// No unit present, use the parsed the value as bytes directly.
|
||||
valBytes = val
|
||||
case 3:
|
||||
// Unit present in optional 3rd field, convert it to
|
||||
// bytes. The only unit supported within the Linux
|
||||
// kernel is `kB`.
|
||||
if fields[2] != "kB" {
|
||||
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
|
||||
}
|
||||
|
||||
valBytes = 1024 * val
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
||||
}
|
||||
|
||||
switch fields[0] {
|
||||
case "MemTotal:":
|
||||
m.MemTotal = &v
|
||||
m.MemTotal = &val
|
||||
m.MemTotalBytes = &valBytes
|
||||
case "MemFree:":
|
||||
m.MemFree = &v
|
||||
m.MemFree = &val
|
||||
m.MemFreeBytes = &valBytes
|
||||
case "MemAvailable:":
|
||||
m.MemAvailable = &v
|
||||
m.MemAvailable = &val
|
||||
m.MemAvailableBytes = &valBytes
|
||||
case "Buffers:":
|
||||
m.Buffers = &v
|
||||
m.Buffers = &val
|
||||
m.BuffersBytes = &valBytes
|
||||
case "Cached:":
|
||||
m.Cached = &v
|
||||
m.Cached = &val
|
||||
m.CachedBytes = &valBytes
|
||||
case "SwapCached:":
|
||||
m.SwapCached = &v
|
||||
m.SwapCached = &val
|
||||
m.SwapCachedBytes = &valBytes
|
||||
case "Active:":
|
||||
m.Active = &v
|
||||
m.Active = &val
|
||||
m.ActiveBytes = &valBytes
|
||||
case "Inactive:":
|
||||
m.Inactive = &v
|
||||
m.Inactive = &val
|
||||
m.InactiveBytes = &valBytes
|
||||
case "Active(anon):":
|
||||
m.ActiveAnon = &v
|
||||
m.ActiveAnon = &val
|
||||
m.ActiveAnonBytes = &valBytes
|
||||
case "Inactive(anon):":
|
||||
m.InactiveAnon = &v
|
||||
m.InactiveAnon = &val
|
||||
m.InactiveAnonBytes = &valBytes
|
||||
case "Active(file):":
|
||||
m.ActiveFile = &v
|
||||
m.ActiveFile = &val
|
||||
m.ActiveFileBytes = &valBytes
|
||||
case "Inactive(file):":
|
||||
m.InactiveFile = &v
|
||||
m.InactiveFile = &val
|
||||
m.InactiveFileBytes = &valBytes
|
||||
case "Unevictable:":
|
||||
m.Unevictable = &v
|
||||
m.Unevictable = &val
|
||||
m.UnevictableBytes = &valBytes
|
||||
case "Mlocked:":
|
||||
m.Mlocked = &v
|
||||
m.Mlocked = &val
|
||||
m.MlockedBytes = &valBytes
|
||||
case "SwapTotal:":
|
||||
m.SwapTotal = &v
|
||||
m.SwapTotal = &val
|
||||
m.SwapTotalBytes = &valBytes
|
||||
case "SwapFree:":
|
||||
m.SwapFree = &v
|
||||
m.SwapFree = &val
|
||||
m.SwapFreeBytes = &valBytes
|
||||
case "Dirty:":
|
||||
m.Dirty = &v
|
||||
m.Dirty = &val
|
||||
m.DirtyBytes = &valBytes
|
||||
case "Writeback:":
|
||||
m.Writeback = &v
|
||||
m.Writeback = &val
|
||||
m.WritebackBytes = &valBytes
|
||||
case "AnonPages:":
|
||||
m.AnonPages = &v
|
||||
m.AnonPages = &val
|
||||
m.AnonPagesBytes = &valBytes
|
||||
case "Mapped:":
|
||||
m.Mapped = &v
|
||||
m.Mapped = &val
|
||||
m.MappedBytes = &valBytes
|
||||
case "Shmem:":
|
||||
m.Shmem = &v
|
||||
m.Shmem = &val
|
||||
m.ShmemBytes = &valBytes
|
||||
case "Slab:":
|
||||
m.Slab = &v
|
||||
m.Slab = &val
|
||||
m.SlabBytes = &valBytes
|
||||
case "SReclaimable:":
|
||||
m.SReclaimable = &v
|
||||
m.SReclaimable = &val
|
||||
m.SReclaimableBytes = &valBytes
|
||||
case "SUnreclaim:":
|
||||
m.SUnreclaim = &v
|
||||
m.SUnreclaim = &val
|
||||
m.SUnreclaimBytes = &valBytes
|
||||
case "KernelStack:":
|
||||
m.KernelStack = &v
|
||||
m.KernelStack = &val
|
||||
m.KernelStackBytes = &valBytes
|
||||
case "PageTables:":
|
||||
m.PageTables = &v
|
||||
m.PageTables = &val
|
||||
m.PageTablesBytes = &valBytes
|
||||
case "NFS_Unstable:":
|
||||
m.NFSUnstable = &v
|
||||
m.NFSUnstable = &val
|
||||
m.NFSUnstableBytes = &valBytes
|
||||
case "Bounce:":
|
||||
m.Bounce = &v
|
||||
m.Bounce = &val
|
||||
m.BounceBytes = &valBytes
|
||||
case "WritebackTmp:":
|
||||
m.WritebackTmp = &v
|
||||
m.WritebackTmp = &val
|
||||
m.WritebackTmpBytes = &valBytes
|
||||
case "CommitLimit:":
|
||||
m.CommitLimit = &v
|
||||
m.CommitLimit = &val
|
||||
m.CommitLimitBytes = &valBytes
|
||||
case "Committed_AS:":
|
||||
m.CommittedAS = &v
|
||||
m.CommittedAS = &val
|
||||
m.CommittedASBytes = &valBytes
|
||||
case "VmallocTotal:":
|
||||
m.VmallocTotal = &v
|
||||
m.VmallocTotal = &val
|
||||
m.VmallocTotalBytes = &valBytes
|
||||
case "VmallocUsed:":
|
||||
m.VmallocUsed = &v
|
||||
m.VmallocUsed = &val
|
||||
m.VmallocUsedBytes = &valBytes
|
||||
case "VmallocChunk:":
|
||||
m.VmallocChunk = &v
|
||||
m.VmallocChunk = &val
|
||||
m.VmallocChunkBytes = &valBytes
|
||||
case "Percpu:":
|
||||
m.Percpu = &val
|
||||
m.PercpuBytes = &valBytes
|
||||
case "HardwareCorrupted:":
|
||||
m.HardwareCorrupted = &v
|
||||
m.HardwareCorrupted = &val
|
||||
m.HardwareCorruptedBytes = &valBytes
|
||||
case "AnonHugePages:":
|
||||
m.AnonHugePages = &v
|
||||
m.AnonHugePages = &val
|
||||
m.AnonHugePagesBytes = &valBytes
|
||||
case "ShmemHugePages:":
|
||||
m.ShmemHugePages = &v
|
||||
m.ShmemHugePages = &val
|
||||
m.ShmemHugePagesBytes = &valBytes
|
||||
case "ShmemPmdMapped:":
|
||||
m.ShmemPmdMapped = &v
|
||||
m.ShmemPmdMapped = &val
|
||||
m.ShmemPmdMappedBytes = &valBytes
|
||||
case "CmaTotal:":
|
||||
m.CmaTotal = &v
|
||||
m.CmaTotal = &val
|
||||
m.CmaTotalBytes = &valBytes
|
||||
case "CmaFree:":
|
||||
m.CmaFree = &v
|
||||
m.CmaFree = &val
|
||||
m.CmaFreeBytes = &valBytes
|
||||
case "HugePages_Total:":
|
||||
m.HugePagesTotal = &v
|
||||
m.HugePagesTotal = &val
|
||||
case "HugePages_Free:":
|
||||
m.HugePagesFree = &v
|
||||
m.HugePagesFree = &val
|
||||
case "HugePages_Rsvd:":
|
||||
m.HugePagesRsvd = &v
|
||||
m.HugePagesRsvd = &val
|
||||
case "HugePages_Surp:":
|
||||
m.HugePagesSurp = &v
|
||||
m.HugePagesSurp = &val
|
||||
case "Hugepagesize:":
|
||||
m.Hugepagesize = &v
|
||||
m.Hugepagesize = &val
|
||||
m.HugepagesizeBytes = &valBytes
|
||||
case "DirectMap4k:":
|
||||
m.DirectMap4k = &v
|
||||
m.DirectMap4k = &val
|
||||
m.DirectMap4kBytes = &valBytes
|
||||
case "DirectMap2M:":
|
||||
m.DirectMap2M = &v
|
||||
m.DirectMap2M = &val
|
||||
m.DirectMap2MBytes = &valBytes
|
||||
case "DirectMap1G:":
|
||||
m.DirectMap1G = &v
|
||||
m.DirectMap1G = &val
|
||||
m.DirectMap1GBytes = &valBytes
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
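The parseMemInfo rework above normalizes every value to bytes using the optional unit column; the kernel only ever emits "kB", so the byte counterpart is val*1024. A compact sketch of that conversion, assuming the same line shape:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseLine(line string) (val, valBytes uint64, err error) {
	fields := strings.Fields(line)
	val, err = strconv.ParseUint(fields[1], 0, 64)
	if err != nil {
		return 0, 0, err
	}
	switch len(fields) {
	case 2:
		valBytes = val // no unit column: take the value as-is
	case 3:
		if fields[2] != "kB" {
			return 0, 0, fmt.Errorf("unsupported unit %q", fields[2])
		}
		valBytes = 1024 * val
	}
	return val, valBytes, nil
}

func main() {
	fmt.Println(parseLine("MemTotal:       16384256 kB")) // 16384256 16777478144 <nil>
}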
@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
|
|||
if mountInfo[6] != "" {
|
||||
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
|
||||
}
|
||||
}
|
||||
return mount, nil
|
||||
|
|
|
@ -88,7 +88,7 @@ type MountStatsNFS struct {
|
|||
// Statistics broken down by filesystem operation.
|
||||
Operations []NFSOperationStats
|
||||
// Statistics about the NFS RPC transport.
|
||||
Transport NFSTransportStats
|
||||
Transport []NFSTransportStats
|
||||
}
|
||||
|
||||
// mountStats implements MountStats.
|
||||
|
@ -194,8 +194,6 @@ type NFSOperationStats struct {
|
|||
CumulativeTotalResponseMilliseconds uint64
|
||||
// Duration from when a request was enqueued to when it was completely handled.
|
||||
CumulativeTotalRequestMilliseconds uint64
|
||||
// The average time from the point the client sends RPC requests until it receives the response.
|
||||
AverageRTTMilliseconds float64
|
||||
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
||||
Errors uint64
|
||||
}
|
||||
|
@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
|
|||
return nil, err
|
||||
}
|
||||
|
||||
stats.Transport = *tstats
|
||||
stats.Transport = append(stats.Transport, *tstats)
|
||||
}
|
||||
|
||||
// When encountering "per-operation statistics", we must break this
|
||||
|
@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
|||
CumulativeTotalResponseMilliseconds: ns[6],
|
||||
CumulativeTotalRequestMilliseconds: ns[7],
|
||||
}
|
||||
if ns[0] != 0 {
|
||||
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
|
||||
}
|
||||
|
||||
if len(ns) > 8 {
|
||||
opStats.Errors = ns[8]
|
||||
|
@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
||||
return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
||||
}
|
||||
|
||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||
|
|
|
@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
|||
|
||||
stat, err := parseConntrackStat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
|
||||
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
|
@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
|||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||
entries, err := util.ParseHexUint64s(fields)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
||||
}
|
||||
numEntries := len(entries)
|
||||
if numEntries < 16 || numEntries > 17 {
|
||||
|
|
|
@ -50,10 +50,13 @@ type (
|
|||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
Sl uint64
|
||||
|
@ -66,6 +69,7 @@ type (
|
|||
RxQueue uint64
|
||||
UID uint64
|
||||
Inode uint64
|
||||
Drops *uint64
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
|
|||
defer f.Close()
|
||||
|
||||
var netIPSocket NetIPSocket
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
|
|||
defer f.Close()
|
||||
|
||||
var netIPSocketSummary NetIPSocketSummary
|
||||
var udpPacketDrops uint64
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netIPSocketSummary.TxQueueLength += line.TxQueue
|
||||
netIPSocketSummary.RxQueueLength += line.RxQueue
|
||||
netIPSocketSummary.UsedSockets++
|
||||
if isUDP {
|
||||
udpPacketDrops += *line.Drops
|
||||
netIPSocketSummary.Drops = &udpPacketDrops
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
|
@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
|
|||
var byteIP []byte
|
||||
byteIP, err := hex.DecodeString(hexIP)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
||||
}
|
||||
switch len(byteIP) {
|
||||
case 4:
|
||||
|
@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
|
|||
}
|
||||
return i, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
|
||||
return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
||||
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
|
||||
line := &netIPSocketLine{}
|
||||
if len(fields) < 10 {
|
||||
return nil, fmt.Errorf(
|
||||
|
@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||
}
|
||||
|
||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
||||
}
|
||||
// local_address
|
||||
l := strings.Split(fields[1], ":")
|
||||
|
@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||
return nil, err
|
||||
}
|
||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
||||
}
|
||||
|
||||
// remote_address
|
||||
|
@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||
return nil, err
|
||||
}
|
||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
||||
}
|
||||
|
||||
// st
|
||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
||||
}
|
||||
|
||||
// tx_queue and rx_queue
|
||||
|
@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||
)
|
||||
}
|
||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
||||
}
|
||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
||||
}
|
||||
|
||||
// uid
|
||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
||||
}
|
||||
|
||||
// inode
|
||||
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||
}
|
||||
|
||||
// drops
|
||||
if isUDP {
|
||||
drops, err := strconv.ParseUint(fields[12], 0, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
|
||||
}
|
||||
line.Drops = &drops
|
||||
}
|
||||
|
||||
return line, nil
|
||||
|
|
|
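The new Drops handling above reads the trailing drops column that only the UDP proc files carry, which is why the field is a *uint64 and is set only when isUDP is true. A rough sketch against a made-up /proc/net/udp data row (layout assumed from the usual format):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A trimmed sample row; the header line is assumed to be skipped already.
	row := "  990: 00000000:14E9 00000000:0000 07 00000000:00000000 00:00000000 00000000   111        0 22057 2 0000000000000000 7"
	fields := strings.Fields(row)

	// fields[12] is the drops column used by parseNetIPSocketLine above.
	drops, err := strconv.ParseUint(fields[12], 0, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(drops) // 7
}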
@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
|
|||
|
||||
stat, err := parseSockstat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
|
||||
return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
|
@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
|
|||
// The remaining fields are key/value pairs.
|
||||
kvs, err := parseSockstatKVs(fields[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
||||
return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
||||
}
|
||||
|
||||
// The first field is the protocol. We must trim its colon suffix.
|
||||
|
|
|
@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
|
|||
|
||||
entries, err := parseSoftnet(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
|
|
|
@ -0,0 +1,119 @@
|
|||
// Copyright 2023 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TLSStat struct represents data in /proc/net/tls_stat.
|
||||
// See https://docs.kernel.org/networking/tls.html#statistics
|
||||
type TLSStat struct {
|
||||
// number of TX sessions currently installed where host handles cryptography
|
||||
TLSCurrTxSw int
|
||||
// number of RX sessions currently installed where host handles cryptography
|
||||
TLSCurrRxSw int
|
||||
// number of TX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrTxDevice int
|
||||
// number of RX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrRxDevice int
|
||||
//number of TX sessions opened with host cryptography
|
||||
TLSTxSw int
|
||||
//number of RX sessions opened with host cryptography
|
||||
TLSRxSw int
|
||||
// number of TX sessions opened with NIC cryptography
|
||||
TLSTxDevice int
|
||||
// number of RX sessions opened with NIC cryptography
|
||||
TLSRxDevice int
|
||||
// record decryption failed (e.g. due to incorrect authentication tag)
|
||||
TLSDecryptError int
|
||||
// number of RX resyncs sent to NICs handling cryptography
|
||||
TLSRxDeviceResync int
|
||||
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
|
||||
TLSDecryptRetry int
|
||||
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
|
||||
TLSRxNoPadViolation int
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func NewTLSStat() (TLSStat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
return fs.NewTLSStat()
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func (fs FS) NewTLSStat() (TLSStat, error) {
|
||||
file, err := os.Open(fs.proc.Path("net/tls_stat"))
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
tlsstat = TLSStat{}
|
||||
s = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
|
||||
if len(fields) != 2 {
|
||||
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
value, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "TlsCurrTxSw":
|
||||
tlsstat.TLSCurrTxSw = value
|
||||
case "TlsCurrRxSw":
|
||||
tlsstat.TLSCurrRxSw = value
|
||||
case "TlsCurrTxDevice":
|
||||
tlsstat.TLSCurrTxDevice = value
|
||||
case "TlsCurrRxDevice":
|
||||
tlsstat.TLSCurrRxDevice = value
|
||||
case "TlsTxSw":
|
||||
tlsstat.TLSTxSw = value
|
||||
case "TlsRxSw":
|
||||
tlsstat.TLSRxSw = value
|
||||
case "TlsTxDevice":
|
||||
tlsstat.TLSTxDevice = value
|
||||
case "TlsRxDevice":
|
||||
tlsstat.TLSRxDevice = value
|
||||
case "TlsDecryptError":
|
||||
tlsstat.TLSDecryptError = value
|
||||
case "TlsRxDeviceResync":
|
||||
tlsstat.TLSRxDeviceResync = value
|
||||
case "TlsDecryptRetry":
|
||||
tlsstat.TLSDecryptRetry = value
|
||||
case "TlsRxNoPadViolation":
|
||||
tlsstat.TLSRxNoPadViolation = value
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return tlsstat, s.Err()
|
||||
}
|
|
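A possible caller of the new TLSStat reader added above; it assumes a kernel that exposes /proc/net/tls_stat (the tls module loaded) and goes through the public procfs API:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	stat, err := fs.NewTLSStat() // reads /proc/net/tls_stat
	if err != nil {
		log.Fatal(err) // e.g. the file is absent because kTLS is not in use
	}

	fmt.Printf("kTLS TX sessions sw=%d hw=%d\n", stat.TLSCurrTxSw, stat.TLSCurrTxDevice)
}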
@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
|||
line := s.Text()
|
||||
item, err := nu.parseLine(line, hasInode, minFields)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
nu.Rows = append(nu.Rows, item)
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return &nu, nil
|
||||
|
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
|||
|
||||
users, err := u.parseUsers(fields[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
|
||||
return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
|
||||
}
|
||||
|
||||
flags, err := u.parseFlags(fields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
||||
}
|
||||
|
||||
typ, err := u.parseType(fields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
||||
return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
||||
}
|
||||
|
||||
state, err := u.parseState(fields[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
||||
return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
||||
}
|
||||
|
||||
var inode uint64
|
||||
if hasInode {
|
||||
inode, err = u.parseInode(fields[6])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
||||
return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
|
|||
|
||||
m, err := parseWireless(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
|||
|
||||
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
||||
return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
||||
}
|
||||
|
||||
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
||||
return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
||||
}
|
||||
|
||||
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
||||
return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
||||
}
|
||||
|
||||
dnwid, err := strconv.Atoi(stats[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
||||
}
|
||||
|
||||
dcrypt, err := strconv.Atoi(stats[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
||||
}
|
||||
|
||||
dfrag, err := strconv.Atoi(stats[6])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
||||
}
|
||||
|
||||
dretry, err := strconv.Atoi(stats[7])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
||||
}
|
||||
|
||||
dmisc, err := strconv.Atoi(stats[8])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
||||
}
|
||||
|
||||
mbeacon, err := strconv.Atoi(stats[9])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
||||
return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
||||
}
|
||||
|
||||
w := &Wireless{
|
||||
|
@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
|||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
||||
return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
||||
}
|
||||
|
||||
return interfaces, nil
|
||||
|
|
|
@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
}
|
||||
|
||||
p := Procs{}
|
||||
|
@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
|
|||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||
return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
|
||||
}
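The rewritten return simply drops redundant conversions: string("\x00") and string(byte(0)) are both just "\x00". A small sketch with a hypothetical cmdline buffer showing the same trim-then-split behavior:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// /proc/<pid>/cmdline is NUL-separated and NUL-terminated (hypothetical sample).
	data := []byte("sleep\x00300\x00")
	args := strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00")
	fmt.Println(args) // [sleep 300]
}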
// Wchan returns the wchan (wait channel) of a process.
|
||||
|
@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
|
|||
for i, n := range names {
|
||||
fd, err := strconv.ParseInt(n, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
||||
}
|
||||
fds[i] = uintptr(fd)
|
||||
}
|
||||
|
@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
|||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
|
|
|
@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
|
|||
}
|
||||
i, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
|
||||
return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
|||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
|
||||
return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
|
||||
}
|
||||
|
||||
ns := make(Namespaces, len(names))
|
||||
|
@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
|||
typ := fields[0]
|
||||
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
|
||||
return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
|
||||
}
|
||||
|
||||
ns[name] = Namespace{typ, uint32(inode)}
|
||||
|
|
|
@ -61,7 +61,7 @@ type PSIStats struct {
|
|||
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
|
||||
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
|
||||
if err != nil {
|
||||
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
||||
return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
||||
}
|
||||
|
||||
return parsePSIStats(bytes.NewReader(data))
|
||||
|
|
|
@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
|
|||
}
|
||||
|
||||
v := strings.TrimSpace(kv[1])
|
||||
v = strings.TrimRight(v, " kB")
|
||||
v = strings.TrimSuffix(v, " kB")
|
||||
|
||||
vKBytes, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
|
|
|
@ -110,6 +110,11 @@ type ProcStat struct {
|
|||
Policy uint
|
||||
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
||||
DelayAcctBlkIOTicks uint64
|
||||
// Guest time of the process (time spent running a virtual CPU for a guest
|
||||
// operating system), measured in clock ticks.
|
||||
GuestTime int
|
||||
// Guest time of the process's children, measured in clock ticks.
|
||||
CGuestTime int
|
||||
|
||||
proc FS
|
||||
}
|
||||
|
@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
|
|||
&s.RTPriority,
|
||||
&s.Policy,
|
||||
&s.DelayAcctBlkIOTicks,
|
||||
&s.GuestTime,
|
||||
&s.CGuestTime,
|
||||
)
|
||||
if err != nil {
|
||||
return ProcStat{}, err
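The new DelayAcctBlkIOTicks, GuestTime, and CGuestTime fields are scanned positionally from /proc/<pid>/stat, so they stay zero on kernels that do not expose those columns. A hedged sketch of reading them through the vendored API (the standard /proc mount point is an assumption):

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}
	stat, err := p.Stat()
	if err != nil {
		panic(err)
	}
	// Fields added in this bump; all values are in clock ticks.
	fmt.Println(stat.DelayAcctBlkIOTicks, stat.GuestTime, stat.CGuestTime)
}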
@ -15,6 +15,7 @@ package procfs
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"math/bits"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -76,9 +77,9 @@ type ProcStatus struct {
|
|||
NonVoluntaryCtxtSwitches uint64
|
||||
|
||||
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
|
||||
UIDs [4]string
|
||||
UIDs [4]uint64
|
||||
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
||||
GIDs [4]string
|
||||
GIDs [4]uint64
|
||||
|
||||
// CpusAllowedList: List of cpu cores processes are allowed to run on.
|
||||
CpusAllowedList []uint64
|
||||
|
@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
|
|||
// convert kB to B
|
||||
vBytes := vKBytes * 1024
|
||||
|
||||
s.fillStatus(k, v, vKBytes, vBytes)
|
||||
err = s.fillStatus(k, v, vKBytes, vBytes)
|
||||
if err != nil {
|
||||
return ProcStatus{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
|
||||
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
|
||||
switch k {
|
||||
case "Tgid":
|
||||
s.TGID = int(vUint)
|
||||
case "Name":
|
||||
s.Name = vString
|
||||
case "Uid":
|
||||
copy(s.UIDs[:], strings.Split(vString, "\t"))
|
||||
var err error
|
||||
for i, v := range strings.Split(vString, "\t") {
|
||||
s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "Gid":
|
||||
copy(s.GIDs[:], strings.Split(vString, "\t"))
|
||||
var err error
|
||||
for i, v := range strings.Split(vString, "\t") {
|
||||
s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "NSpid":
|
||||
s.NSpids = calcNSPidsList(vString)
|
||||
case "VmPeak":
|
||||
|
@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
|||
s.CpusAllowedList = calcCpusAllowedList(vString)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TotalCtxtSwitches returns the total context switch.
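Because UIDs and GIDs switch from [4]string to [4]uint64 above, any code in the dependency tree that formatted them as strings needs a small conversion after this bump. A hedged sketch of the adjustment (indices follow the real/effective/saved/filesystem order documented in the struct):

package main

import (
	"fmt"
	"os"
	"strconv"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		panic(err)
	}
	realUID := status.UIDs[0]                    // now uint64
	fmt.Println(strconv.FormatUint(realUID, 10)) // old string form, if still needed
}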
@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
|
|||
vp := util.NewValueParser(f)
|
||||
values[i] = vp.Int()
|
||||
if err := vp.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
||||
return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
|
|
|
@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.Hi = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "TIMER:":
|
||||
|
@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.Timer = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "NET_TX:":
|
||||
|
@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.NetTx = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "NET_RX:":
|
||||
|
@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.NetRx = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "BLOCK:":
|
||||
|
@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.Block = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "IRQ_POLL:":
|
||||
|
@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.IRQPoll = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "TASKLET:":
|
||||
|
@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.Tasklet = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "SCHED:":
|
||||
|
@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.Sched = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "HRTIMER:":
|
||||
|
@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.HRTimer = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "RCU:":
|
||||
|
@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||
softirqs.RCU = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return softirqs, scanner.Err()
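For callers, each of these per-type slices comes back with one counter per CPU. A hedged usage sketch via the vendored package, assuming a standard /proc mount and that the Softirqs accessor is unchanged in this version:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	s, err := fs.Softirqs()
	if err != nil {
		panic(err)
	}
	// Each slice holds one counter per CPU, in CPU order.
	for cpu, n := range s.NetRx {
		fmt.Printf("cpu%d NET_RX=%d\n", cpu, n)
	}
}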
@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
|||
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
||||
}
|
||||
if count == 0 {
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
|
||||
|
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
|||
|
||||
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||
if err != nil {
|
||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
return cpuStat, cpuID, nil
|
||||
|
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
|||
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||
|
||||
if err != nil {
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
return softIRQStat, total, nil
|
||||
|
@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
|||
switch {
|
||||
case parts[0] == "btime":
|
||||
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "intr":
|
||||
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
numberedIRQs := parts[2:]
|
||||
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||
for i, count := range numberedIRQs {
|
||||
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "ctxt":
|
||||
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "processes":
|
||||
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_running":
|
||||
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_blocked":
|
||||
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "softirq":
|
||||
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||
|
@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
|||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
|
|
|
@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
|
|||
|
||||
swap.Size, err = strconv.Atoi(swapFields[2])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
||||
}
|
||||
swap.Used, err = strconv.Atoi(swapFields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
||||
}
|
||||
swap.Priority, err = strconv.Atoi(swapFields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
||||
}
|
||||
|
||||
return swap, nil
|
||||
|
|
|
@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
|
|||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
|
||||
return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
|
||||
}
|
||||
|
||||
t := Procs{}
|
||||
|
|
|
@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
|
|||
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
|
||||
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
||||
return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
||||
}
|
||||
zoneinfo, err := parseZoneinfo(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
||||
return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
||||
}
|
||||
return zoneinfo, nil
|
||||
}
|
||||
|
|
|
@ -33,6 +33,9 @@
|
|||
#define CONSTBASE R16
|
||||
#define BLOCKS R17
|
||||
|
||||
// for VPERMXOR
|
||||
#define MASK R18
|
||||
|
||||
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
|
||||
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
|
||||
DATA consts<>+0x10(SB)/8, $0x0000000000000001
|
||||
|
@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
|
|||
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
|
||||
DATA consts<>+0x90(SB)/8, $0x0000000100000000
|
||||
DATA consts<>+0x98(SB)/8, $0x0000000300000002
|
||||
GLOBL consts<>(SB), RODATA, $0xa0
|
||||
DATA consts<>+0xa0(SB)/8, $0x5566774411223300
|
||||
DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88
|
||||
DATA consts<>+0xb0(SB)/8, $0x6677445522330011
|
||||
DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899
|
||||
GLOBL consts<>(SB), RODATA, $0xc0
|
||||
|
||||
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
|
||||
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
|
@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
|||
MOVD $48, R10
|
||||
MOVD $64, R11
|
||||
SRD $6, LEN, BLOCKS
|
||||
// for VPERMXOR
|
||||
MOVD $consts<>+0xa0(SB), MASK
|
||||
MOVD $16, R20
|
||||
// V16
|
||||
LXVW4X (CONSTBASE)(R0), VS48
|
||||
ADD $80,CONSTBASE
|
||||
|
@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
|||
// V28
|
||||
LXVW4X (CONSTBASE)(R11), VS60
|
||||
|
||||
// Load mask constants for VPERMXOR
|
||||
LXVW4X (MASK)(R0), V20
|
||||
LXVW4X (MASK)(R20), V21
|
||||
|
||||
// splat slot from V19 -> V26
|
||||
VSPLTW $0, V19, V26
|
||||
|
||||
|
@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
|||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
|
||||
PCALIGN $16
|
||||
loop_outer_vsx:
|
||||
// V0, V1, V2, V3
|
||||
LXVW4X (R0)(CONSTBASE), VS32
|
||||
|
@ -128,22 +142,17 @@ loop_outer_vsx:
|
|||
VSPLTISW $12, V28
|
||||
VSPLTISW $8, V29
|
||||
VSPLTISW $7, V30
|
||||
|
||||
PCALIGN $16
|
||||
loop_vsx:
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
VRLW V15, V27, V15
|
||||
VPERMXOR V12, V0, V21, V12
|
||||
VPERMXOR V13, V1, V21, V13
|
||||
VPERMXOR V14, V2, V21, V14
|
||||
VPERMXOR V15, V3, V21, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
|
@ -165,15 +174,10 @@ loop_vsx:
|
|||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
VRLW V15, V29, V15
|
||||
VPERMXOR V12, V0, V20, V12
|
||||
VPERMXOR V13, V1, V20, V13
|
||||
VPERMXOR V14, V2, V20, V14
|
||||
VPERMXOR V15, V3, V20, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
|
@ -195,15 +199,10 @@ loop_vsx:
|
|||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V27, V15
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
VPERMXOR V15, V0, V21, V15
|
||||
VPERMXOR V12, V1, V21, V12
|
||||
VPERMXOR V13, V2, V21, V13
|
||||
VPERMXOR V14, V3, V21, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
|
@ -225,15 +224,10 @@ loop_vsx:
|
|||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V29, V15
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
VPERMXOR V15, V0, V20, V15
|
||||
VPERMXOR V12, V1, V20, V12
|
||||
VPERMXOR V13, V2, V20, V13
|
||||
VPERMXOR V14, V3, V20, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
|
@ -249,48 +243,48 @@ loop_vsx:
|
|||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
VRLW V4, V30, V4
|
||||
BC 16, LT, loop_vsx
|
||||
BDNZ loop_vsx
|
||||
|
||||
VADDUWM V12, V26, V12
|
||||
|
||||
WORD $0x13600F8C // VMRGEW V0, V1, V27
|
||||
WORD $0x13821F8C // VMRGEW V2, V3, V28
|
||||
VMRGEW V0, V1, V27
|
||||
VMRGEW V2, V3, V28
|
||||
|
||||
WORD $0x10000E8C // VMRGOW V0, V1, V0
|
||||
WORD $0x10421E8C // VMRGOW V2, V3, V2
|
||||
VMRGOW V0, V1, V0
|
||||
VMRGOW V2, V3, V2
|
||||
|
||||
WORD $0x13A42F8C // VMRGEW V4, V5, V29
|
||||
WORD $0x13C63F8C // VMRGEW V6, V7, V30
|
||||
VMRGEW V4, V5, V29
|
||||
VMRGEW V6, V7, V30
|
||||
|
||||
XXPERMDI VS32, VS34, $0, VS33
|
||||
XXPERMDI VS32, VS34, $3, VS35
|
||||
XXPERMDI VS59, VS60, $0, VS32
|
||||
XXPERMDI VS59, VS60, $3, VS34
|
||||
|
||||
WORD $0x10842E8C // VMRGOW V4, V5, V4
|
||||
WORD $0x10C63E8C // VMRGOW V6, V7, V6
|
||||
VMRGOW V4, V5, V4
|
||||
VMRGOW V6, V7, V6
|
||||
|
||||
WORD $0x13684F8C // VMRGEW V8, V9, V27
|
||||
WORD $0x138A5F8C // VMRGEW V10, V11, V28
|
||||
VMRGEW V8, V9, V27
|
||||
VMRGEW V10, V11, V28
|
||||
|
||||
XXPERMDI VS36, VS38, $0, VS37
|
||||
XXPERMDI VS36, VS38, $3, VS39
|
||||
XXPERMDI VS61, VS62, $0, VS36
|
||||
XXPERMDI VS61, VS62, $3, VS38
|
||||
|
||||
WORD $0x11084E8C // VMRGOW V8, V9, V8
|
||||
WORD $0x114A5E8C // VMRGOW V10, V11, V10
|
||||
VMRGOW V8, V9, V8
|
||||
VMRGOW V10, V11, V10
|
||||
|
||||
WORD $0x13AC6F8C // VMRGEW V12, V13, V29
|
||||
WORD $0x13CE7F8C // VMRGEW V14, V15, V30
|
||||
VMRGEW V12, V13, V29
|
||||
VMRGEW V14, V15, V30
|
||||
|
||||
XXPERMDI VS40, VS42, $0, VS41
|
||||
XXPERMDI VS40, VS42, $3, VS43
|
||||
XXPERMDI VS59, VS60, $0, VS40
|
||||
XXPERMDI VS59, VS60, $3, VS42
|
||||
|
||||
WORD $0x118C6E8C // VMRGOW V12, V13, V12
|
||||
WORD $0x11CE7E8C // VMRGOW V14, V15, V14
|
||||
VMRGOW V12, V13, V12
|
||||
VMRGOW V14, V15, V14
|
||||
|
||||
VSPLTISW $4, V27
|
||||
VADDUWM V26, V27, V26
|
||||
|
@ -431,7 +425,7 @@ tail_vsx:
|
|||
ADD $-1, R11, R12
|
||||
ADD $-1, INP
|
||||
ADD $-1, OUT
|
||||
|
||||
PCALIGN $16
|
||||
looptail_vsx:
|
||||
// Copying the result to OUT
|
||||
// in bytes.
|
||||
|
@ -439,7 +433,7 @@ looptail_vsx:
|
|||
MOVBZU 1(INP), TMP
|
||||
XOR KEY, TMP, KEY
|
||||
MOVBU KEY, 1(OUT)
|
||||
BC 16, LT, looptail_vsx
|
||||
BDNZ looptail_vsx
|
||||
|
||||
// Clear the stack values
|
||||
STXVW4X VS48, (R11)(R0)
|
||||
|
|
|
@ -404,10 +404,10 @@ func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, e
|
|||
return false, err
|
||||
}
|
||||
|
||||
return confirmKeyAck(key, algo, c)
|
||||
return confirmKeyAck(key, c)
|
||||
}
|
||||
|
||||
func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
|
||||
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
|
||||
for {
|
||||
|
@ -425,7 +425,15 @@ func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
|
|||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) {
|
||||
// According to RFC 4252 Section 7 the algorithm in
|
||||
// SSH_MSG_USERAUTH_PK_OK should match that of the request but some
|
||||
// servers send the key type instead. OpenSSH allows any algorithm
|
||||
// that matches the public key, so we do the same.
|
||||
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
|
||||
if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
|
||||
return false, nil
|
||||
}
|
||||
if !bytes.Equal(msg.PubKey, pubKey) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
|
|
@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string {
|
|||
return "[" + strings.Join(errs, ", ") + "]"
|
||||
}
|
||||
|
||||
// ServerAuthCallbacks defines server-side authentication callbacks.
|
||||
type ServerAuthCallbacks struct {
|
||||
// PasswordCallback behaves like [ServerConfig.PasswordCallback].
|
||||
PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
|
||||
|
||||
// PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback].
|
||||
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
|
||||
|
||||
// KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback].
|
||||
KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
|
||||
|
||||
// GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig].
|
||||
GSSAPIWithMICConfig *GSSAPIWithMICConfig
|
||||
}
|
||||
|
||||
// PartialSuccessError can be returned by any of the [ServerConfig]
|
||||
// authentication callbacks to indicate to the client that authentication has
|
||||
// partially succeeded, but further steps are required.
|
||||
type PartialSuccessError struct {
|
||||
// Next defines the authentication callbacks to apply to further steps. The
|
||||
// available methods communicated to the client are based on the non-nil
|
||||
// ServerAuthCallbacks fields.
|
||||
Next ServerAuthCallbacks
|
||||
}
|
||||
|
||||
func (p *PartialSuccessError) Error() string {
|
||||
return "ssh: authenticated with partial success"
|
||||
}
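ServerAuthCallbacks and PartialSuccessError are new public API in this x/crypto version and enable multi-step (for example two-factor) server-side authentication. A hedged sketch of a server config that accepts a public key and then demands a keyboard-interactive OTP; authorizedKey and validOTP are placeholders for real policy checks, not library functions:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// Placeholder checks standing in for real policy lookups.
func authorizedKey(user string, key ssh.PublicKey) bool { return false }
func validOTP(user, otp string) bool                    { return false }

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			if !authorizedKey(conn.User(), key) {
				return nil, fmt.Errorf("unknown key for %q", conn.User())
			}
			// Key accepted, but require a second factor before finishing auth.
			return nil, &ssh.PartialSuccessError{Next: ssh.ServerAuthCallbacks{
				KeyboardInteractiveCallback: func(conn ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
					answers, err := challenge(conn.User(), "", []string{"OTP: "}, []bool{false})
					if err != nil {
						return nil, err
					}
					if len(answers) != 1 || !validOTP(conn.User(), answers[0]) {
						return nil, errors.New("invalid OTP")
					}
					return nil, nil
				},
			}}
		},
	}
}

func main() { _ = newServerConfig() }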
// ErrNoAuth is the error value returned if no
|
||||
// authentication method has been passed yet. This happens as a normal
|
||||
// part of the authentication loop, since the client first tries
|
||||
|
@ -439,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
|
|||
var perms *Permissions
|
||||
|
||||
authFailures := 0
|
||||
noneAuthCount := 0
|
||||
var authErrs []error
|
||||
var displayedBanner bool
|
||||
partialSuccessReturned := false
|
||||
// Set the initial authentication callbacks from the config. They can be
|
||||
// changed if a PartialSuccessError is returned.
|
||||
authConfig := ServerAuthCallbacks{
|
||||
PasswordCallback: config.PasswordCallback,
|
||||
PublicKeyCallback: config.PublicKeyCallback,
|
||||
KeyboardInteractiveCallback: config.KeyboardInteractiveCallback,
|
||||
GSSAPIWithMICConfig: config.GSSAPIWithMICConfig,
|
||||
}
|
||||
|
||||
userAuthLoop:
|
||||
for {
|
||||
|
@ -471,6 +510,11 @@ userAuthLoop:
|
|||
return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
|
||||
}
|
||||
|
||||
if s.user != userAuthReq.User && partialSuccessReturned {
|
||||
return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q",
|
||||
s.user, userAuthReq.User)
|
||||
}
|
||||
|
||||
s.user = userAuthReq.User
|
||||
|
||||
if !displayedBanner && config.BannerCallback != nil {
|
||||
|
@ -491,20 +535,18 @@ userAuthLoop:
|
|||
|
||||
switch userAuthReq.Method {
|
||||
case "none":
|
||||
if config.NoClientAuth {
|
||||
noneAuthCount++
|
||||
// We don't allow none authentication after a partial success
|
||||
// response.
|
||||
if config.NoClientAuth && !partialSuccessReturned {
|
||||
if config.NoClientAuthCallback != nil {
|
||||
perms, authErr = config.NoClientAuthCallback(s)
|
||||
} else {
|
||||
authErr = nil
|
||||
}
|
||||
}
|
||||
|
||||
// allow initial attempt of 'none' without penalty
|
||||
if authFailures == 0 {
|
||||
authFailures--
|
||||
}
|
||||
case "password":
|
||||
if config.PasswordCallback == nil {
|
||||
if authConfig.PasswordCallback == nil {
|
||||
authErr = errors.New("ssh: password auth not configured")
|
||||
break
|
||||
}
|
||||
|
@ -518,17 +560,17 @@ userAuthLoop:
|
|||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
perms, authErr = config.PasswordCallback(s, password)
|
||||
perms, authErr = authConfig.PasswordCallback(s, password)
|
||||
case "keyboard-interactive":
|
||||
if config.KeyboardInteractiveCallback == nil {
|
||||
if authConfig.KeyboardInteractiveCallback == nil {
|
||||
authErr = errors.New("ssh: keyboard-interactive auth not configured")
|
||||
break
|
||||
}
|
||||
|
||||
prompter := &sshClientKeyboardInteractive{s}
|
||||
perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
|
||||
perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge)
|
||||
case "publickey":
|
||||
if config.PublicKeyCallback == nil {
|
||||
if authConfig.PublicKeyCallback == nil {
|
||||
authErr = errors.New("ssh: publickey auth not configured")
|
||||
break
|
||||
}
|
||||
|
@ -562,11 +604,18 @@ userAuthLoop:
|
|||
if !ok {
|
||||
candidate.user = s.user
|
||||
candidate.pubKeyData = pubKeyData
|
||||
candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
|
||||
if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
|
||||
candidate.result = checkSourceAddress(
|
||||
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
|
||||
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
|
||||
|
||||
if (candidate.result == nil || isPartialSuccessError) &&
|
||||
candidate.perms != nil &&
|
||||
candidate.perms.CriticalOptions != nil &&
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
|
||||
if err := checkSourceAddress(
|
||||
s.RemoteAddr(),
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption])
|
||||
candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil {
|
||||
candidate.result = err
|
||||
}
|
||||
}
|
||||
cache.add(candidate)
|
||||
}
|
||||
|
@ -578,8 +627,8 @@ userAuthLoop:
|
|||
if len(payload) > 0 {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
}
|
||||
|
||||
if candidate.result == nil {
|
||||
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
|
||||
if candidate.result == nil || isPartialSuccessError {
|
||||
okMsg := userAuthPubKeyOkMsg{
|
||||
Algo: algo,
|
||||
PubKey: pubKeyData,
|
||||
|
@ -629,11 +678,11 @@ userAuthLoop:
|
|||
perms = candidate.perms
|
||||
}
|
||||
case "gssapi-with-mic":
|
||||
if config.GSSAPIWithMICConfig == nil {
|
||||
if authConfig.GSSAPIWithMICConfig == nil {
|
||||
authErr = errors.New("ssh: gssapi-with-mic auth not configured")
|
||||
break
|
||||
}
|
||||
gssapiConfig := config.GSSAPIWithMICConfig
|
||||
gssapiConfig := authConfig.GSSAPIWithMICConfig
|
||||
userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload)
|
||||
if err != nil {
|
||||
return nil, parseError(msgUserAuthRequest)
|
||||
|
@ -689,49 +738,70 @@ userAuthLoop:
|
|||
break userAuthLoop
|
||||
}
|
||||
|
||||
authFailures++
|
||||
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
|
||||
// If we have hit the max attempts, don't bother sending the
|
||||
// final SSH_MSG_USERAUTH_FAILURE message, since there are
|
||||
// no more authentication methods which can be attempted,
|
||||
// and this message may cause the client to re-attempt
|
||||
// authentication while we send the disconnect message.
|
||||
// Continue, and trigger the disconnect at the start of
|
||||
// the loop.
|
||||
//
|
||||
// The SSH specification is somewhat confusing about this,
|
||||
// RFC 4252 Section 5.1 requires each authentication failure
|
||||
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
|
||||
// message, but Section 4 says the server should disconnect
|
||||
// after some number of attempts, but it isn't explicit which
|
||||
// message should take precedence (i.e. should there be a failure
|
||||
// message than a disconnect message, or if we are going to
|
||||
// disconnect, should we only send that message.)
|
||||
//
|
||||
// Either way, OpenSSH disconnects immediately after the last
|
||||
// failed authnetication attempt, and given they are typically
|
||||
// considered the golden implementation it seems reasonable
|
||||
// to match that behavior.
|
||||
continue
|
||||
var failureMsg userAuthFailureMsg
|
||||
|
||||
if partialSuccess, ok := authErr.(*PartialSuccessError); ok {
|
||||
// After a partial success error we don't allow changing the user
|
||||
// name and execute the NoClientAuthCallback.
|
||||
partialSuccessReturned = true
|
||||
|
||||
// In case a partial success is returned, the server may send
|
||||
// a new set of authentication methods.
|
||||
authConfig = partialSuccess.Next
|
||||
|
||||
// Reset pubkey cache, as the new PublicKeyCallback might
|
||||
// accept a different set of public keys.
|
||||
cache = pubKeyCache{}
|
||||
|
||||
// Send back a partial success message to the user.
|
||||
failureMsg.PartialSuccess = true
|
||||
} else {
|
||||
// Allow initial attempt of 'none' without penalty.
|
||||
if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 {
|
||||
authFailures++
|
||||
}
|
||||
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
|
||||
// If we have hit the max attempts, don't bother sending the
|
||||
// final SSH_MSG_USERAUTH_FAILURE message, since there are
|
||||
// no more authentication methods which can be attempted,
|
||||
// and this message may cause the client to re-attempt
|
||||
// authentication while we send the disconnect message.
|
||||
// Continue, and trigger the disconnect at the start of
|
||||
// the loop.
|
||||
//
|
||||
// The SSH specification is somewhat confusing about this,
|
||||
// RFC 4252 Section 5.1 requires each authentication failure
|
||||
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
|
||||
// message, but Section 4 says the server should disconnect
|
||||
// after some number of attempts, but it isn't explicit which
|
||||
// message should take precedence (i.e. should there be a failure
|
||||
// message than a disconnect message, or if we are going to
|
||||
// disconnect, should we only send that message.)
|
||||
//
|
||||
// Either way, OpenSSH disconnects immediately after the last
|
||||
// failed authentication attempt, and given they are typically
|
||||
// considered the golden implementation it seems reasonable
|
||||
// to match that behavior.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var failureMsg userAuthFailureMsg
|
||||
if config.PasswordCallback != nil {
|
||||
if authConfig.PasswordCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "password")
|
||||
}
|
||||
if config.PublicKeyCallback != nil {
|
||||
if authConfig.PublicKeyCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "publickey")
|
||||
}
|
||||
if config.KeyboardInteractiveCallback != nil {
|
||||
if authConfig.KeyboardInteractiveCallback != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
|
||||
}
|
||||
if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil &&
|
||||
config.GSSAPIWithMICConfig.AllowLogin != nil {
|
||||
if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil &&
|
||||
authConfig.GSSAPIWithMICConfig.AllowLogin != nil {
|
||||
failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic")
|
||||
}
|
||||
|
||||
if len(failureMsg.Methods) == 0 {
|
||||
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
|
||||
return nil, errors.New("ssh: no authentication methods available")
|
||||
}
|
||||
|
||||
if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
|
||||
|
|
|
@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML
|
|||
parsing specification respectively. While the tokenizer parses and normalizes
|
||||
individual HTML tokens, only the parser constructs the DOM tree from the
|
||||
tokenized HTML, as described in the tree construction stage of the
|
||||
specification, dynamically modifying or extending the docuemnt's DOM tree.
|
||||
specification, dynamically modifying or extending the document's DOM tree.
|
||||
|
||||
If your use case requires semantically well-formed HTML documents, as defined by
|
||||
the WHATWG specification, the parser should be used rather than the tokenizer.
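A hedged illustration of that distinction, using deliberately malformed input (the snippet and its sample HTML are illustrative, not taken from the package docs):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const src = `<p>one<p>two` // second <p> implicitly closes the first

	// Parser: builds a well-formed DOM tree per the WHATWG tree-construction rules.
	doc, err := html.Parse(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	fmt.Println(doc.FirstChild.Data) // "html" element synthesized by the parser

	// Tokenizer: emits tokens as they appear, with no tree construction or repair.
	z := html.NewTokenizer(strings.NewReader(src))
	for z.Next() != html.ErrorToken {
		fmt.Println(z.Token().String())
	}
}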
@ -12,7 +12,7 @@ import (
|
|||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
var isTokenTable = [127]bool{
|
||||
var isTokenTable = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
|
@ -93,12 +93,7 @@ var isTokenTable = [127]bool{
|
|||
}
|
||||
|
||||
func IsTokenRune(r rune) bool {
|
||||
i := int(r)
|
||||
return i < len(isTokenTable) && isTokenTable[i]
|
||||
}
|
||||
|
||||
func isNotToken(r rune) bool {
|
||||
return !IsTokenRune(r)
|
||||
return r < utf8.RuneSelf && isTokenTable[byte(r)]
|
||||
}
|
||||
|
||||
// HeaderValuesContainsToken reports whether any string in values
|
||||
|
@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool {
|
|||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !IsTokenRune(r) {
|
||||
for i := 0; i < len(v); i++ {
|
||||
if !isTokenTable[v[i]] {
|
||||
return false
|
||||
}
|
||||
}
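A short usage sketch of the byte-indexed check above through the exported entry point; results follow RFC 7230's token rules, and the widened [256]bool table rejects non-ASCII bytes directly:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	fmt.Println(httpguts.ValidHeaderFieldName("Content-Type")) // true
	fmt.Println(httpguts.ValidHeaderFieldName("Bad Header"))   // false: space is not a token byte
	fmt.Println(httpguts.ValidHeaderFieldName("naïve"))        // false: non-ASCII bytes are rejected
}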
@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool {
|
|||
// returned error is ErrFrameTooLarge. Other errors may be of type
|
||||
// ConnectionError, StreamError, or anything else from the underlying
|
||||
// reader.
|
||||
//
|
||||
// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
|
||||
// indicates the stream responsible for the error.
|
||||
func (fr *Framer) ReadFrame() (Frame, error) {
|
||||
fr.errDetail = nil
|
||||
if fr.lastFrame != nil {
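A hedged consumer-side sketch of the amended contract documented above: on error the returned Frame may now be non-nil, and its header identifies the offending stream. conn is assumed to be an already-established HTTP/2 connection; driving the loop is left out:

package main

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

func readLoop(conn net.Conn) error {
	fr := http2.NewFramer(conn, conn) // writer, then reader
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			if f != nil {
				log.Printf("frame error on stream %d: %v", f.Header().StreamID, err)
			}
			return err
		}
		log.Printf("got %T on stream %d", f, f.Header().StreamID)
	}
}

func main() {} // readLoop would be called from a real server or client loop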
@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int {
|
|||
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
|
||||
// merge them into the provided hf and returns a MetaHeadersFrame
|
||||
// with the decoded hpack values.
|
||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) {
|
||||
if fr.AllowIllegalReads {
|
||||
return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
|
||||
}
|
||||
|
@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||
}
|
||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||
// but the structure of the server's frame writer makes this difficult.
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return mh, ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
|
||||
// Also close the connection after any CONTINUATION frame following an
|
||||
|
@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||
}
|
||||
// It would be nice to send a RST_STREAM before sending the GOAWAY,
|
||||
// but the structure of the server's frame writer makes this difficult.
|
||||
return nil, ConnectionError(ErrCodeProtocol)
|
||||
return mh, ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
|
||||
if _, err := hdec.Write(frag); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
return mh, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
|
||||
if hc.HeadersEnded() {
|
||||
|
@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
|||
mh.HeadersFrame.invalidate()
|
||||
|
||||
if err := hdec.Close(); err != nil {
|
||||
return nil, ConnectionError(ErrCodeCompression)
|
||||
return mh, ConnectionError(ErrCodeCompression)
|
||||
}
|
||||
if invalid != nil {
|
||||
fr.errDetail = invalid
|
||||
|
|
|
@ -732,11 +732,7 @@ func isClosedConnError(err error) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// TODO: remove this string search and be more like the Windows
|
||||
// case below. That might involve modifying the standard library
|
||||
// to return better error types.
|
||||
str := err.Error()
|
||||
if strings.Contains(str, "use of closed network connection") {
|
||||
if errors.Is(err, net.ErrClosed) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -1482,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
|
|||
sc.goAway(ErrCodeFlowControl)
|
||||
return true
|
||||
case ConnectionError:
|
||||
if res.f != nil {
|
||||
if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
|
||||
sc.maxClientStreamID = id
|
||||
}
|
||||
}
|
||||
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
|
||||
sc.goAway(ErrCode(ev))
|
||||
return true // goAway will handle shutdown
|
||||
|
|
|
@ -936,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
|
|||
}
|
||||
last := f.LastStreamID
|
||||
for streamID, cs := range cc.streams {
|
||||
if streamID > last {
|
||||
if streamID <= last {
|
||||
// The server's GOAWAY indicates that it received this stream.
|
||||
// It will either finish processing it, or close the connection
|
||||
// without doing so. Either way, leave the stream alone for now.
|
||||
continue
|
||||
}
|
||||
if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
|
||||
// Don't retry the first stream on a connection if we get a non-NO error.
|
||||
// If the server is sending an error on a new connection,
|
||||
// retrying the request on a new one probably isn't going to work.
|
||||
cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
|
||||
} else {
|
||||
// Aborting the stream with errClentConnGotGoAway indicates that
|
||||
// the request should be retried on a new connection.
|
||||
cs.abortStreamLocked(errClientConnGotGoAway)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,11 +35,25 @@ type Weighted struct {
|
|||
// Acquire acquires the semaphore with a weight of n, blocking until resources
|
||||
// are available or ctx is done. On success, returns nil. On failure, returns
|
||||
// ctx.Err() and leaves the semaphore unchanged.
|
||||
//
|
||||
// If ctx is already done, Acquire may still succeed without blocking.
|
||||
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
|
||||
done := ctx.Done()
|
||||
|
||||
s.mu.Lock()
|
||||
select {
|
||||
case <-done:
|
||||
// ctx becoming done has "happened before" acquiring the semaphore,
|
||||
// whether it became done before the call began or while we were
|
||||
// waiting for the mutex. We prefer to fail even if we could acquire
|
||||
// the mutex without blocking.
|
||||
s.mu.Unlock()
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
if s.size-s.cur >= n && s.waiters.Len() == 0 {
|
||||
// Since we hold s.mu and haven't synchronized since checking done, if
|
||||
// ctx becomes done before we return here, it becoming done must have
|
||||
// "happened concurrently" with this call - it cannot "happen before"
|
||||
// we return in this branch. So, we're ok to always acquire here.
|
||||
s.cur += n
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
|
@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
|
|||
if n > s.size {
|
||||
// Don't make other Acquire calls block on one that's doomed to fail.
|
||||
s.mu.Unlock()
|
||||
<-ctx.Done()
|
||||
<-done
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
|
@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
|
|||
s.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err := ctx.Err()
|
||||
case <-done:
|
||||
s.mu.Lock()
|
||||
select {
|
||||
case <-ready:
|
||||
// Acquired the semaphore after we were canceled. Rather than trying to
|
||||
// fix up the queue, just pretend we didn't notice the cancelation.
|
||||
err = nil
|
||||
// Acquired the semaphore after we were canceled.
|
||||
// Pretend we didn't and put the tokens back.
|
||||
s.cur -= n
|
||||
s.notifyWaiters()
|
||||
default:
|
||||
isFront := s.waiters.Front() == elem
|
||||
s.waiters.Remove(elem)
|
||||
|
@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error {
|
|||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
return ctx.Err()
|
||||
|
||||
case <-ready:
|
||||
// Acquired the semaphore. Check that ctx isn't already done.
|
||||
// We check the done channel instead of calling ctx.Err because we
|
||||
// already have the channel, and ctx.Err is O(n) with the nesting
|
||||
// depth of ctx.
|
||||
select {
|
||||
case <-done:
|
||||
s.Release(n)
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
|
|||
#include <linux/sched.h>
|
||||
#include <linux/seccomp.h>
|
||||
#include <linux/serial.h>
|
||||
#include <linux/sock_diag.h>
|
||||
#include <linux/sockios.h>
|
||||
#include <linux/taskstats.h>
|
||||
#include <linux/tipc.h>
|
||||
|
@ -549,6 +550,7 @@ ccflags="$@"
|
|||
$2 !~ "NLA_TYPE_MASK" &&
|
||||
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
|
||||
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
|
||||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
|
||||
$2 ~ /^FIORDCHK$/ ||
|
||||
$2 ~ /^SIOC/ ||
|
||||
$2 ~ /^TIOC/ ||
|
||||
|
|
|
@ -502,6 +502,7 @@ const (
|
|||
BPF_IMM = 0x0
|
||||
BPF_IND = 0x40
|
||||
BPF_JA = 0x0
|
||||
BPF_JCOND = 0xe0
|
||||
BPF_JEQ = 0x10
|
||||
BPF_JGE = 0x30
|
||||
BPF_JGT = 0x20
|
||||
|
@ -657,6 +658,9 @@ const (
|
|||
CAN_NPROTO = 0x8
|
||||
CAN_RAW = 0x1
|
||||
CAN_RAW_FILTER_MAX = 0x200
|
||||
CAN_RAW_XL_VCID_RX_FILTER = 0x4
|
||||
CAN_RAW_XL_VCID_TX_PASS = 0x2
|
||||
CAN_RAW_XL_VCID_TX_SET = 0x1
|
||||
CAN_RTR_FLAG = 0x40000000
|
||||
CAN_SFF_ID_BITS = 0xb
|
||||
CAN_SFF_MASK = 0x7ff
|
||||
|
@ -1339,6 +1343,7 @@ const (
|
|||
F_OFD_SETLK = 0x25
|
||||
F_OFD_SETLKW = 0x26
|
||||
F_OK = 0x0
|
||||
F_SEAL_EXEC = 0x20
|
||||
F_SEAL_FUTURE_WRITE = 0x10
|
||||
F_SEAL_GROW = 0x4
|
||||
F_SEAL_SEAL = 0x1
|
||||
|
@ -1627,6 +1632,7 @@ const (
|
|||
IP_FREEBIND = 0xf
|
||||
IP_HDRINCL = 0x3
|
||||
IP_IPSEC_POLICY = 0x10
|
||||
IP_LOCAL_PORT_RANGE = 0x33
|
||||
IP_MAXPACKET = 0xffff
|
||||
IP_MAX_MEMBERSHIPS = 0x14
|
||||
IP_MF = 0x2000
|
||||
|
@ -1653,6 +1659,7 @@ const (
|
|||
IP_PMTUDISC_OMIT = 0x5
|
||||
IP_PMTUDISC_PROBE = 0x3
|
||||
IP_PMTUDISC_WANT = 0x1
|
||||
IP_PROTOCOL = 0x34
|
||||
IP_RECVERR = 0xb
|
||||
IP_RECVERR_RFC4884 = 0x1a
|
||||
IP_RECVFRAGSIZE = 0x19
|
||||
|
@ -2169,7 +2176,7 @@ const (
|
|||
NFT_SECMARK_CTX_MAXLEN = 0x100
|
||||
NFT_SET_MAXNAMELEN = 0x100
|
||||
NFT_SOCKET_MAX = 0x3
|
||||
NFT_TABLE_F_MASK = 0x3
|
||||
NFT_TABLE_F_MASK = 0x7
|
||||
NFT_TABLE_MAXNAMELEN = 0x100
|
||||
NFT_TRACETYPE_MAX = 0x3
|
||||
NFT_TUNNEL_F_MASK = 0x7
|
||||
|
@ -2403,6 +2410,7 @@ const (
|
|||
PERF_RECORD_MISC_USER = 0x2
|
||||
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
|
||||
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
|
||||
PID_FS_MAGIC = 0x50494446
|
||||
PIPEFS_MAGIC = 0x50495045
|
||||
PPPIOCGNPMODE = 0xc008744c
|
||||
PPPIOCNEWUNIT = 0xc004743e
|
||||
|
@ -2896,8 +2904,9 @@ const (
|
|||
RWF_APPEND = 0x10
|
||||
RWF_DSYNC = 0x2
|
||||
RWF_HIPRI = 0x1
|
||||
RWF_NOAPPEND = 0x20
|
||||
RWF_NOWAIT = 0x8
|
||||
RWF_SUPPORTED = 0x1f
|
||||
RWF_SUPPORTED = 0x3f
|
||||
RWF_SYNC = 0x4
|
||||
RWF_WRITE_LIFE_NOT_SET = 0x0
|
||||
SCHED_BATCH = 0x3
|
||||
|
@ -2918,7 +2927,9 @@ const (
|
|||
SCHED_RESET_ON_FORK = 0x40000000
|
||||
SCHED_RR = 0x2
|
||||
SCM_CREDENTIALS = 0x2
|
||||
SCM_PIDFD = 0x4
|
||||
SCM_RIGHTS = 0x1
|
||||
SCM_SECURITY = 0x3
|
||||
SCM_TIMESTAMP = 0x1d
|
||||
SC_LOG_FLUSH = 0x100000
|
||||
SECCOMP_ADDFD_FLAG_SEND = 0x2
|
||||
|
@ -3051,6 +3062,8 @@ const (
|
|||
SIOCSMIIREG = 0x8949
|
||||
SIOCSRARP = 0x8962
|
||||
SIOCWANDEV = 0x894a
|
||||
SK_DIAG_BPF_STORAGE_MAX = 0x3
|
||||
SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1
|
||||
SMACK_MAGIC = 0x43415d53
|
||||
SMART_AUTOSAVE = 0xd2
|
||||
SMART_AUTO_OFFLINE = 0xdb
|
||||
|
@ -3071,6 +3084,8 @@ const (
|
|||
SOCKFS_MAGIC = 0x534f434b
|
||||
SOCK_BUF_LOCK_MASK = 0x3
|
||||
SOCK_DCCP = 0x6
|
||||
SOCK_DESTROY = 0x15
|
||||
SOCK_DIAG_BY_FAMILY = 0x14
|
||||
SOCK_IOC_TYPE = 0x89
|
||||
SOCK_PACKET = 0xa
|
||||
SOCK_RAW = 0x3
|
||||
|
@ -3260,6 +3275,7 @@ const (
|
|||
TCP_MAX_WINSHIFT = 0xe
|
||||
TCP_MD5SIG = 0xe
|
||||
TCP_MD5SIG_EXT = 0x20
|
||||
TCP_MD5SIG_FLAG_IFINDEX = 0x2
|
||||
TCP_MD5SIG_FLAG_PREFIX = 0x1
|
||||
TCP_MD5SIG_MAXKEYLEN = 0x50
|
||||
TCP_MSS = 0x200
|
||||
|
|
|
@ -118,6 +118,7 @@ const (
|
|||
IXOFF = 0x1000
|
||||
IXON = 0x400
|
||||
MAP_32BIT = 0x40
|
||||
MAP_ABOVE4G = 0x80
|
||||
MAP_ANON = 0x20
|
||||
MAP_ANONYMOUS = 0x20
|
||||
MAP_DENYWRITE = 0x800
|
||||
|
|
|
@ -118,6 +118,7 @@ const (
|
|||
IXOFF = 0x1000
|
||||
IXON = 0x400
|
||||
MAP_32BIT = 0x40
|
||||
MAP_ABOVE4G = 0x80
|
||||
MAP_ANON = 0x20
|
||||
MAP_ANONYMOUS = 0x20
|
||||
MAP_DENYWRITE = 0x800
|
||||
|
|
|
@ -87,6 +87,7 @@ const (
|
|||
FICLONE = 0x40049409
|
||||
FICLONERANGE = 0x4020940d
|
||||
FLUSHO = 0x1000
|
||||
FPMR_MAGIC = 0x46504d52
|
||||
FPSIMD_MAGIC = 0x46508001
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
|
|
|
@ -4605,7 +4605,7 @@ const (
|
|||
NL80211_ATTR_MAC_HINT = 0xc8
|
||||
NL80211_ATTR_MAC_MASK = 0xd7
|
||||
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
|
||||
NL80211_ATTR_MAX = 0x149
|
||||
NL80211_ATTR_MAX = 0x14a
|
||||
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
|
||||
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
|
||||
NL80211_ATTR_MAX_MATCH_SETS = 0x85
|
||||
|
@ -5209,7 +5209,7 @@ const (
|
|||
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
|
||||
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
|
||||
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
|
||||
NL80211_FREQUENCY_ATTR_MAX = 0x1f
|
||||
NL80211_FREQUENCY_ATTR_MAX = 0x20
|
||||
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
|
||||
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
|
||||
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
|
||||
|
@ -5703,7 +5703,7 @@ const (
|
|||
NL80211_STA_FLAG_ASSOCIATED = 0x7
|
||||
NL80211_STA_FLAG_AUTHENTICATED = 0x5
|
||||
NL80211_STA_FLAG_AUTHORIZED = 0x1
|
||||
NL80211_STA_FLAG_MAX = 0x7
|
||||
NL80211_STA_FLAG_MAX = 0x8
|
||||
NL80211_STA_FLAG_MAX_OLD_API = 0x6
|
||||
NL80211_STA_FLAG_MFP = 0x4
|
||||
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
|
||||
|
@ -6001,3 +6001,34 @@ type CachestatRange struct {
|
|||
Off uint64
|
||||
Len uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SK_MEMINFO_RMEM_ALLOC = 0x0
|
||||
SK_MEMINFO_RCVBUF = 0x1
|
||||
SK_MEMINFO_WMEM_ALLOC = 0x2
|
||||
SK_MEMINFO_SNDBUF = 0x3
|
||||
SK_MEMINFO_FWD_ALLOC = 0x4
|
||||
SK_MEMINFO_WMEM_QUEUED = 0x5
|
||||
SK_MEMINFO_OPTMEM = 0x6
|
||||
SK_MEMINFO_BACKLOG = 0x7
|
||||
SK_MEMINFO_DROPS = 0x8
|
||||
SK_MEMINFO_VARS = 0x9
|
||||
SKNLGRP_NONE = 0x0
|
||||
SKNLGRP_INET_TCP_DESTROY = 0x1
|
||||
SKNLGRP_INET_UDP_DESTROY = 0x2
|
||||
SKNLGRP_INET6_TCP_DESTROY = 0x3
|
||||
SKNLGRP_INET6_UDP_DESTROY = 0x4
|
||||
SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0
|
||||
SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1
|
||||
SK_DIAG_BPF_STORAGE_REP_NONE = 0x0
|
||||
SK_DIAG_BPF_STORAGE = 0x1
|
||||
SK_DIAG_BPF_STORAGE_NONE = 0x0
|
||||
SK_DIAG_BPF_STORAGE_PAD = 0x1
|
||||
SK_DIAG_BPF_STORAGE_MAP_ID = 0x2
|
||||
SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3
|
||||
)
|
||||
|
||||
type SockDiagReq struct {
|
||||
Family uint8
|
||||
Protocol uint8
|
||||
}
|
||||
|
|
|
@ -68,6 +68,7 @@ type UserInfo10 struct {
|
|||
//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
|
||||
//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
|
||||
//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
|
||||
//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum
|
||||
|
||||
const (
|
||||
// do not reorder
|
||||
|
|
|
@ -401,6 +401,7 @@ var (
|
|||
procTransmitFile = modmswsock.NewProc("TransmitFile")
|
||||
procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree")
|
||||
procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation")
|
||||
procNetUserEnum = modnetapi32.NewProc("NetUserEnum")
|
||||
procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo")
|
||||
procNtCreateFile = modntdll.NewProc("NtCreateFile")
|
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
|
||||
|
@@ -3486,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
 	return
 }
 
+func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
+	r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
 func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
 	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
 	if r0 != 0 {

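For context on the newly vendored NetUserEnum wrapper, here is a minimal usage sketch; it is not code from this PR, and the information level, filter value, and buffer size passed below are illustrative assumptions (the signature itself matches the wrapper above). It enumerates local users and releases the API buffer with NetApiBufferFree:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	var (
		buf          *byte
		entriesRead  uint32
		totalEntries uint32
		resumeHandle uint32
	)
	// Level 0 and filter 0 are illustrative; 0xFFFFFFFF asks the API to pick
	// the buffer size (MAX_PREFERRED_LENGTH). Consult the NetUserEnum docs
	// for the levels and filter flags your code actually needs.
	err := windows.NetUserEnum(nil, 0, 0, &buf, 0xFFFFFFFF,
		&entriesRead, &totalEntries, &resumeHandle)
	if err != nil {
		log.Fatalf("NetUserEnum: %v", err)
	}
	defer windows.NetApiBufferFree(buf)

	fmt.Printf("enumerated %d of %d local users\n", entriesRead, totalEntries)
}
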
@@ -335,7 +335,7 @@ github.com/felixge/httpsnoop
 # github.com/fvbommel/sortorder v1.0.1
 ## explicit; go 1.13
 github.com/fvbommel/sortorder
-# github.com/go-logr/logr v1.3.0
+# github.com/go-logr/logr v1.4.1
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr

@@ -357,8 +357,8 @@ github.com/go-openapi/swag
 ## explicit; go 1.18
 github.com/go-viper/mapstructure/v2
 github.com/go-viper/mapstructure/v2/internal/errors
-# github.com/gofrs/flock v0.8.1
-## explicit
+# github.com/gofrs/flock v0.12.0
+## explicit; go 1.21.0
 github.com/gofrs/flock
 # github.com/gogo/googleapis v1.4.1
 ## explicit; go 1.12

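The gofrs/flock bump (v0.8.1 to v0.12.0) covers the file-locking primitive buildx uses to serialize access to shared state files. A minimal sketch of the library's long-stable API, under the assumption that New/TryLock/Unlock behave the same across these versions (the lock path below is made up):

package main

import (
	"fmt"
	"log"

	"github.com/gofrs/flock"
)

func main() {
	// Path is an arbitrary example; buildx keeps its own lock files elsewhere.
	l := flock.New("/tmp/example.lock")

	locked, err := l.TryLock()
	if err != nil {
		log.Fatalf("try lock: %v", err)
	}
	if !locked {
		log.Fatal("lock is held by another process")
	}
	defer l.Unlock()

	fmt.Println("holding", l.Path())
}
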
@@ -518,8 +518,8 @@ github.com/mitchellh/go-wordwrap
 github.com/mitchellh/hashstructure/v2
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
-# github.com/moby/buildkit v0.14.1-0.20240703051140-f7bda278b7e2
-## explicit; go 1.21
+# github.com/moby/buildkit v0.15.0-rc1
+## explicit; go 1.21.0
 github.com/moby/buildkit/api/services/control
 github.com/moby/buildkit/api/types
 github.com/moby/buildkit/client

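The github.com/moby/buildkit/client package vendored here is the API buildx drives. As a rough orientation only (not code from this PR), connecting to a BuildKit daemon and listing its workers looks roughly like the sketch below; the socket address is an assumption, and buildx resolves the real endpoint per driver:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()

	// Example address; adjust to wherever buildkitd is listening.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer c.Close()

	workers, err := c.ListWorkers(ctx)
	if err != nil {
		log.Fatalf("list workers: %v", err)
	}
	for _, w := range workers {
		fmt.Println(w.ID, w.Platforms)
	}
}
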
@@ -673,8 +673,8 @@ github.com/prometheus/client_model/go
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.12.0
-## explicit; go 1.19
+# github.com/prometheus/procfs v0.15.1
+## explicit; go 1.20
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util

@@ -851,7 +851,7 @@ go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/metrics/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# golang.org/x/crypto v0.21.0
+# golang.org/x/crypto v0.23.0
 ## explicit; go 1.18
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish

@@ -876,7 +876,7 @@ golang.org/x/exp/slices
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.23.0
+# golang.org/x/net v0.25.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html

@@ -893,21 +893,21 @@ golang.org/x/net/trace
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.6.0
+# golang.org/x/sync v0.7.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.20.0
+# golang.org/x/sys v0.21.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.18.0
+# golang.org/x/term v0.20.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.14.0
+# golang.org/x/text v0.15.0
 ## explicit; go 1.18
 golang.org/x/text/cases
 golang.org/x/text/internal
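golang.org/x/sync/errgroup and semaphore, listed just above, are the concurrency primitives buildx relies on for running build targets in parallel. A minimal errgroup sketch with placeholder work, offered only as orientation for the vendored package:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	for _, name := range []string{"linux/amd64", "linux/arm64"} {
		name := name // capture loop variable (pre-Go 1.22 semantics)
		eg.Go(func() error {
			// Placeholder for per-platform work; real code would build here.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				fmt.Println("built", name)
				return nil
			}
		})
	}

	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}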