build: add experimental support for print flag

The print flag can be used to make additional information requests
about the build and print their results.

Currently, the Dockerfile frontend supports: outline, targets, subrequests.describe.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi, 2022-05-04 18:48:50 -07:00
parent 17dc0e1108
commit eefa8188e1
98 changed files with 8536 additions and 3339 deletions


@ -81,7 +81,8 @@ type Options struct {
Ulimits *opts.UlimitOpt
// Linked marks this target as exclusively linked (not requested by the user).
Linked bool
Linked bool
PrintFunc string
}
type Inputs struct {
@ -1039,11 +1040,23 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
defer func() { <-done }()
cc := c
var printRes map[string][]byte
rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
if opt.PrintFunc != "" {
if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
} else {
req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
}
req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc
}
res, err := c.Solve(ctx, req)
if err != nil {
return nil, err
}
if opt.PrintFunc != "" {
printRes = res.Metadata
}
results.Set(resultKey(dp.driverIndex, k), res)
if resultHandleFunc != nil {
resultHandleFunc(dp.driverIndex, &ResultContext{cc, res})
@ -1055,6 +1068,10 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
}
res[i] = rr
for k, v := range printRes {
rr.ExporterResponse[k] = string(v)
}
d := drivers[dp.driverIndex].Driver
if d.IsMobyDriver() {
for _, e := range so.Exports {
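
As context for the hunk above: when opt.PrintFunc is set, the gateway callback advertises the subrequests capability and rewrites the request id before solving, and the solve metadata is later merged into ExporterResponse. A minimal, self-contained sketch of that option rewriting (illustrative helper, not the buildx code itself):

package main

import "fmt"

// applyPrintFunc mirrors the frontend-option rewrite shown above:
// advertise the subrequests capability and point the request id at
// the chosen print function.
func applyPrintFunc(frontendOpt map[string]string, printFunc string) {
	if printFunc == "" {
		return
	}
	const capability = "moby.buildkit.frontend.subrequests+forward"
	if v, ok := frontendOpt["frontend.caps"]; ok {
		frontendOpt["frontend.caps"] = v + "," + capability
	} else {
		frontendOpt["frontend.caps"] = capability
	}
	frontendOpt["requestid"] = "frontend." + printFunc
}

func main() {
	opt := map[string]string{}
	applyPrintFunc(opt, "outline")
	fmt.Println(opt) // map[frontend.caps:moby.buildkit.frontend.subrequests+forward requestid:frontend.outline]
}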


@ -48,6 +48,7 @@ const defaultTargetName = "default"
type buildOptions struct {
contextPath string
dockerfileName string
printFunc string
allow []string
buildArgs []string
@ -141,6 +142,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
Tags: in.tags,
Target: in.target,
Ulimits: in.ulimits,
PrintFunc: in.printFunc,
}
platforms, err := platformutil.Parse(in.platforms)
@ -307,6 +309,14 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
printWarnings(os.Stderr, printer.Warnings(), progressMode)
for k := range resp {
if opts[k].PrintFunc != "" {
if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
return "", nil, err
}
}
}
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], res, err
}
@ -463,6 +473,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
if isExperimental() {
flags.StringVar(&options.printFunc, "print", "", "Print result of information request (outline, targets)")
}
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
@ -481,7 +495,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.Var(options.ulimits, "ulimit", "Ulimit options")
if os.Getenv("BUILDX_EXPERIMENTAL") == "1" {
if isExperimental() {
flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build. BUILDX_EXPERIMENTAL=1 is required.")
}
@ -652,3 +666,11 @@ func (w *wrapped) Error() string {
func (w *wrapped) Unwrap() error {
return w.err
}
func isExperimental() bool {
if v, ok := os.LookupEnv("BUILDKIT_EXPERIMENTAL"); ok {
vv, _ := strconv.ParseBool(v)
return vv
}
return false
}
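
Note that the new isExperimental helper gates both --print and --invoke on BUILDKIT_EXPERIMENTAL parsed with strconv.ParseBool, rather than the previous exact BUILDX_EXPERIMENTAL == "1" comparison. A quick stand-alone illustration of which values pass that gate (only the env-var semantics shown in the hunk; the example values are mine):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// strconv.ParseBool accepts "1", "t", "T", "TRUE", "true", "True" as true
	// (and the matching spellings of false); anything else returns an error.
	for _, v := range []string{"1", "true", "True", "0", "false", "yes"} {
		b, err := strconv.ParseBool(v)
		// isExperimental ignores the error, so unparsable values ("yes") behave as false.
		fmt.Printf("BUILDKIT_EXPERIMENTAL=%-5s -> experimental=%v (parse error: %v)\n", v, b, err)
	}
}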

commands/print.go (new file, 35 lines)

@ -0,0 +1,35 @@
package commands
import (
"fmt"
"log"
"os"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/frontend/subrequests/outline"
"github.com/moby/buildkit/frontend/subrequests/targets"
)
func printResult(f string, res map[string]string) error {
switch f {
case "outline":
if err := outline.PrintOutline([]byte(res["result.json"]), os.Stdout); err != nil {
return err
}
case "targets":
if err := targets.PrintTargets([]byte(res["result.json"]), os.Stdout); err != nil {
return err
}
case "subrequests.describe":
if err := subrequests.PrintDescribe([]byte(res["result.json"]), os.Stdout); err != nil {
return err
}
default:
if dt, ok := res["result.txt"]; ok {
fmt.Print(dt)
} else {
log.Printf("%s %+v", f, res)
}
}
return nil
}
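
For a sense of what reaches printResult: the metadata merged into ExporterResponse earlier carries either result.json (structured output handed to the buildkit subrequest printers) or result.txt (plain text). A small sketch of the fallback path only, using a hypothetical response map rather than real build output:

package main

import (
	"fmt"
	"log"
)

// printFallback mirrors the default branch of printResult above for a
// request name without a dedicated printer (illustrative stand-in).
func printFallback(name string, res map[string]string) {
	if dt, ok := res["result.txt"]; ok {
		fmt.Print(dt)
		return
	}
	log.Printf("%s %+v", name, res)
}

func main() {
	// Hypothetical exporter response; a real one comes from res.Metadata.
	res := map[string]string{"result.txt": "frontend says hello\n"}
	printFallback("someotherrequest", res)
}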

go.mod

@ -15,7 +15,7 @@ require (
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
github.com/hashicorp/hcl/v2 v2.8.2
github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4
github.com/moby/buildkit v0.10.1-0.20220809151411-8488654e899b
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
@ -31,7 +31,7 @@ require (
go.opentelemetry.io/otel/trace v1.4.1
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
google.golang.org/grpc v1.45.0
google.golang.org/grpc v1.47.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.5
k8s.io/apimachinery v0.23.5
@ -95,7 +95,7 @@ require (
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/klauspost/compress v1.15.1 // indirect
github.com/klauspost/compress v1.15.7 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
@ -110,7 +110,7 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_golang v1.12.2 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
@ -138,9 +138,9 @@ require (
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect

go.sum

@ -117,9 +117,9 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/compose-spec/compose-go v1.4.0 h1:zaYVAZ6lIByr7Jffi20AabfeUwcTrdXfH3X1R5HEm+g=
github.com/compose-spec/compose-go v1.4.0/go.mod h1:l7RUULbFFLzlQHuxtJr7SVLyWdqEpbJEGTWCgcu6Eqw=
@ -190,8 +190,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
@ -293,6 +293,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -377,8 +378,8 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALr
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.7 h1:7cgTQxJCU/vy+oP/E3B9RGbQTgbiVzIJWIKOLoAsPok=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@ -410,14 +411,14 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4 h1:+AwP6ma57EBQ5+eOSsg29MAylks33kt2MverACSqJv0=
github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4/go.mod h1:yle9eiU1fiJ/WhC4VTLOaQ6rxFou1mc4AhwScHwysi0=
github.com/moby/buildkit v0.10.1-0.20220809151411-8488654e899b h1:F/f/Ixaw8REoF5EjkMLh/2RBrsP0jeCPGBlczfqy9aw=
github.com/moby/buildkit v0.10.1-0.20220809151411-8488654e899b/go.mod h1:Wa+LkeUQ9NJTVXTAY38rhkfKVQcuCIo2fbavRSuGsbI=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
@ -474,8 +475,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -837,8 +838,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -965,8 +967,8 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 h1:FglFEfyj61zP3c6LgjmVHxYxZWXYul9oiS1EZqD5gLc=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -987,11 +989,10 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
# Linux perf files
perf.data
perf.data.old
# gdb history
.gdb_history


@ -17,6 +17,53 @@ This package provides various compression algorithms.
# changelog
* June 3, 2022 (v1.15.6)
* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
* zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
* zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
* zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
* gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
* s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
* s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
* snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
* s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
* May 25, 2022 (v1.15.5)
* s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
* s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
* huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
* zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
* zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
* May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
* Apr 26, 2022 (v1.15.2)
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
* Minimum version is Go 1.16, added CI test on 1.18.
* Mar 11, 2022 (v1.15.1)
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
@ -60,6 +107,9 @@ While the release has been extensively tested, it is recommended to testing when
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
<details>
<summary>See changes to v1.13.x</summary>
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@ -88,6 +138,8 @@ While the release has been extensively tested, it is recommended to testing when
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
</details>
<details>
<summary>See changes to v1.12.x</summary>


@ -1,5 +0,0 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s


@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
// peekTopBits(n) is equvialent to peekBitFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}


@ -5,8 +5,6 @@
package huff0
import "fmt"
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
b.nBits += bits
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
if bits == 0 {
return
}
value <<= (16 - bits) & 15
value >>= (16 - bits) & 15
b.bitContainer |= uint64(value) << (b.nBits & 63)
b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
return
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
b.bitContainer >>= 1 << 3
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
b.bitContainer >>= 2 << 3
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
b.bitContainer >>= 3 << 3
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
b.bitContainer >>= 4 << 3
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
b.bitContainer >>= 5 << 3
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
b.bitContainer >>= 6 << 3
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
b.bitContainer >>= 7 << 3
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
b.bitContainer = 0
b.nBits = 0
return
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
b.bitContainer = 0
b.nBits = 0
b.out = out
}


@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
}
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
return b.b[b.off:]
}
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off


@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false


@ -11,7 +11,6 @@ import (
type dTable struct {
single []dEntrySingle
double []dEntryDouble
}
// single-symbols decoding
@ -19,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
// double-symbols decoding
type dEntryDouble struct {
seq [4]byte
nBits uint8
len uint8
}
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@ -35,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s, err = s.prepare(in)
s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
return &[4][256]byte{}
}
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56
const tlSize = 1 << 8
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.


@ -1,488 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@ -2,30 +2,43 @@
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
// and Decoder.Decompress1X that use an asm implementation of thir main loops.
package huff0
import (
"errors"
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
// go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
// go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
peekBits uint8
out *byte
dstEvery int
tbl *dEntrySingle
decoded int
limit *byte
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
pbr0: &br[0],
pbr1: &br[1],
pbr2: &br[2],
pbr3: &br[3],
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
out: &out[0],
dstEvery: dstEvery,
tbl: &single[0],
limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
decompress4x_8b_main_loop_amd64(&ctx)
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
decompress4x_main_loop_amd64(&ctx)
}
if off != 0 {
break
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
decoded = ctx.decoded
out = out[decoded/4:]
}
// Decode remaining.
@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
type decompress1xContext struct {
pbr *bitReaderShifted
peekBits uint8
out *byte
outCap int
tbl *dEntrySingle
decoded int
}
// Error reported by asm implementations
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:maxDecodedSize]
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
if maxDecodedSize >= 4 {
ctx := decompress1xContext{
pbr: &br,
out: &dst[0],
outCap: maxDecodedSize,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
tbl: &d.dt.single[0],
}
if cpuinfo.HasBMI2() {
decompress1x_main_loop_bmi2(&ctx)
} else {
decompress1x_main_loop_amd64(&ctx)
}
if ctx.decoded == error_max_decoded_size_exeeded {
return nil, ErrMaxDecodedSizeExceeded
}
dst = dst[:ctx.decoded]
}
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if len(dst) >= maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
return dst, br.close()
}
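
The net effect of this file is that Decompress1X, like Decompress4X, now runs its main loop in assembly on amd64 (with a BMI2 variant selected via cpuinfo.HasBMI2), while the public contract is unchanged: the capacity of dst bounds the decoded size. A hedged round-trip sketch of that public API as called from application code (my reading of the huff0 package, not part of this diff):

package main

import (
	"fmt"
	"strings"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Skewed data so Huffman coding actually compresses it.
	in := []byte(strings.Repeat("abcdefgh", 4) + strings.Repeat("a", 256))

	comp, _, err := huff0.Compress1X(in, nil) // output = serialized table + compressed stream
	if err != nil {
		panic(err)
	}

	// Decode: read the table back, then decompress with cap(dst) = original size.
	s, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}
	out, err := s.Decoder().Decompress1X(make([]byte, 0, len(in)), remain)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out) == string(in)) // true
}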

File diff suppressed because it is too large.


@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bitreader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
return dst, nil
}
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}
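
For context, a minimal usage sketch of the Decompress1X API shown above (not part of this change). It assumes the serialized Huffman table sits at the head of the compressed payload and that maxSize is a known upper bound on the output; as the doc comment states, the capacity of dst is used as the size limit.

```go
package example

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decodeSingleStream assumes the Huffman table is stored at the head of
// compressed, followed by a single (1X) encoded stream, and that maxSize is a
// known upper bound on the decompressed size.
func decodeSingleStream(compressed []byte, maxSize int) ([]byte, error) {
	s, stream, err := huff0.ReadTable(compressed, nil)
	if err != nil {
		return nil, fmt.Errorf("read table: %w", err)
	}
	// cap(dst) acts as the maximum decompressed size; if the stream would
	// produce more, ErrMaxDecodedSizeExceeded is returned.
	dst := make([]byte, 0, maxSize)
	return s.Decoder().Decompress1X(dst, stream)
}
```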

View File

@ -0,0 +1,34 @@
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For a more versatile solution, check
// https://github.com/klauspost/cpuid.
package cpuinfo
// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
return hasBMI1
}
// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
return hasBMI2
}
// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
old := hasBMI2
hasBMI2 = false
return func() {
hasBMI2 = old
}
}
// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
return HasBMI1() && HasBMI2()
}
var hasBMI1 bool
var hasBMI2 bool
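
A small illustrative sketch (not part of this change) of how these helpers are meant to be used. Note that cpuinfo is an internal package, so only code inside this module can import it; the printed strings are placeholders.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/internal/cpuinfo"
)

func main() {
	// Pick between accelerated and generic code paths at runtime.
	if cpuinfo.HasBMI2() {
		fmt.Println("BMI2 kernels available")
	} else {
		fmt.Println("falling back to generic kernels")
	}
	// In tests, the assembly path can be forced off and restored afterwards:
	//	defer cpuinfo.DisableBMI2()()
}
```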

View File

@ -0,0 +1,11 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package cpuinfo
//go:noescape
func x86extensions() (bmi1, bmi2 bool)
func init() {
hasBMI1, hasBMI2 = x86extensions()
}

View File

@ -0,0 +1,36 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
TEXT ·x86extensions(SB), NOSPLIT, $0
// 1. determine max EAX value
XORQ AX, AX
CPUID
CMPQ AX, $7
JB unsupported
// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
MOVQ $7, AX
MOVQ $0, CX
CPUID
BTQ $3, BX // bit 3 = BMI1
SETCS AL
BTQ $8, BX // bit 8 = BMI2
SETCS AH
MOVB AL, bmi1+0(FP)
MOVB AH, bmi2+1(FP)
RET
unsupported:
XORQ AX, AX
MOVB AL, bmi1+0(FP)
MOVB AL, bmi2+1(FP)
RET

View File

@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co
### Benchmarks
These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
The first two are streaming decodes and the last are smaller inputs.
Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
```
BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
Concurrent blocks, performance:
Concurrent performance:
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
This reflects the performance around May 2020, but this may be out of date.
This reflects the performance around May 2022, but this may be out of date.
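
For readers reproducing the concurrent numbers, a hedged sketch of the pattern the DecodeAllParallel benchmarks exercise: one shared Decoder, DecodeAll called from many goroutines. The helper name and buffer handling below are illustrative, not taken from the benchmark code.

```go
package example

import (
	"sync"

	"github.com/klauspost/compress/zstd"
)

// decodeAllParallel decodes many independent zstd blobs using one shared
// Decoder, which internally limits concurrency to the configured value.
func decodeAllParallel(blobs [][]byte) [][]byte {
	dec, err := zstd.NewReader(nil) // a nil reader is fine when only DecodeAll is used
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out := make([][]byte, len(blobs))
	var wg sync.WaitGroup
	for i, b := range blobs {
		wg.Add(1)
		go func(i int, b []byte) {
			defer wg.Done()
			out[i], _ = dec.DecodeAll(b, nil) // error handling omitted for brevity
		}(i, b)
	}
	wg.Wait()
	return out
}
```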
## Zstd inside ZIP files

View File

@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v
}
func (b *bitReader) get16BitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
return v
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {

View File

@ -5,8 +5,6 @@
package zstd
import "fmt"
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.bitContainer >>= v << 3
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {

View File

@ -5,9 +5,14 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@ -38,14 +43,14 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
compressedBlockOverAlloc = 16
maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
maxCompressedLiteralSize = 1 << 18
maxRLELiteralSize = 1 << 20
maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff
maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@ -97,7 +102,6 @@ type blockDec struct {
// Block is RLE, this is the size.
RLESize uint32
tmp [4]byte
Type blockType
@ -136,7 +140,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
maxSize := maxBlockSize
maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
@ -157,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
maxSize = maxCompressedBlockSize
maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
maxSize = int(windowSize)
maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@ -190,9 +194,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize)
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
@ -360,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, litRegenSize)
b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
if litRegenSize > maxCompressedLiteralSize {
// Exceptional
b.literalBuf = make([]byte, litRegenSize)
} else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
}
b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@ -397,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
var err error
// Use our out buffer.
huff.MaxDecodedSize = maxCompressedBlockSize
huff.MaxDecodedSize = litRegenSize
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
@ -429,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
huff := hist.huffTree
@ -448,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
return in, err
}
hist.huffTree = huff
huff.MaxDecodedSize = maxCompressedBlockSize
huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@ -463,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
// Re-cap to get extra size.
literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
@ -486,10 +487,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
err = hist.decoders.decodeSync(hist)
before := len(hist.decoders.out)
err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil {
return err
}
if hist.decoders.maxSyncLen > 0 {
hist.decoders.maxSyncLen += uint64(before)
hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
}
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil
@ -632,6 +638,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil
}
@ -650,6 +672,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err

View File

@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
func (b *byteBuf) remain() []byte {
return *b
}
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {

View File

@ -13,12 +13,6 @@ type byteReader struct {
off int
}
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
b.b = in
b.off = 0
}
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)

View File

@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
frame.history.setDict(&dict)
}
if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
if frame.WindowSize > d.o.maxWindowSize {
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if frame.FrameContentSize != fcsUnknown {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
if cap(dst) == 0 {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
@ -437,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
if len(next.b) > 0 {
if !d.o.ignoreChecksum && len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
@ -449,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
@ -533,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Update/Check CRC
if d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b)
if !d.o.ignoreChecksum {
d.frame.crc.Write(d.current.b)
}
if d.current.d.Last {
d.current.err = d.frame.checkCRC()
if !d.o.ignoreChecksum {
d.current.err = d.frame.checkCRC()
} else {
d.current.err = d.frame.consumeCRC()
}
if d.current.err != nil {
println("CRC error:", d.current.err)
return false
@ -629,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder:
// ASYNC:
// Spawn 4 go routines.
// 0: Read frames and decode blocks.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// 3: Wait for stream history, execute sequences, send stream history.
// Spawn 3 go routines.
// 0: Read frames and decode block literals.
// 1: Decode sequences.
// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
br := readerWrapper{r: r}
var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
// Async 1: Prepare blocks...
go func() {
var hist history
var hasErr bool
for block := range seqPrepare {
if hasErr {
if block != nil {
seqDecode <- block
}
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 1: new history")
}
hist.reset()
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
continue
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
close(seqDecode)
}()
// Async 2: Decode sequences...
// Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
@ -696,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
@ -750,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 3: new history")
println("Async 2: new history")
}
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@ -837,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
decodeStream:
for {
var hist history
var hasErr bool
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
seqDecode <- block
}
return
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
return
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
frame := d.frame
if debugDecoder {
println("New frame...")
@ -863,7 +856,7 @@ decodeStream:
case <-ctx.Done():
case dec := <-d.decoders:
dec.sendErr(err)
seqPrepare <- dec
decodeBlock(dec)
}
break decodeStream
}
@ -883,6 +876,10 @@ decodeStream:
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
}
hist.reset()
if h.dict != nil {
hist.setDict(h.dict)
}
dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize
historySent = true
@ -909,7 +906,7 @@ decodeStream:
}
err = dec.err
last := dec.Last
seqPrepare <- dec
decodeBlock(dec)
if err != nil {
break decodeStream
}
@ -918,7 +915,7 @@ decodeStream:
}
}
}
close(seqPrepare)
close(seqDecode)
wg.Wait()
d.frame.history.b = frameHistCache
}
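
The reworked comment above describes a three-stage pipeline: stage 0 reads frames and decodes block literals, stage 1 decodes sequences, stage 2 executes them and emits output. A minimal, illustrative sketch of that shape follows; the block type and channel sizes are placeholders, and this is not the actual decoder code.

```go
package example

// block stands in for the decoder's blockDec; the real type carries the
// parsed block data, decoded literals and sequence state.
type block struct{ data []byte }

// run wires the three stages: the caller acts as stage 0 (read + literals),
// stage 1 decodes sequences, stage 2 executes them and sends output.
func run(in <-chan *block, out chan<- *block) {
	seqDecode := make(chan *block, 4)
	seqExecute := make(chan *block, 4)

	go func() { // stage 1: decode sequences
		defer close(seqExecute)
		for b := range seqDecode {
			// ... decode sequences into b ...
			seqExecute <- b
		}
	}()

	go func() { // stage 2: execute sequences, emit output
		defer close(out)
		for b := range seqExecute {
			// ... build output from sequences + literals ...
			out <- b
		}
	}()

	for b := range in { // stage 0: read frames, decode literals
		seqDecode <- b
	}
	close(seqDecode)
}
```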

View File

@ -19,6 +19,7 @@ type decoderOptions struct {
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
}
func (o *decoderOptions) setDefault() {
@ -31,7 +32,7 @@ func (o *decoderOptions) setDefault() {
if o.concurrent > 4 {
o.concurrent = 4
}
o.maxDecodedSize = 1 << 63
o.maxDecodedSize = 64 << 30
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
@ -66,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum and default is 1 << 63 bytes.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}
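
A short usage sketch (not part of this change) combining the options defined in this file, including the new IgnoreChecksum option; the concrete limits are arbitrary examples.

```go
package example

import "github.com/klauspost/compress/zstd"

// newLimitedDecoder builds a decoder with explicit limits. The option names
// are the ones defined in this file; the values are illustrative only.
func newLimitedDecoder() (*zstd.Decoder, error) {
	return zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(1<<30),  // cap total decoded size (default is now 64GiB)
		zstd.WithDecoderMaxWindow(64<<20), // cap streaming window size
		zstd.IgnoreChecksum(true),         // skip checksum verification (added in this change)
	)
}
```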

View File

@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -518,8 +518,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -674,8 +674,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -1047,8 +1047,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match

View File

@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -439,8 +439,8 @@ encodeLoop:
var t int32
for {
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@ -1002,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match

View File

@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
if len(src) <= maxCompressedBlockSize {
if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {

View File

@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize {
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
// Alloc 2x window size if not low-mem, or very small window size.
d.history.allocFrameBuffer = d.history.windowSize * 2
// TODO: Maybe use FrameContent size
} else {
// Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error {
return err
}
if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if d.o.ignoreChecksum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}
@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
}
return nil
}
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@ -326,6 +345,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
if d.FrameContentSize != fcsUnknown {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
return dst, ErrDecoderSizeExceeded
}
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
@ -360,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
}
}

View File

@ -5,8 +5,10 @@
package zstd
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
const (
@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
}
b.advance((bitCount + 7) >> 3)
// println(s.norm[:s.symbolLen], s.symbolLen)
return s.buildDtable()
}
func (s *fseDecoder) mustReadFrom(r io.Reader) {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
// dt [maxTablesize]decSymbol // Decompression table.
// symbolLen uint16 // Length of active part of the symbol table.
// actualTableLog uint8 // Selected tablelog.
// maxBits uint8 // Maximum number of additional bits
// // used for table creation to avoid allocations.
// stateTable [256]uint16
// norm [maxSymbolValue + 1]int16
// preDefined bool
fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
func (d decSymbol) baseline() uint32 {
return uint32(d >> 32)
}
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
func (d *decSymbol) setBaseline(baseline uint32) {
const mask = 0xffffffff
*d = (*d & mask) | decSymbol(baseline)<<32
}
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
s.dt[0] = symbol
}
// buildDtable will build the decoding table.
func (s *fseDecoder) buildDtable() error {
tableSize := uint32(1 << s.actualTableLog)
highThreshold := tableSize - 1
symbolNext := s.stateTable[:256]
// Init, lay down lowprob symbols
{
for i, v := range s.norm[:s.symbolLen] {
if v == -1 {
s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
symbolNext[i] = 1
} else {
symbolNext[i] = uint16(v)
}
}
}
// Spread symbols
{
tableMask := tableSize - 1
step := tableStep(tableSize)
position := uint32(0)
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
s.dt[position].setAddBits(uint8(ss))
position = (position + step) & tableMask
for position > highThreshold {
// lowprob area
position = (position + step) & tableMask
}
}
}
if position != 0 {
// position must reach all cells once, otherwise normalizedCounter is incorrect
return errors.New("corrupted input (position != 0)")
}
}
// Build Decoding table
{
tableSize := uint16(1 << s.actualTableLog)
for u, v := range s.dt[:tableSize] {
symbol := v.addBits()
nextState := symbolNext[symbol]
symbolNext[symbol] = nextState + 1
nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
s.dt[u&maxTableMask].setNBits(nBits)
newState := (nextState << nBits) - tableSize
if newState > tableSize {
return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
}
if newState == uint16(u) && nBits == 0 {
// Seems weird that this is possible with nbits > 0.
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
}
s.dt[u&maxTableMask].setNewState(newState)
}
}
return nil
}
// transform will transform the decoder table into a table usable for
// decoding without having to apply the transformation while decoding.
// The state will contain the base value and the number of bits to read.
@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
// next returns the current symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
lowBits := uint16(br.getBits(s.state.nbBits()))
s.state = s.dt[s.state.newState()+lowBits]
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
return br.finished() && s.state.nbBits() > 0
}
// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
return s.state.baselineInt(), s.state.addBits()
}
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}

View File

@ -0,0 +1,64 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
)
type buildDtableAsmContext struct {
// inputs
stateTable *uint16
norm *int16
dt *uint64
// outputs --- set by the procedure in the case of error;
// for interpretation please see the error handling part below
errParam1 uint64
errParam2 uint64
}
// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
// Function returns non-zero exit code on error.
//go:noescape
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
// please keep in sync with _generate/gen_fse.go
const (
errorCorruptedNormalizedCounter = 1
errorNewStateTooBig = 2
errorNewStateNoBits = 3
)
// buildDtable will build the decoding table.
func (s *fseDecoder) buildDtable() error {
ctx := buildDtableAsmContext{
stateTable: (*uint16)(&s.stateTable[0]),
norm: (*int16)(&s.norm[0]),
dt: (*uint64)(&s.dt[0]),
}
code := buildDtable_asm(s, &ctx)
if code != 0 {
switch code {
case errorCorruptedNormalizedCounter:
position := ctx.errParam1
return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
case errorNewStateTooBig:
newState := decSymbol(ctx.errParam1)
size := ctx.errParam2
return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
case errorNewStateNoBits:
newState := decSymbol(ctx.errParam1)
oldState := decSymbol(ctx.errParam2)
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
default:
return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
}
}
return nil
}

View File

@ -0,0 +1,127 @@
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
// +build !appengine,!noasm,gc,!noasm
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
TEXT ·buildDtable_asm(SB), $0-24
MOVQ ctx+8(FP), CX
MOVQ s+0(FP), DI
// Load values
MOVBQZX 4098(DI), DX
XORQ AX, AX
BTSQ DX, AX
MOVQ (CX), BX
MOVQ 16(CX), SI
LEAQ -1(AX), R8
MOVQ 8(CX), CX
MOVWQZX 4096(DI), DI
// End load values
// Init, lay down lowprob symbols
XORQ R9, R9
JMP init_main_loop_condition
init_main_loop:
MOVWQSX (CX)(R9*2), R10
CMPW R10, $-1
JNE do_not_update_high_threshold
MOVB R9, 1(SI)(R8*8)
DECQ R8
MOVQ $0x0000000000000001, R10
do_not_update_high_threshold:
MOVW R10, (BX)(R9*2)
INCQ R9
init_main_loop_condition:
CMPQ R9, DI
JL init_main_loop
// Spread symbols
// Calculate table step
MOVQ AX, R9
SHRQ $0x01, R9
MOVQ AX, R10
SHRQ $0x03, R10
LEAQ 3(R9)(R10*1), R9
// Fill add bits values
LEAQ -1(AX), R10
XORQ R11, R11
XORQ R12, R12
JMP spread_main_loop_condition
spread_main_loop:
XORQ R13, R13
MOVWQSX (CX)(R12*2), R14
JMP spread_inner_loop_condition
spread_inner_loop:
MOVB R12, 1(SI)(R11*8)
adjust_position:
ADDQ R9, R11
ANDQ R10, R11
CMPQ R11, R8
JG adjust_position
INCQ R13
spread_inner_loop_condition:
CMPQ R13, R14
JL spread_inner_loop
INCQ R12
spread_main_loop_condition:
CMPQ R12, DI
JL spread_main_loop
TESTQ R11, R11
JZ spread_check_ok
MOVQ ctx+8(FP), AX
MOVQ R11, 24(AX)
MOVQ $+1, ret+16(FP)
RET
spread_check_ok:
// Build Decoding table
XORQ DI, DI
build_table_main_table:
MOVBQZX 1(SI)(DI*8), CX
MOVWQZX (BX)(CX*2), R8
LEAQ 1(R8), R9
MOVW R9, (BX)(CX*2)
MOVQ R8, R9
BSRQ R9, R9
MOVQ DX, CX
SUBQ R9, CX
SHLQ CL, R8
SUBQ AX, R8
MOVB CL, (SI)(DI*8)
MOVW R8, 2(SI)(DI*8)
CMPQ R8, AX
JLE build_table_check1_ok
MOVQ ctx+8(FP), CX
MOVQ R8, 24(CX)
MOVQ AX, 32(CX)
MOVQ $+2, ret+16(FP)
RET
build_table_check1_ok:
TESTB CL, CL
JNZ build_table_check2_ok
CMPW R8, DI
JNE build_table_check2_ok
MOVQ ctx+8(FP), AX
MOVQ R8, 24(AX)
MOVQ DI, 32(AX)
MOVQ $+3, ret+16(FP)
RET
build_table_check2_ok:
INCQ DI
CMPQ DI, AX
JL build_table_main_table
MOVQ $+0, ret+16(FP)
RET

View File

@ -0,0 +1,72 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
package zstd
import (
"errors"
"fmt"
)
// buildDtable will build the decoding table.
func (s *fseDecoder) buildDtable() error {
tableSize := uint32(1 << s.actualTableLog)
highThreshold := tableSize - 1
symbolNext := s.stateTable[:256]
// Init, lay down lowprob symbols
{
for i, v := range s.norm[:s.symbolLen] {
if v == -1 {
s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
symbolNext[i] = 1
} else {
symbolNext[i] = uint16(v)
}
}
}
// Spread symbols
{
tableMask := tableSize - 1
step := tableStep(tableSize)
position := uint32(0)
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
s.dt[position].setAddBits(uint8(ss))
position = (position + step) & tableMask
for position > highThreshold {
// lowprob area
position = (position + step) & tableMask
}
}
}
if position != 0 {
// position must reach all cells once, otherwise normalizedCounter is incorrect
return errors.New("corrupted input (position != 0)")
}
}
// Build Decoding table
{
tableSize := uint16(1 << s.actualTableLog)
for u, v := range s.dt[:tableSize] {
symbol := v.addBits()
nextState := symbolNext[symbol]
symbolNext[symbol] = nextState + 1
nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
s.dt[u&maxTableMask].setNBits(nBits)
newState := (nextState << nBits) - tableSize
if newState > tableSize {
return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
}
if newState == uint16(u) && nBits == 0 {
// Seems weird that this is possible with nbits > 0.
return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
}
s.dt[u&maxTableMask].setNewState(newState)
}
}
return nil
}

View File

@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *fseEncoder) prepare() (*fseEncoder, error) {
if s == nil {
s = &fseEncoder{}
}
s.useRLE = false
if s.clearCount && s.maxCount == 0 {
for i := range s.count {
s.count[i] = 0
}
s.clearCount = false
}
return s, nil
}
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
c.bw.addBits16NC(c.state, uint8(nbBitsOut))
c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()

View File

@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true

View File

@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false

View File

@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}

View File

@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
if len(s.dict) == 0 {
return s.executeSimple(seqs, hist)
}
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
@ -327,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
@ -341,14 +203,18 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
func (s *sequenceDecs) decodeSync(hist []byte) error {
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
@ -530,6 +396,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@ -543,8 +410,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
// Add final literals
@ -552,16 +419,6 @@ func (s *sequenceDecs) decodeSync(history *history) error {
return br.close()
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) update(br *bitReader) {
// Max 8 bits
s.litLengths.state.next(br)
// Max 9 bits
s.matchLengths.state.next(br)
// Max 8 bits
s.offsets.state.next(br)
}
var bitMask [16]uint16
func init() {
@ -570,87 +427,6 @@ func init() {
}
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) updateAlt(br *bitReader) {
// Update all 3 states at once. Approx 20% faster.
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
nBits := a.nbBits() + b.nbBits() + c.nbBits()
if nBits == 0 {
s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
lowBits = uint16(bits >> (c.nbBits() & 31))
lowBits &= bitMask[b.nbBits()&15]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
lowBits = uint16(bits) & bitMask[c.nbBits()&15]
s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
}
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
ml, mlB := mlState.final()
mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
return
}
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
return
}
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
return
}
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()

View File

@ -0,0 +1,362 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
type decodeSyncAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
litRemain int
out []byte
outPosition int
literals []byte
litPosition int
history []byte
windowSize int
ll int // set on error (not for all errors, please refer to _generate/gen.go)
ml int // set on error (not for all errors, please refer to _generate/gen.go)
mo int // set on error (not for all errors, please refer to _generate/gen.go)
}
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than the output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than the output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if len(s.dict) > 0 {
return false, nil
}
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
return false, nil
}
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
iteration: s.nSeqs - 1,
litRemain: len(s.literals),
out: s.out,
outPosition: len(s.out),
literals: s.literals,
windowSize: s.windowSize,
history: hist,
}
s.seqSize = 0
startSize := len(s.out)
var errCode int
if cpuinfo.HasBMI2() {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
}
} else {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
}
}
switch errCode {
case noError:
break
case errorMatchLenOfsMismatch:
return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
case errorMatchLenTooBig:
return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
case errorMatchOffTooBig:
return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
ctx.mo, ctx.outPosition+len(hist)-startSize)
case errorNotEnoughLiterals:
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
return true, err
}
s.literals = s.literals[ctx.litPosition:]
t := ctx.outPosition
s.out = s.out[:t]
// Add final literals
s.out = append(s.out, s.literals...)
if debugDecoder {
t += len(s.literals)
if t != len(s.out) {
panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
}
}
return true, nil
}
// --------------------------------------------------------------------------------
type decodeAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
seqs []seqVals
litRemain int
}
const noError = 0
// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1
// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2
// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3
// error reported when the sum of literal lengths exceeds the literal buffer size
const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
seqs: seqs,
iteration: len(seqs) - 1,
litRemain: len(s.literals),
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
if cpuinfo.HasBMI2() {
if lte56bits {
errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
}
} else {
if lte56bits {
errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_amd64(s, br, &ctx)
}
}
if errCode != 0 {
i := len(seqs) - ctx.iteration - 1
switch errCode {
case errorMatchLenOfsMismatch:
ml := ctx.seqs[i].ml
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
case errorMatchLenTooBig:
ml := ctx.seqs[i].ml
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
}
if ctx.litRemain < 0 {
return fmt.Errorf("literal count is too big: total available %d, total requested %d",
len(s.literals), len(s.literals)-ctx.litRemain)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// --------------------------------------------------------------------------------
type executeAsmContext struct {
seqs []seqVals
seqIndex int
out []byte
history []byte
literals []byte
outPosition int
litPosition int
windowSize int
}
// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
ctx := executeAsmContext{
seqs: seqs,
seqIndex: 0,
out: out,
history: hist,
outPosition: t,
litPosition: 0,
literals: s.literals,
windowSize: s.windowSize,
}
var ok bool
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
} else {
ok = sequenceDecs_executeSimple_amd64(&ctx)
}
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
}
s.literals = s.literals[ctx.litPosition:]
t = ctx.outPosition
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}
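
These assembly fast paths are selected internally; callers only ever see the regular decoder API. A minimal sketch, not part of this commit, of the call path that exercises them on amd64 (the input is assumed to hold a complete zstd frame):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A nil reader is fine when only DecodeAll is used; concurrency 1
	// keeps decoding synchronous.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	var frame []byte // assumed: a complete zstd frame
	out, err := dec.DecodeAll(frame, nil)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("decoded", len(out), "bytes")
}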

3689
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
package zstd
import (
"fmt"
"io"
)
// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return false, nil
}
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full-size tables to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
for _, seq := range seqs {
// Add literals
copy(out[t:], s.literals[:seq.ll])
t += seq.ll
s.literals = s.literals[seq.ll:]
// Malformed input
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
}
// Copy from history.
if v := seq.mo - t; v > 0 {
// v is the start position in history from end.
start := len(hist) - v
if seq.ml > v {
// Some goes into the current block.
// Copy remainder of history
copy(out[t:], hist[start:])
t += v
seq.ml -= v
} else {
copy(out[t:], hist[start:start+seq.ml])
t += seq.ml
continue
}
}
// We must be in the current buffer now
if seq.ml > 0 {
start := t - seq.mo
if seq.ml <= t-start {
// No overlap
copy(out[t:], out[start:start+seq.ml])
t += seq.ml
} else {
// Overlapping copy
// Extend the destination slice and copy one byte at a time.
src := out[start : start+seq.ml]
dst := out[t:]
dst = dst[:len(src)]
t += len(src)
// Destination is the space we just added.
for i := range src {
dst[i] = src[i]
}
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}
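
The byte-by-byte branch above handles the classic LZ repeat-match case: when the offset is smaller than the match length, the match reads bytes it has only just written, so a plain copy() is not enough. A tiny standalone illustration, not taken from this diff:

package main

import "fmt"

func main() {
	// offset 1, length 3: every copied byte was itself just produced,
	// so the match keeps repeating the last byte.
	out := []byte("ab")
	offset, length := 1, 3
	start := len(out) - offset
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	fmt.Println(string(out)) // prints "abbbb"
}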

View File

@ -18,26 +18,44 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
// zipReaderPool is the default reader pool.
var zipReaderPool = sync.Pool{New: func() interface{} {
z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
if err != nil {
panic(err)
}
return z
}}
// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
dec = d
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
pool := &zipReaderPool
if len(opts) > 0 {
opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
// Force concurrency 1
opts = append(opts, WithDecoderConcurrency(1))
// Create our own pool
pool = &sync.Pool{}
}
return func(r io.Reader) io.ReadCloser {
dec, ok := pool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, opts...)
if err != nil {
panic(err)
}
dec = d
}
return &pooledZipReader{dec: dec, pool: pool}
}
return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
mu sync.Mutex // guards Close and Read
dec *Decoder
mu sync.Mutex // guards Close and Read
pool *sync.Pool
dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec.Reset(nil)
r.pool.Put(r.dec)
r.dec = nil
}
return dec, err
@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.pool.Put(r.dec)
r.dec = nil
}
return err
@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return newZipReader
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
return newZipReader(opts...)
}
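
With options now accepted, the registered decompressor can be tuned per application. A hypothetical registration sketch (the 64MB window is an arbitrary example value and the package name is made up):

package zstdzip

import (
	"archive/zip"

	"github.com/klauspost/compress/zstd"
)

func init() {
	// Passing any option creates a dedicated pool; WithDecoderConcurrency(1)
	// is still forced by ZipDecompressor itself.
	zip.RegisterDecompressor(zstd.ZipMethodWinZip,
		zstd.ZipDecompressor(zstd.WithDecoderMaxWindow(64<<20)))
}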

View File

@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
// matchLenFast does matching, but will not match the last up to 7 bytes.
func matchLenFast(a, b []byte) int {
endI := len(a) & (math.MaxInt32 - 7)
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
return i + bits.TrailingZeros64(diff)>>3
}
}
return endI
}
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.

View File

@ -10,18 +10,18 @@ import (
)
type UsageInfo struct {
ID string
Mutable bool
InUse bool
Size int64
ID string `json:"id"`
Mutable bool `json:"mutable"`
InUse bool `json:"inUse"`
Size int64 `json:"size"`
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parents []string
Description string
RecordType UsageRecordType
Shared bool
CreatedAt time.Time `json:"createdAt"`
LastUsedAt *time.Time `json:"lastUsedAt"`
UsageCount int `json:"usageCount"`
Parents []string `json:"parents"`
Description string `json:"description"`
RecordType UsageRecordType `json:"recordType"`
Shared bool `json:"shared"`
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
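The added tags only change how a usage record returned by DiskUsage marshals: keys become lowerCamel JSON names instead of Go field names. A quick sketch, with made-up field values:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/client"
)

func main() {
	dt, _ := json.Marshal(client.UsageInfo{ID: "sha256:abc", InUse: true, Size: 1024})
	fmt.Println(string(dt))
	// keys come out as "id", "mutable", "inUse", "size", ...
}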

View File

@ -9,13 +9,13 @@ import (
)
type Info struct {
BuildkitVersion BuildkitVersion
BuildkitVersion BuildkitVersion `json:"buildkitVersion"`
}
type BuildkitVersion struct {
Package string
Version string
Revision string
Package string `json:"package"`
Version string `json:"version"`
Revision string `json:"revision"`
}
func (c *Client) Info(ctx context.Context) (*Info, error) {

View File

@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
}
smc.index[l.SourceMap] = idx
}
smc.locations[dgst] = ls
smc.locations[dgst] = append(smc.locations[dgst], ls...)
}
func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {

View File

@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect
if opMeta != nil {
def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta)
}
s.Add(dgst, sls)
if _, ok := cache[dgst]; ok {
return def, nil
}
s.Add(dgst, sls)
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil

View File

@ -209,8 +209,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
<-time.After(3 * time.Second)
cancelStatus()
}()
bklog.G(ctx).Debugf("stopping session")
s.Close()
if !opt.SessionPreInitialized {
bklog.G(ctx).Debugf("stopping session")
s.Close()
}
}()
var pbd *pb.Definition
if def != nil {

View File

@ -0,0 +1,81 @@
package subrequests
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"text/tabwriter"
"github.com/moby/buildkit/frontend/gateway/client"
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/errdefs"
"github.com/pkg/errors"
)
const RequestSubrequestsDescribe = "frontend.subrequests.describe"
var SubrequestsDescribeDefinition = Request{
Name: RequestSubrequestsDescribe,
Version: "1.0.0",
Type: TypeRPC,
Description: "List available subrequest types",
Metadata: []Named{
{Name: "result.json"},
{Name: "result.txt"},
},
}
func Describe(ctx context.Context, c client.Client) ([]Request, error) {
gwcaps := c.BuildOpts().Caps
if err := (&gwcaps).Supports(gwpb.CapFrontendCaps); err != nil {
return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe)
}
res, err := c.Solve(ctx, client.SolveRequest{
FrontendOpt: map[string]string{
"requestid": RequestSubrequestsDescribe,
"frontend.caps": "moby.buildkit.frontend.subrequests",
},
Frontend: "dockerfile.v0",
})
if err != nil {
var reqErr *errdefs.UnsupportedSubrequestError
if errors.As(err, &reqErr) {
return nil, err
}
var capErr *errdefs.UnsupportedFrontendCapError
if errors.As(err, &capErr) {
return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe)
}
return nil, err
}
dt, ok := res.Metadata["result.json"]
if !ok {
return nil, errors.Errorf("no result.json metadata in response")
}
var reqs []Request
if err := json.Unmarshal(dt, &reqs); err != nil {
return nil, errors.Wrap(err, "failed to parse describe result")
}
return reqs, nil
}
func PrintDescribe(dt []byte, w io.Writer) error {
var d []Request
if err := json.Unmarshal(dt, &d); err != nil {
return err
}
tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
fmt.Fprintf(tw, "NAME\tVERSION\tDESCRIPTION\n")
for _, r := range d {
fmt.Fprintf(tw, "%s\t%s\t%s\n", strings.TrimPrefix(r.Name, "frontend."), r.Version, r.Description)
}
return tw.Flush()
}
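
A hedged sketch of how a caller holding a gateway client could combine Describe and PrintDescribe; the function name and package are invented, only the two subrequests helpers come from this commit:

package example

import (
	"context"
	"encoding/json"
	"os"

	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests"
)

func describeAndPrint(ctx context.Context, c client.Client) error {
	reqs, err := subrequests.Describe(ctx, c)
	if err != nil {
		return err
	}
	dt, err := json.Marshal(reqs)
	if err != nil {
		return err
	}
	// Renders the NAME/VERSION/DESCRIPTION table.
	return subrequests.PrintDescribe(dt, os.Stdout)
}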

View File

@ -0,0 +1,146 @@
package outline
import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/tabwriter"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/solver/pb"
)
const RequestSubrequestsOutline = "frontend.outline"
var SubrequestsOutlineDefinition = subrequests.Request{
Name: RequestSubrequestsOutline,
Version: "1.0.0",
Type: subrequests.TypeRPC,
Description: "List all parameters current build target supports",
Opts: []subrequests.Named{
{
Name: "target",
Description: "Target build stage",
},
},
Metadata: []subrequests.Named{
{Name: "result.json"},
{Name: "result.txt"},
},
}
type Outline struct {
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
Args []Arg `json:"args,omitempty"`
Secrets []Secret `json:"secrets,omitempty"`
SSH []SSH `json:"ssh,omitempty"`
Cache []CacheMount `json:"cache,omitempty"`
Sources [][]byte `json:"sources,omitempty"`
}
func (o Outline) ToResult() (*client.Result, error) {
res := client.NewResult()
dt, err := json.MarshalIndent(o, "", " ")
if err != nil {
return nil, err
}
res.AddMeta("result.json", dt)
b := bytes.NewBuffer(nil)
if err := PrintOutline(dt, b); err != nil {
return nil, err
}
res.AddMeta("result.txt", b.Bytes())
res.AddMeta("version", []byte(SubrequestsOutlineDefinition.Version))
return res, nil
}
type Arg struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
Value string `json:"value,omitempty"`
Location *pb.Location `json:"location,omitempty"`
}
type Secret struct {
Name string `json:"name"`
Required bool `json:"required,omitempty"`
Location *pb.Location `json:"location,omitempty"`
}
type SSH struct {
Name string `json:"name"`
Required bool `json:"required,omitempty"`
Location *pb.Location `json:"location,omitempty"`
}
type CacheMount struct {
ID string `json:"ID"`
Location *pb.Location `json:"location,omitempty"`
}
func PrintOutline(dt []byte, w io.Writer) error {
var o Outline
if err := json.Unmarshal(dt, &o); err != nil {
return err
}
if o.Name != "" || o.Description != "" {
tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
name := o.Name
if o.Name == "" {
name = "(default)"
}
fmt.Fprintf(tw, "TARGET:\t%s\n", name)
if o.Description != "" {
fmt.Fprintf(tw, "DESCRIPTION:\t%s\n", o.Description)
}
tw.Flush()
fmt.Println()
}
if len(o.Args) > 0 {
tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprintf(tw, "BUILD ARG\tVALUE\tDESCRIPTION\n")
for _, a := range o.Args {
fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Name, a.Value, a.Description)
}
tw.Flush()
fmt.Println()
}
if len(o.Secrets) > 0 {
tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprintf(tw, "SECRET\tREQUIRED\n")
for _, s := range o.Secrets {
b := ""
if s.Required {
b = "true"
}
fmt.Fprintf(tw, "%s\t%s\n", s.Name, b)
}
tw.Flush()
fmt.Println()
}
if len(o.SSH) > 0 {
tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
fmt.Fprintf(tw, "SSH\tREQUIRED\n")
for _, s := range o.SSH {
b := ""
if s.Required {
b = "true"
}
fmt.Fprintf(tw, "%s\t%s\n", s.Name, b)
}
tw.Flush()
fmt.Println()
}
return nil
}
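
A hypothetical example of a frontend answering the outline subrequest with these types; the target name and build arg are invented:

package example

import (
	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests/outline"
)

func outlineResult() (*client.Result, error) {
	o := outline.Outline{
		Name:        "release",
		Description: "Build the release binary",
		Args: []outline.Arg{
			{Name: "VERSION", Description: "version stamped into the binary", Value: "dev"},
		},
	}
	// ToResult attaches result.json, result.txt and the request version as
	// metadata, ready to return from a gateway build function.
	return o.ToResult()
}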

View File

@ -0,0 +1,84 @@
package targets
import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/tabwriter"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/solver/pb"
)
const RequestTargets = "frontend.targets"
var SubrequestsTargetsDefinition = subrequests.Request{
Name: RequestTargets,
Version: "1.0.0",
Type: subrequests.TypeRPC,
Description: "List all targets current build supports",
Opts: []subrequests.Named{},
Metadata: []subrequests.Named{
{Name: "result.json"},
{Name: "result.txt"},
},
}
type List struct {
Targets []Target `json:"targets"`
Sources [][]byte `json:"sources"`
}
func (l List) ToResult() (*client.Result, error) {
res := client.NewResult()
dt, err := json.MarshalIndent(l, "", " ")
if err != nil {
return nil, err
}
res.AddMeta("result.json", dt)
b := bytes.NewBuffer(nil)
if err := PrintTargets(dt, b); err != nil {
return nil, err
}
res.AddMeta("result.txt", b.Bytes())
res.AddMeta("version", []byte(SubrequestsTargetsDefinition.Version))
return res, nil
}
type Target struct {
Name string `json:"name,omitempty"`
Default bool `json:"default,omitempty"`
Description string `json:"description,omitempty"`
Base string `json:"base,omitempty"`
Platform string `json:"platform,omitempty"`
Location *pb.Location `json:"location,omitempty"`
}
func PrintTargets(dt []byte, w io.Writer) error {
var l List
if err := json.Unmarshal(dt, &l); err != nil {
return err
}
tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
fmt.Fprintf(tw, "TARGET\tDESCRIPTION\n")
for _, t := range l.Targets {
name := t.Name
if name == "" && t.Default {
name = "(default)"
} else {
if t.Default {
name = fmt.Sprintf("%s (default)", name)
}
}
fmt.Fprintf(tw, "%s\t%s\n", name, t.Description)
}
return tw.Flush()
}
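
And the analogous sketch for the targets subrequest, with two invented stages:

package example

import (
	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests/targets"
)

func targetsResult() (*client.Result, error) {
	l := targets.List{
		Targets: []targets.Target{
			{Name: "build", Description: "Compile the binaries", Base: "golang:1.18"},
			{Name: "release", Default: true, Description: "Minimal runtime image", Base: "alpine"},
		},
	}
	// ToResult produces the result.json/result.txt metadata pair that
	// PrintTargets renders as a TARGET/DESCRIPTION table.
	return l.ToResult()
}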

View File

@ -0,0 +1,21 @@
package subrequests
type Request struct {
Name string `json:"name"`
Version string `json:"version"`
Type RequestType `json:"type"`
Description string `json:"description"`
Opts []Named `json:"opts"`
Inputs []Named `json:"inputs"`
Metadata []Named `json:"metadata"`
Refs []Named `json:"refs"`
}
type Named struct {
Name string `json:"name"`
Description string `json:"description"`
}
type RequestType string
const TypeRPC RequestType = "rpc"

View File

@ -1586,8 +1586,8 @@ func (m *Range) GetEnd() Position {
// Position is single location in a source file
type Position struct {
Line int32 `protobuf:"varint,1,opt,name=Line,proto3" json:"Line,omitempty"`
Character int32 `protobuf:"varint,2,opt,name=Character,proto3" json:"Character,omitempty"`
Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"`
Character int32 `protobuf:"varint,2,opt,name=character,proto3" json:"character,omitempty"`
}
func (m *Position) Reset() { *m = Position{} }
@ -2831,166 +2831,166 @@ func init() {
func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
var fileDescriptor_8de16154b2733812 = []byte{
// 2538 bytes of a gzipped FileDescriptorProto
// (raw gzipped descriptor bytes omitted — generated content, too large to display)
// 2535 bytes of a gzipped FileDescriptorProto
// (raw gzipped descriptor bytes omitted — generated content, too large to display)
}
func (m *Op) Marshal() (dAtA []byte, err error) {

View File

@ -243,8 +243,8 @@ message Range {
// Position is single location in a source file
message Position {
int32 Line = 1;
int32 Character = 2;
int32 line = 1;
int32 character = 2;
}
message ExportCache {

View File

@ -0,0 +1,95 @@
package gitutil
import (
"regexp"
"strings"
"github.com/containerd/containerd/errdefs"
)
// GitRef represents a git ref.
//
// Examples:
// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
type GitRef struct {
// Remote is the remote repository path.
Remote string
// ShortName is the directory name of the repo.
// e.g., "bar" for "https://github.com/foo/bar.git"
ShortName string
// Commit is a commit hash, a tag, or branch name.
// Commit is optional.
Commit string
// SubDir is a directory path inside the repo.
// SubDir is optional.
SubDir string
// IndistinguishableFromLocal is true for a ref that is indistinguishable from a local file path,
// e.g., "github.com/foo/bar".
//
// Deprecated.
// Instead, use a distinguishable form such as "https://github.com/foo/bar.git".
//
// The dockerfile frontend still accepts this form only for build contexts.
IndistinguishableFromLocal bool
// UnencryptedTCP is true for a ref that needs an unencrypted TCP connection,
// e.g., "git://..." and "http://..." .
//
// Discouraged, although not deprecated.
// Instead, consider using an encrypted TCP connection such as "git@github.com/foo/bar.git" or "https://github.com/foo/bar.git".
UnencryptedTCP bool
}
// var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
// ParseGitRef parses a git ref.
func ParseGitRef(ref string) (*GitRef, error) {
res := &GitRef{}
if strings.HasPrefix(ref, "github.com/") {
res.IndistinguishableFromLocal = true // Deprecated
} else {
_, proto := ParseProtocol(ref)
switch proto {
case UnknownProtocol:
return nil, errdefs.ErrInvalidArgument
}
switch proto {
case HTTPProtocol, GitProtocol:
res.UnencryptedTCP = true // Discouraged, but not deprecated
}
switch proto {
// An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix.
case HTTPProtocol, HTTPSProtocol:
var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
if !gitURLPathWithFragmentSuffix.MatchString(ref) {
return nil, errdefs.ErrInvalidArgument
}
}
}
refSplitBySharp := strings.SplitN(ref, "#", 2)
res.Remote = refSplitBySharp[0]
if len(res.Remote) == 0 {
return res, errdefs.ErrInvalidArgument
}
if len(refSplitBySharp) > 1 {
refSplitBySharpSplitByColon := strings.SplitN(refSplitBySharp[1], ":", 2)
res.Commit = refSplitBySharpSplitByColon[0]
if len(res.Commit) == 0 {
return res, errdefs.ErrInvalidArgument
}
if len(refSplitBySharpSplitByColon) > 1 {
res.SubDir = refSplitBySharpSplitByColon[1]
}
}
repoSplitBySlash := strings.Split(res.Remote, "/")
res.ShortName = strings.TrimSuffix(repoSplitBySlash[len(repoSplitBySlash)-1], ".git")
return res, nil
}
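For reference, a minimal sketch of how the parser above behaves on the example from the GitRef doc comment. The import path is assumed to be BuildKit's github.com/moby/buildkit/util/gitutil, which is where this file appears to be vendored from; adjust if the package lives elsewhere.

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/gitutil"
)

func main() {
	// Same input as the doc-comment example above.
	ref, err := gitutil.ParseGitRef("https://github.com/foo/bar.git#baz/qux:quux/quuz")
	if err != nil {
		panic(err)
	}
	// Remote=https://github.com/foo/bar.git ShortName=bar Commit=baz/qux SubDir=quux/quuz
	fmt.Println(ref.Remote, ref.ShortName, ref.Commit, ref.SubDir)
}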

View File

@ -42,6 +42,14 @@ func ToGRPC(err error) error {
st = status.FromProto(pb)
}
// If the original error was wrapped with more context than the GRPCStatus error,
// copy the original message to the GRPCStatus error
if err.Error() != st.Message() {
pb := st.Proto()
pb.Message = err.Error()
st = status.FromProto(pb)
}
var details []proto.Message
for _, st := range stack.Traces(err) {

View File

@ -14,6 +14,10 @@ import (
"github.com/pkg/errors"
)
// MaxRetryBackoff is the maximum backoff time before giving up. This is a
// variable so that code which embeds BuildKit can override the default value.
var MaxRetryBackoff = 8 * time.Second
func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) {
backoff := time.Second
@ -35,7 +39,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
return descs, nil
}
// backoff logic
if backoff >= 8*time.Second {
if backoff >= MaxRetryBackoff {
return nil, err
}
if logger != nil {
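The exported MaxRetryBackoff above replaces the previously hard-coded 8s cutoff so code embedding BuildKit can tune it. A hedged sketch, assuming the vendored import path github.com/moby/buildkit/util/resolver/retryhandler (the path itself is not shown in this hunk):

import (
	"time"

	"github.com/moby/buildkit/util/resolver/retryhandler"
)

func init() {
	// Give slow or flaky registries more headroom than the default 8s
	// before the retry handler gives up.
	retryhandler.MaxRetryBackoff = 30 * time.Second
}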

View File

@ -197,14 +197,6 @@ func goRuntimeMemStats() memStatsMetrics {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
}
}
@ -268,7 +260,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
@ -278,6 +269,7 @@ func memstatNamespace(s string) string {
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
// TODO(bwplotka): Remove when Go 1.16 reaches EOL and replace with runtime/metrics.Description
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64

View File

@ -40,13 +40,28 @@ type goCollector struct {
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
msMetrics := goRuntimeMemStats()
msMetrics = append(msMetrics, struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}{
// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
})
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}

View File

@ -25,11 +25,71 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
)
const (
goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
goGCHeapFreesObjects = "/gc/heap/frees:objects"
goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
goGCHeapObjects = "/gc/heap/objects:objects"
goGCHeapGoalBytes = "/gc/heap/goal:bytes"
goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
)
// runtime/metrics names required for runtimeMemStats like logic.
var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
goGCHeapAllocsObjects,
goGCHeapFreesObjects,
goGCHeapAllocsBytes,
goGCHeapObjects,
goGCHeapGoalBytes,
goMemoryClassesTotalBytes,
goMemoryClassesHeapObjectsBytes,
goMemoryClassesHeapUnusedBytes,
goMemoryClassesHeapReleasedBytes,
goMemoryClassesHeapFreeBytes,
goMemoryClassesHeapStacksBytes,
goMemoryClassesOSStacksBytes,
goMemoryClassesMetadataMSpanInuseBytes,
goMemoryClassesMetadataMSPanFreeBytes,
goMemoryClassesMetadataMCacheInuseBytes,
goMemoryClassesMetadataMCacheFreeBytes,
goMemoryClassesProfilingBucketsBytes,
goMemoryClassesMetadataOtherBytes,
goMemoryClassesOtherBytes,
}
func bestEffortLookupRM(lookup []string) []metrics.Description {
ret := make([]metrics.Description, 0, len(lookup))
for _, rm := range metrics.All() {
for _, m := range lookup {
if m == rm.Name {
ret = append(ret, rm)
}
}
}
return ret
}
type goCollector struct {
opt GoCollectorOptions
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
@ -51,12 +111,46 @@ type goCollector struct {
msMetrics memStatsMetrics
}
const (
// These are not exposed because the Go collector needs to move to another package in v2.
// See issue https://github.com/prometheus/client_golang/issues/1030.
goRuntimeMemStatsCollection uint32 = 1 << iota
goRuntimeMetricsCollection
)
// GoCollectorOptions should not be used directly by anything except the `collectors` package.
// Use it via collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// Deprecated: Use collectors.WithGoCollections
type GoCollectorOptions struct {
// EnabledCollection sets what type of collections collector should expose on top of base collection.
// By default it's goMemStatsCollection | goRuntimeMetricsCollection.
EnabledCollections uint32
}
func (c GoCollectorOptions) isEnabled(flag uint32) bool {
return c.EnabledCollections&flag != 0
}
const defaultGoCollections = goRuntimeMemStatsCollection
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
func NewGoCollector(opts ...func(o *GoCollectorOptions)) Collector {
opt := GoCollectorOptions{EnabledCollections: defaultGoCollections}
for _, o := range opts {
o(&opt)
}
var descriptions []metrics.Description
if opt.isEnabled(goRuntimeMetricsCollection) {
descriptions = metrics.All()
} else if opt.isEnabled(goRuntimeMemStatsCollection) {
descriptions = bestEffortLookupRM(rmForMemStats)
}
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
@ -67,7 +161,11 @@ func NewGoCollector() Collector {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
if len(histograms) > 0 {
metrics.Read(histograms)
}
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
@ -83,7 +181,7 @@ func NewGoCollector() Collector {
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested elsewhere.
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
@ -123,12 +221,18 @@ func NewGoCollector() Collector {
}
metricSet = append(metricSet, m)
}
var msMetrics memStatsMetrics
if opt.isEnabled(goRuntimeMemStatsCollection) {
msMetrics = goRuntimeMemStats()
}
return &goCollector{
opt: opt,
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}
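To make the new wiring concrete: it reduces to a bitmask on GoCollectorOptions.EnabledCollections. Callers are expected to go through the collectors package, but purely as an illustration the variadic form of NewGoCollector shown above can be driven directly; the literal bits 1<<0 and 1<<1 mirror the unexported goRuntimeMemStatsCollection and goRuntimeMetricsCollection constants from this diff.

import "github.com/prometheus/client_golang/prometheus"

func newRegistryWithFullGoMetrics() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector(func(o *prometheus.GoCollectorOptions) {
		// 1<<0 enables the MemStats-style metrics, 1<<1 the raw runtime/metrics set.
		o.EnabledCollections = 1<<0 | 1<<1
	}))
	return reg
}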
@ -163,40 +267,47 @@ func (c *goCollector) Collect(ch chan<- Metric) {
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
if len(c.rmSampleBuf) > 0 {
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
}
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
if c.opt.isEnabled(goRuntimeMetricsCollection) {
// Collect all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
// populate the old metrics from it if goMemStatsCollection is enabled.
if c.opt.isEnabled(goRuntimeMemStatsCollection) {
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
}
@ -261,35 +372,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
ms.Sys = lookupOrZero("/memory/classes/total:bytes")
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
// N.B. LastGC is omitted because runtime.GCStats already has this.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.LastGC = 0
ms.HeapObjects = lookupOrZero(goGCHeapObjects)
ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
@ -324,6 +430,11 @@ type batchHistogram struct {
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
// We need to remove -Inf values. runtime/metrics keeps them around.
// But -Inf bucket should not be allowed for prometheus histograms.
if buckets[0] == math.Inf(-1) {
buckets = buckets[1:]
}
h := &batchHistogram{
desc: desc,
buckets: buckets,
@ -382,8 +493,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error {
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
if count != 0 {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
}
}
// Skip the +Inf bucket, but only for the bucket list.

View File

@ -62,7 +62,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
// other data.
name = strings.ReplaceAll(name, "-", "_")
name = name + "_" + unit
if d.Cumulative {
if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
name = name + "_total"
}
@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
// Re-bucket as powers of 2.
return reBucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// Re-bucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
b := reBucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
return buckets
}
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
func reBucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it

View File

@ -306,15 +306,27 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) {
// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
lim.mu.Lock()
defer lim.mu.Unlock()
if lim.limit == Inf {
lim.mu.Unlock()
return Reservation{
ok: true,
lim: lim,
tokens: n,
timeToAct: now,
}
} else if lim.limit == 0 {
var ok bool
if lim.burst >= n {
ok = true
lim.burst -= n
}
return Reservation{
ok: ok,
lim: lim,
tokens: lim.burst,
timeToAct: now,
}
}
now, last, tokens := lim.advance(now)
@ -351,7 +363,6 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio
lim.last = last
}
lim.mu.Unlock()
return r
}
@ -377,6 +388,9 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time,
// durationFromTokens is a unit conversion function from the number of tokens to the duration
// of time it takes to accumulate them at a rate of limit tokens per second.
func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
seconds := tokens / float64(limit)
return time.Duration(float64(time.Second) * seconds)
}
@ -384,5 +398,8 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
// which could be accumulated during that duration at a rate of limit tokens per second.
func (limit Limit) tokensFromDuration(d time.Duration) float64 {
if limit <= 0 {
return 0
}
return d.Seconds() * float64(limit)
}
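The net effect of the zero-limit branches above is that a limiter with rate 0 never refills, so only its initial burst is usable. A quick sketch against the public x/time/rate API:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(0, 2) // rate 0: tokens never refill, only the burst of 2 is available
	fmt.Println(lim.Allow(), lim.Allow(), lim.Allow()) // true true false
}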

View File

@ -27,6 +27,7 @@ import (
"net"
"strings"
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
@ -192,7 +193,7 @@ type BuildOptions struct {
// server can ignore this field.
Authority string
// ChannelzParentID is the parent ClientConn's channelz ID.
ChannelzParentID int64
ChannelzParentID *channelz.Identifier
// CustomUserAgent is the custom user agent set on the parent ClientConn.
// The balancer should set the same custom user agent if it creates a
// ClientConn.

View File

@ -20,130 +20,178 @@ package grpc
import (
"fmt"
"strings"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
)
// scStateUpdate contains the subConn and the new state it changed to.
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
// balancer.Balancer interface. The ClientConn is free to call these methods
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
// to the Balancer happen synchronously and in order.
//
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
// passed to the Balancer implementations. It invokes unexported methods on the
// ClientConn to handle these calls from the Balancer.
//
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
cc *ClientConn
// Since these fields are accessed only from handleXxx() methods which are
// synchronized by the watcher goroutine, we do not need a mutex to protect
// these fields.
balancer *gracefulswitch.Balancer
curBalancerName string
updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
closed *grpcsync.Event // Indicates if close has been called.
done *grpcsync.Event // Indicates if close has completed its work.
}
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
// is not created until the switchTo() method is invoked.
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
ccb := &ccBalancerWrapper{
cc: cc,
updateCh: buffer.NewUnbounded(),
resultCh: buffer.NewUnbounded(),
closed: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
}
go ccb.watcher()
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
return ccb
}
// The following xxxUpdate structs wrap the arguments received as part of the
// corresponding update. The watcher goroutine uses the 'type' of the update to
// invoke the appropriate handler routine to handle the update.
type ccStateUpdate struct {
ccs *balancer.ClientConnState
}
type scStateUpdate struct {
sc balancer.SubConn
state connectivity.State
err error
}
// exitIdle contains no data and is just a signal sent on the updateCh in
// ccBalancerWrapper to instruct the balancer to exit idle.
type exitIdle struct{}
type exitIdleUpdate struct{}
// ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface.
type ccBalancerWrapper struct {
cc *ClientConn
balancerMu sync.Mutex // synchronizes calls to the balancer
balancer balancer.Balancer
hasExitIdle bool
updateCh *buffer.Unbounded
closed *grpcsync.Event
done *grpcsync.Event
mu sync.Mutex
subConns map[*acBalancerWrapper]struct{}
type resolverErrorUpdate struct {
err error
}
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
ccb := &ccBalancerWrapper{
cc: cc,
updateCh: buffer.NewUnbounded(),
closed: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
subConns: make(map[*acBalancerWrapper]struct{}),
}
go ccb.watcher()
ccb.balancer = b.Build(ccb, bopts)
_, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler)
return ccb
type switchToUpdate struct {
name string
}
// watcher balancer functions sequentially, so the balancer can be implemented
// lock-free.
type subConnUpdate struct {
acbw *acBalancerWrapper
}
// watcher is a long-running goroutine which reads updates from a channel and
// invokes corresponding methods on the underlying balancer. It ensures that
// these methods are invoked in a synchronous fashion. It also ensures that
// these methods are invoked in the order in which the updates were received.
func (ccb *ccBalancerWrapper) watcher() {
for {
select {
case t := <-ccb.updateCh.Get():
case u := <-ccb.updateCh.Get():
ccb.updateCh.Load()
if ccb.closed.HasFired() {
break
}
switch u := t.(type) {
switch update := u.(type) {
case *ccStateUpdate:
ccb.handleClientConnStateChange(update.ccs)
case *scStateUpdate:
ccb.balancerMu.Lock()
ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
ccb.balancerMu.Unlock()
case *acBalancerWrapper:
ccb.mu.Lock()
if ccb.subConns != nil {
delete(ccb.subConns, u)
ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
}
ccb.mu.Unlock()
case exitIdle:
if ccb.cc.GetState() == connectivity.Idle {
if ei, ok := ccb.balancer.(balancer.ExitIdler); ok {
// We already checked that the balancer implements
// ExitIdle before pushing the event to updateCh, but
// check conditionally again as defensive programming.
ccb.balancerMu.Lock()
ei.ExitIdle()
ccb.balancerMu.Unlock()
}
}
ccb.handleSubConnStateChange(update)
case *exitIdleUpdate:
ccb.handleExitIdle()
case *resolverErrorUpdate:
ccb.handleResolverError(update.err)
case *switchToUpdate:
ccb.handleSwitchTo(update.name)
case *subConnUpdate:
ccb.handleRemoveSubConn(update.acbw)
default:
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
}
case <-ccb.closed.Done():
}
if ccb.closed.HasFired() {
ccb.balancerMu.Lock()
ccb.balancer.Close()
ccb.balancerMu.Unlock()
ccb.mu.Lock()
scs := ccb.subConns
ccb.subConns = nil
ccb.mu.Unlock()
ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
ccb.done.Fire()
// Fire done before removing the addr conns. We can safely unblock
// ccb.close and allow the removeAddrConns to happen
// asynchronously.
for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
ccb.handleClose()
return
}
}
}
func (ccb *ccBalancerWrapper) close() {
ccb.closed.Fire()
<-ccb.done.Done()
}
// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer.
//
// Unlike other methods invoked by grpc to push updates to the underlying
// balancer, this method cannot simply push the update onto the update channel
// and return. It needs to return the error returned by the underlying balancer
// back to grpc which propagates that to the resolver.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
func (ccb *ccBalancerWrapper) exitIdle() bool {
if !ccb.hasExitIdle {
return false
var res interface{}
select {
case res = <-ccb.resultCh.Get():
ccb.resultCh.Load()
case <-ccb.closed.Done():
// Return early if the balancer wrapper is closed while we are waiting for
// the underlying balancer to process a ClientConnState update.
return nil
}
ccb.updateCh.Put(exitIdle{})
return true
// If the returned error is nil, attempting to type assert to error leads to
// panic. So, this needs to be handled separately.
if res == nil {
return nil
}
return res.(error)
}
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
// handleClientConnStateChange handles a ClientConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
//
// If the addresses specified in the update contain addresses of type "grpclb"
// and the selected LB policy is not "grpclb", these addresses will be filtered
// out and ccs will be modified with the updated address list.
func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
if ccb.curBalancerName != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
var addrs []resolver.Address
for _, addr := range ccs.ResolverState.Addresses {
if addr.Type == resolver.GRPCLB {
continue
}
addrs = append(addrs, addr)
}
ccs.ResolverState.Addresses = addrs
}
ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
}
// updateSubConnState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
// When updating addresses for a SubConn, if the address in use is not in
// the new addresses, the old ac will be tearDown() and a new ac will be
// created. tearDown() generates a state change with Shutdown state, we
@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
})
}
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.balancerMu.Lock()
defer ccb.balancerMu.Unlock()
return ccb.balancer.UpdateClientConnState(*ccs)
// handleSubConnStateChange handles a SubConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
}
func (ccb *ccBalancerWrapper) exitIdle() {
ccb.updateCh.Put(&exitIdleUpdate{})
}
func (ccb *ccBalancerWrapper) handleExitIdle() {
if ccb.cc.GetState() != connectivity.Idle {
return
}
ccb.balancer.ExitIdle()
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
ccb.balancerMu.Lock()
defer ccb.balancerMu.Unlock()
ccb.updateCh.Put(&resolverErrorUpdate{err: err})
}
func (ccb *ccBalancerWrapper) handleResolverError(err error) {
ccb.balancer.ResolverError(err)
}
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
// LB policy identified by name.
//
// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
// first good update from the name resolver, it determines the LB policy to use
// and invokes the switchTo() method. Upon receipt of every subsequent update
// from the name resolver, it invokes this method.
//
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
ccb.updateCh.Put(&switchToUpdate{name: name})
}
// handleSwitchTo handles a balancer switch update from the update channel. It
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
// balancer.Builder corresponding to name. If no balancer.Builder is registered
// for the given name, it uses the default LB policy which is "pick_first".
func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
// TODO: Other languages use case-insensitive balancer registries. We should
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
if strings.EqualFold(ccb.curBalancerName, name) {
return
}
// TODO: Ensure that name is a registered LB policy when we get here.
// We currently only validate the `loadBalancingConfig` field. We need to do
// the same for the `loadBalancingPolicy` field and reject the service config
// if the specified policy is not registered.
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
builder = newPickfirstBuilder()
} else {
channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
}
if err := ccb.balancer.SwitchTo(builder); err != nil {
channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
return
}
ccb.curBalancerName = builder.Name()
}
// handleRemoveSubConn handles a request from the underlying balancer to remove
// a subConn.
//
// See comments in RemoveSubConn() for more details.
func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
func (ccb *ccBalancerWrapper) close() {
ccb.closed.Fire()
<-ccb.done.Done()
}
func (ccb *ccBalancerWrapper) handleClose() {
ccb.balancer.Close()
ccb.done.Fire()
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
if len(addrs) <= 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
}
ac, err := ccb.cc.newAddrConn(addrs, opts)
if err != nil {
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err
}
acbw := &acBalancerWrapper{ac: ac}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
ccb.subConns[acbw] = struct{}{}
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
// The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
// during switchBalancer() if the old balancer calls RemoveSubConn() in its
// Close().
ccb.updateCh.Put(sc)
// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
// was required to handle the RemoveSubConn() method asynchronously by pushing
// the update onto the update channel. This was done to avoid a deadlock as
// switchBalancer() was holding cc.mu when calling Close() on the old
// balancer, which would in turn call RemoveSubConn().
//
// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
// asynchronously is probably not required anymore since the switchTo() method
// handles the balancer switch by pushing the update onto the channel.
// TODO(easwars): Handle this inline.
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
}
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.subConns == nil {
return
}
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is

vendor/google.golang.org/grpc/channelz/channelz.go (new file, generated, vendored, 36 lines)
View File

@ -0,0 +1,36 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package channelz exports internals of the channelz implementation as required
// by other gRPC packages.
//
// The implementation of the channelz spec as defined in
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
package channelz
import "google.golang.org/grpc/internal/channelz"
// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier = channelz.Identifier

View File

@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
if channelz.IsOn() {
if cc.dopts.channelzParentID != 0 {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Channel Created",
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
Severity: channelz.CtInfo,
},
})
} else {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
channelz.Info(logger, cc.channelzID, "Channel Created")
}
cc.csMgr.channelzID = cc.channelzID
pid := cc.dopts.channelzParentID
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
ted := &channelz.TraceEventDesc{
Desc: "Channel created",
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != nil {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
cc.csMgr.channelzID = cc.channelzID
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
return nil, errNoTransportSecurity
@ -281,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
credsClone = creds.Clone()
}
cc.balancerBuildOpts = balancer.BuildOptions{
cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
DialCreds: credsClone,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
@ -289,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
}
})
// Build the resolver.
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
@ -398,7 +395,7 @@ type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
channelzID int64
channelzID *channelz.Identifier
}
// updateState updates the connectivity.State of ClientConn.
@ -464,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil)
// handshakes. It also handles errors on established connections by
// re-resolving the name and reconnecting.
type ClientConn struct {
ctx context.Context
cancel context.CancelFunc
ctx context.Context // Initialized using the background context at dial time.
cancel context.CancelFunc // Cancelled on close.
target string
parsedTarget resolver.Target
authority string
dopts dialOptions
csMgr *connectivityStateManager
balancerBuildOpts balancer.BuildOptions
blockingpicker *pickerWrapper
// The following are initialized at dial time, and are read-only after that.
target string // User's dial target.
parsedTarget resolver.Target // See parseTargetAndFindResolver().
authority string // See determineAuthority().
dopts dialOptions // Default and user specified dial options.
channelzID *channelz.Identifier // Channelz identifier for the channel.
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
csMgr *connectivityStateManager
blockingpicker *pickerWrapper
safeConfigSelector iresolver.SafeConfigSelector
czData *channelzData
retryThrottler atomic.Value // Updated from service config.
mu sync.RWMutex
resolverWrapper *ccResolverWrapper
sc *ServiceConfig
conns map[*addrConn]struct{}
// Keepalive parameter can be updated if a GoAway is received.
mkp keepalive.ClientParameters
curBalancerName string
balancerWrapper *ccBalancerWrapper
retryThrottler atomic.Value
// firstResolveEvent is used to track whether the name resolver sent us at
// least one update. RPCs block on this event.
firstResolveEvent *grpcsync.Event
channelzID int64 // channelz unique identification number
czData *channelzData
// mu protects the following fields.
// TODO: split mu so the same mutex isn't used for everything.
mu sync.RWMutex
resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
@ -536,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
cc.mu.Lock()
defer cc.mu.Unlock()
if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() {
return
}
for ac := range cc.conns {
go ac.connect()
}
cc.balancerWrapper.exitIdle()
}
func (cc *ClientConn) scWatcher() {
@ -623,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
// with the new addresses.
cc.maybeApplyDefaultServiceConfig(nil)
if cc.balancerWrapper != nil {
cc.balancerWrapper.resolverError(err)
}
cc.balancerWrapper.resolverError(err)
// No addresses are valid with err set; return early.
cc.mu.Unlock()
@ -653,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
} else {
ret = balancer.ErrBadResolverState
if cc.balancerWrapper == nil {
var err error
if s.ServiceConfig.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
} else {
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
}
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
if cc.sc == nil {
// Apply the failing LB only if we haven't received valid service config
// from the name resolver in the past.
cc.applyFailingLB(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
@ -670,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
}
var balCfg serviceconfig.LoadBalancingConfig
if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
if cc.sc != nil && cc.sc.lbConfig != nil {
balCfg = cc.sc.lbConfig.cfg
}
cbn := cc.curBalancerName
bw := cc.balancerWrapper
cc.mu.Unlock()
if cbn != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
for i := 0; i < len(s.Addresses); {
if s.Addresses[i].Type == resolver.GRPCLB {
copy(s.Addresses[i:], s.Addresses[i+1:])
s.Addresses = s.Addresses[:len(s.Addresses)-1]
continue
}
i++
}
}
uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
if ret == nil {
ret = uccsErr // prefer ErrBadResolver state since any other error is
@ -696,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
return ret
}
// switchBalancer starts the switching from current balancer to the balancer
// with the given name.
//
// It will NOT send the current address list to the new balancer. If needed,
// caller of this function should send address list to the new balancer after
// this function returns.
// applyFailingLB is akin to configuring an LB policy on the channel which
// always fails RPCs. Here, an actual LB policy is not configured, but an always
// erroring picker is configured, which returns errors with information about
// what was invalid in the received service config. A config selector with no
// service config is configured, and the connectivity state of the channel is
// set to TransientFailure.
//
// Caller must hold cc.mu.
func (cc *ClientConn) switchBalancer(name string) {
if strings.EqualFold(cc.curBalancerName, name) {
return
}
channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
if cc.dopts.balancerBuilder != nil {
channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
return
}
if cc.balancerWrapper != nil {
// Don't hold cc.mu while closing the balancers. The balancers may call
// methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex
// would cause a deadlock in that case.
cc.mu.Unlock()
cc.balancerWrapper.close()
cc.mu.Lock()
}
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
var err error
if sc.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
} else {
channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
cc.curBalancerName = builder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
}
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return
}
// TODO(bar switching) send updates to all balancer wrappers when balancer
// gracefully switching is supported.
cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
cc.mu.Unlock()
cc.balancerWrapper.updateSubConnState(sc, s, err)
}
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
@ -768,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
cc.mu.Unlock()
return nil, ErrClientConnClosing
}
if channelz.IsOn() {
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Created",
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
Severity: channelz.CtInfo,
},
})
var err error
ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
if err != nil {
return nil, err
}
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel created",
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
Severity: channelz.CtInfo,
},
})
cc.conns[ac] = struct{}{}
cc.mu.Unlock()
return ac, nil
@ -853,16 +801,31 @@ func (ac *addrConn) connect() error {
return nil
}
func equalAddresses(a, b []resolver.Address) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if !v.Equal(b[i]) {
return false
}
}
return true
}
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
// If the addresses are the same as the old list, it does nothing and returns
// true.
//
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
@ -879,6 +842,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
return true
}
if equalAddresses(ac.addrs, addrs) {
return true
}
if ac.state == connectivity.Connecting {
return false
}
@ -959,14 +926,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
})
if err != nil {
return nil, nil, toRPCErr(err)
}
return t, done, nil
}
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
@ -991,35 +954,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
cc.retryThrottler.Store((*retryThrottler)(nil))
}
if cc.dopts.balancerBuilder == nil {
// Only look at balancer types and switch balancer if balancer dial
// option is not set.
var newBalancerName string
if cc.sc != nil && cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
} else {
var isGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName
var newBalancerName string
if cc.sc != nil && cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
} else {
var isGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
cc.switchBalancer(newBalancerName)
} else if cc.balancerWrapper == nil {
// Balancer dial option was set, and this is the first time handling
// resolved addresses. Build a balancer with dopts.balancerBuilder.
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName
}
}
cc.balancerWrapper.switchTo(newBalancerName)
}
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
@ -1070,11 +1024,11 @@ func (cc *ClientConn) Close() error {
rWrapper := cc.resolverWrapper
cc.resolverWrapper = nil
bWrapper := cc.balancerWrapper
cc.balancerWrapper = nil
cc.mu.Unlock()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before it is closed.
cc.blockingpicker.close()
if bWrapper != nil {
bWrapper.close()
}
@ -1085,22 +1039,22 @@ func (cc *ClientConn) Close() error {
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
if channelz.IsOn() {
ted := &channelz.TraceEventDesc{
Desc: "Channel Deleted",
ted := &channelz.TraceEventDesc{
Desc: "Channel deleted",
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != nil {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != 0 {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(cc.channelzID)
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
// deleted right away.
channelz.RemoveEntry(cc.channelzID)
return nil
}
@ -1130,7 +1084,7 @@ type addrConn struct {
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
channelzID int64 // channelz unique identification number.
channelzID *channelz.Identifier
czData *channelzData
}
@ -1284,6 +1238,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.mu.Lock()
defer ac.mu.Unlock()
defer connClosed.Fire()
defer hcancel()
if !hcStarted || hctx.Err() != nil {
// We didn't start the health check or set the state to READY, so
// no need to do anything else here.
@ -1294,7 +1249,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// state, since there may be a new transport in this addrConn.
return
}
hcancel()
ac.transport = nil
// Refresh the name resolver
ac.cc.resolveNow(resolver.ResolveNowOptions{})
@ -1312,14 +1266,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
defer cancel()
if channelz.IsOn() {
copts.ChannelzParentID = ac.channelzID
}
copts.ChannelzParentID = ac.channelzID
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
if err != nil {
// newTr is either nil, or closed.
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err)
hcancel()
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
return err
}
@ -1332,7 +1285,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
newTr.Close(transport.ErrConnClosing)
if connectCtx.Err() == context.DeadlineExceeded {
err := errors.New("failed to receive server preface within timeout")
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err)
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
return err
}
return nil
@ -1497,19 +1450,18 @@ func (ac *addrConn) tearDown(err error) {
curTr.GracefulClose()
ac.mu.Lock()
}
if channelz.IsOn() {
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel Deleted",
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel deleted",
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
Severity: channelz.CtInfo,
Parent: &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
Severity: channelz.CtInfo,
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
// the entity being deleted, and thus prevent it from being deleted right away.
channelz.RemoveEntry(ac.channelzID)
}
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from
// being deleted right away.
channelz.RemoveEntry(ac.channelzID)
ac.mu.Unlock()
}

View File

@ -20,12 +20,11 @@ package grpc
import (
"context"
"fmt"
"net"
"time"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal"
@ -45,19 +44,17 @@ type dialOptions struct {
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
cp Compressor
dc Decompressor
bs internalbackoff.Strategy
block bool
returnLastError bool
timeout time.Duration
scChan <-chan ServiceConfig
authority string
copts transport.ConnectOptions
callOptions []CallOption
// This is used by WithBalancerName dial option.
balancerBuilder balancer.Builder
channelzParentID int64
cp Compressor
dc Decompressor
bs internalbackoff.Strategy
block bool
returnLastError bool
timeout time.Duration
scChan <-chan ServiceConfig
authority string
copts transport.ConnectOptions
callOptions []CallOption
channelzParentID *channelz.Identifier
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
@ -195,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
//
// The balancer cannot be overridden by balancer option specified by service
// config.
//
// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
// instead. Will be removed in a future 1.x release.
func WithBalancerName(balancerName string) DialOption {
builder := balancer.Get(balancerName)
if builder == nil {
panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
}
return newFuncDialOption(func(o *dialOptions) {
o.balancerBuilder = builder
})
}
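Since the deprecation note on the removed WithBalancerName option points callers at service config, here is a minimal, hedged migration sketch; the target address and the "round_robin" policy are placeholder choices, not taken from this diff:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Choose the LB policy via default service config instead of the removed
    // WithBalancerName dial option (per the deprecation note above).
    conn, err := grpc.Dial(
        "dns:///example.test:50051", // placeholder target
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
    )
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()
}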
// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
@ -304,8 +282,8 @@ func WithReturnConnectionError() DialOption {
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
// security is incompatible and will cause grpc.Dial() to fail.
//
// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
// Will be supported throughout 1.x.
// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
// instead. Will be supported throughout 1.x.
func WithInsecure() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.TransportCredentials = insecure.NewCredentials()
@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption {
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithChannelzParentID(id int64) DialOption {
func WithChannelzParentID(id *channelz.Identifier) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.channelzParentID = id
})

View File

@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec)
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Compressors are
// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
if codec == nil {

View File

@ -0,0 +1,382 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package gracefulswitch implements a graceful switch load balancer.
package gracefulswitch
import (
"errors"
"fmt"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/resolver"
)
var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
var _ balancer.Balancer = (*Balancer)(nil)
// NewBalancer returns a graceful switch Balancer.
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
return &Balancer{
cc: cc,
bOpts: opts,
}
}
// Balancer is a utility to gracefully switch from one balancer to
// a new balancer. It implements the balancer.Balancer interface.
type Balancer struct {
bOpts balancer.BuildOptions
cc balancer.ClientConn
// mu protects the following fields and all fields within balancerCurrent
// and balancerPending. mu does not need to be held when calling into the
// child balancers, as all calls into these children happen only as a direct
// result of a call into the gracefulSwitchBalancer, which are also
// guaranteed to be synchronous. There is one exception: an UpdateState call
// from a child balancer when current and pending are populated can lead to
// calling Close() on the current. To prevent that racing with an
// UpdateSubConnState from the channel, we hold currentMu during Close and
// UpdateSubConnState calls.
mu sync.Mutex
balancerCurrent *balancerWrapper
balancerPending *balancerWrapper
closed bool // set to true when this balancer is closed
// currentMu must be locked before mu. This mutex guards against this
// sequence of events: UpdateSubConnState() called, finds the
// balancerCurrent, gives up lock, updateState comes in, causes Close() on
// balancerCurrent before the UpdateSubConnState is called on the
// balancerCurrent.
currentMu sync.Mutex
}
// swap swaps out the current lb with the pending lb and updates the ClientConn.
// The caller must hold gsb.mu.
func (gsb *Balancer) swap() {
gsb.cc.UpdateState(gsb.balancerPending.lastState)
cur := gsb.balancerCurrent
gsb.balancerCurrent = gsb.balancerPending
gsb.balancerPending = nil
go func() {
gsb.currentMu.Lock()
defer gsb.currentMu.Unlock()
cur.Close()
}()
}
// Helper function that checks if the balancer passed in is current or pending.
// The caller must hold gsb.mu.
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
return bw == gsb.balancerCurrent || bw == gsb.balancerPending
}
// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
gsb.mu.Lock()
if gsb.closed {
gsb.mu.Unlock()
return errBalancerClosed
}
bw := &balancerWrapper{
gsb: gsb,
lastState: balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
},
subconns: make(map[balancer.SubConn]bool),
}
balToClose := gsb.balancerPending // nil if there is no pending balancer
if gsb.balancerCurrent == nil {
gsb.balancerCurrent = bw
} else {
gsb.balancerPending = bw
}
gsb.mu.Unlock()
balToClose.Close()
// This function takes a builder instead of a balancer because builder.Build
// can call back inline, and this utility needs to handle the callbacks.
newBalancer := builder.Build(bw, gsb.bOpts)
if newBalancer == nil {
// This is illegal and should never happen; we clear the balancerWrapper
// we were constructing if it happens to avoid a potential panic.
gsb.mu.Lock()
if gsb.balancerPending != nil {
gsb.balancerPending = nil
} else {
gsb.balancerCurrent = nil
}
gsb.mu.Unlock()
return balancer.ErrBadResolverState
}
// This write doesn't need to take gsb.mu because this field never gets read
// or written to on any calls from the current or pending. Calls from grpc
// to this balancer are guaranteed to be called synchronously, so this
// bw.Balancer field will never be forwarded to until this SwitchTo()
// function returns.
bw.Balancer = newBalancer
return nil
}
// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
gsb.mu.Lock()
defer gsb.mu.Unlock()
if gsb.balancerPending != nil {
return gsb.balancerPending
}
return gsb.balancerCurrent
}
// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
// The resolver data is only relevant to the most recent LB Policy.
balToUpdate := gsb.latestBalancer()
if balToUpdate == nil {
return errBalancerClosed
}
// Perform this call without gsb.mu to prevent deadlocks if the child calls
// back into the channel. The latest balancer can never be closed during a
// call from the channel, even without gsb.mu held.
return balToUpdate.UpdateClientConnState(state)
}
// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
// The resolver data is only relevant to the most recent LB Policy.
balToUpdate := gsb.latestBalancer()
if balToUpdate == nil {
return
}
// Perform this call without gsb.mu to prevent deadlocks if the child calls
// back into the channel. The latest balancer can never be closed during a
// call from the channel, even without gsb.mu held.
balToUpdate.ResolverError(err)
}
// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
balToUpdate := gsb.latestBalancer()
if balToUpdate == nil {
return
}
// There is no need to protect this read with a mutex, as the write to the
// Balancer field happens in SwitchTo, which completes before this can be
// called.
if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
ei.ExitIdle()
return
}
for sc := range balToUpdate.subconns {
sc.Connect()
}
}
// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
gsb.currentMu.Lock()
defer gsb.currentMu.Unlock()
gsb.mu.Lock()
// Forward update to the appropriate child. Even if there is a pending
// balancer, the current balancer should continue to get SubConn updates to
// maintain the proper state while the pending is still connecting.
var balToUpdate *balancerWrapper
if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
balToUpdate = gsb.balancerCurrent
} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
balToUpdate = gsb.balancerPending
}
gsb.mu.Unlock()
if balToUpdate == nil {
// SubConn belonged to a stale lb policy that has not yet fully closed,
// or the balancer was already closed.
return
}
balToUpdate.UpdateSubConnState(sc, state)
}
// Close closes any active child balancers.
func (gsb *Balancer) Close() {
gsb.mu.Lock()
gsb.closed = true
currentBalancerToClose := gsb.balancerCurrent
gsb.balancerCurrent = nil
pendingBalancerToClose := gsb.balancerPending
gsb.balancerPending = nil
gsb.mu.Unlock()
currentBalancerToClose.Close()
pendingBalancerToClose.Close()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
balancer.Balancer
gsb *Balancer
lastState balancer.State
subconns map[balancer.SubConn]bool // subconns created by this balancer
}
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
if state.ConnectivityState == connectivity.Shutdown {
bw.gsb.mu.Lock()
delete(bw.subconns, sc)
bw.gsb.mu.Unlock()
}
// There is no need to protect this read with a mutex, as the write to the
// Balancer field happens in SwitchTo, which completes before this can be
// called.
bw.Balancer.UpdateSubConnState(sc, state)
}
// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
// before Close is called.
if bw == nil {
return
}
// There is no need to protect this read with a mutex, as Close() is
// impossible to be called concurrently with the write in SwitchTo(). The
// callsites of Close() for this balancer in Graceful Switch Balancer will
// never be called until SwitchTo() returns.
bw.Balancer.Close()
bw.gsb.mu.Lock()
for sc := range bw.subconns {
bw.gsb.cc.RemoveSubConn(sc)
}
bw.gsb.mu.Unlock()
}
func (bw *balancerWrapper) UpdateState(state balancer.State) {
// Hold the mutex for this entire call to ensure it cannot occur
// concurrently with other updateState() calls. This causes updates to
// lastState and calls to cc.UpdateState to happen atomically.
bw.gsb.mu.Lock()
defer bw.gsb.mu.Unlock()
bw.lastState = state
if !bw.gsb.balancerCurrentOrPending(bw) {
return
}
if bw == bw.gsb.balancerCurrent {
// In the case that the current balancer exits READY, and there is a pending
// balancer, you can forward the pending balancer's cached State up to
// ClientConn and swap the pending into the current. This is because there
// is no reason to gracefully switch from and keep using the old policy as
// the ClientConn is not connected to any backends.
if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
bw.gsb.swap()
return
}
// Even if there is a pending balancer waiting to be gracefully switched to,
// continue to forward current balancer updates to the Client Conn. Ignoring
// state + picker from the current would cause undefined behavior/cause the
// system to behave incorrectly from the current LB policies perspective.
// Also, the current LB is still being used by grpc to choose SubConns per
// RPC, and thus should use the most updated form of the current balancer.
bw.gsb.cc.UpdateState(state)
return
}
// This method is now dealing with a state update from the pending balancer.
// If the current balancer is currently in a state other than READY, the new
// policy can be swapped into place immediately. This is because there is no
// reason to gracefully switch from and keep using the old policy as the
// ClientConn is not connected to any backends.
if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
bw.gsb.swap()
}
}
func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
bw.gsb.mu.Lock()
if !bw.gsb.balancerCurrentOrPending(bw) {
bw.gsb.mu.Unlock()
return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
}
bw.gsb.mu.Unlock()
sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
if err != nil {
return nil, err
}
bw.gsb.mu.Lock()
if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
bw.gsb.cc.RemoveSubConn(sc)
bw.gsb.mu.Unlock()
return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
}
bw.subconns[sc] = true
bw.gsb.mu.Unlock()
return sc, nil
}
func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
// Ignore ResolveNow requests from anything other than the most recent
// balancer, because older balancers were already removed from the config.
if bw != bw.gsb.latestBalancer() {
return
}
bw.gsb.cc.ResolveNow(opts)
}
func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
bw.gsb.mu.Lock()
if !bw.gsb.balancerCurrentOrPending(bw) {
bw.gsb.mu.Unlock()
return
}
bw.gsb.mu.Unlock()
bw.gsb.cc.RemoveSubConn(sc)
}
func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
bw.gsb.mu.Lock()
if !bw.gsb.balancerCurrentOrPending(bw) {
bw.gsb.mu.Unlock()
return
}
bw.gsb.mu.Unlock()
bw.gsb.cc.UpdateAddresses(sc, addrs)
}
func (bw *balancerWrapper) Target() string {
return bw.gsb.cc.Target()
}
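A rough usage sketch for the utility above; the package is internal, so this could only compile inside the grpc module itself, and the import path, the balancer.ClientConn value, and the policy name are assumptions:

package example

import (
    "fmt"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/internal/balancer/gracefulswitch" // assumed import path
)

// switchPolicy builds a graceful switch balancer around cc and asks it to
// migrate to the named LB policy. Per the semantics above, the pending policy
// only takes over once it reports READY or the current policy leaves READY.
func switchPolicy(cc balancer.ClientConn, opts balancer.BuildOptions, name string) error {
    gsb := gracefulswitch.NewBalancer(cc, opts)
    builder := balancer.Get(name)
    if builder == nil {
        return fmt.Errorf("balancer %q is not registered", name)
    }
    return gsb.SwitchTo(builder)
}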

View File

@ -31,7 +31,7 @@ import (
// Logger is the global binary logger. It can be used to get binary logger for
// each method.
type Logger interface {
getMethodLogger(methodName string) *MethodLogger
GetMethodLogger(methodName string) MethodLogger
}
// binLogger is the global binary logger for the binary. One of this should be
@ -49,17 +49,24 @@ func SetLogger(l Logger) {
binLogger = l
}
// GetLogger gets the binary logger.
//
// Only call this at init time.
func GetLogger() Logger {
return binLogger
}
// GetMethodLogger returns the methodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func GetMethodLogger(methodName string) *MethodLogger {
func GetMethodLogger(methodName string) MethodLogger {
if binLogger == nil {
return nil
}
return binLogger.getMethodLogger(methodName)
return binLogger.GetMethodLogger(methodName)
}
func init() {
@ -68,17 +75,29 @@ func init() {
binLogger = NewLoggerFromConfigString(configStr)
}
type methodLoggerConfig struct {
// MethodLoggerConfig contains the setting for logging behavior of a method
// logger. Currently, it contains the max length of header and message.
type MethodLoggerConfig struct {
// Max length of header and message.
hdr, msg uint64
Header, Message uint64
}
// LoggerConfig contains the config for loggers to create method loggers.
type LoggerConfig struct {
All *MethodLoggerConfig
Services map[string]*MethodLoggerConfig
Methods map[string]*MethodLoggerConfig
Blacklist map[string]struct{}
}
type logger struct {
all *methodLoggerConfig
services map[string]*methodLoggerConfig
methods map[string]*methodLoggerConfig
config LoggerConfig
}
blacklist map[string]struct{}
// NewLoggerFromConfig builds a logger with the given LoggerConfig.
func NewLoggerFromConfig(config LoggerConfig) Logger {
return &logger{config: config}
}
// newEmptyLogger creates an empty logger. The map fields need to be filled in
@ -88,57 +107,57 @@ func newEmptyLogger() *logger {
}
// Set method logger for "*".
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
if l.all != nil {
func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
if l.config.All != nil {
return fmt.Errorf("conflicting global rules found")
}
l.all = ml
l.config.All = ml
return nil
}
// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
if _, ok := l.services[service]; ok {
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
if _, ok := l.config.Services[service]; ok {
return fmt.Errorf("conflicting service rules for service %v found", service)
}
if l.services == nil {
l.services = make(map[string]*methodLoggerConfig)
if l.config.Services == nil {
l.config.Services = make(map[string]*MethodLoggerConfig)
}
l.services[service] = ml
l.config.Services[service] = ml
return nil
}
// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
if _, ok := l.blacklist[method]; ok {
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
if _, ok := l.config.Blacklist[method]; ok {
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
if _, ok := l.config.Methods[method]; ok {
return fmt.Errorf("conflicting method rules for method %v found", method)
}
if l.methods == nil {
l.methods = make(map[string]*methodLoggerConfig)
if l.config.Methods == nil {
l.config.Methods = make(map[string]*MethodLoggerConfig)
}
l.methods[method] = ml
l.config.Methods[method] = ml
return nil
}
// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
if _, ok := l.blacklist[method]; ok {
if _, ok := l.config.Blacklist[method]; ok {
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
if _, ok := l.methods[method]; ok {
if _, ok := l.config.Methods[method]; ok {
return fmt.Errorf("conflicting method rules for method %v found", method)
}
if l.blacklist == nil {
l.blacklist = make(map[string]struct{})
if l.config.Blacklist == nil {
l.config.Blacklist = make(map[string]struct{})
}
l.blacklist[method] = struct{}{}
l.config.Blacklist[method] = struct{}{}
return nil
}
@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error {
//
// Each methodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
if ml, ok := l.methods[s+"/"+m]; ok {
return newMethodLogger(ml.hdr, ml.msg)
if ml, ok := l.config.Methods[s+"/"+m]; ok {
return newMethodLogger(ml.Header, ml.Message)
}
if _, ok := l.blacklist[s+"/"+m]; ok {
if _, ok := l.config.Blacklist[s+"/"+m]; ok {
return nil
}
if ml, ok := l.services[s]; ok {
return newMethodLogger(ml.hdr, ml.msg)
if ml, ok := l.config.Services[s]; ok {
return newMethodLogger(ml.Header, ml.Message)
}
if l.all == nil {
if l.config.All == nil {
return nil
}
return newMethodLogger(l.all.hdr, l.all.msg)
return newMethodLogger(l.config.All.Header, l.config.All.Message)
}
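To illustrate the newly exported configuration types, a hedged sketch; the import path is the internal binarylog package, so this is only usable from within grpc or a forwarding wrapper, and the method name is a placeholder:

package example

import (
    iblog "google.golang.org/grpc/internal/binarylog" // internal; assumed usable here
)

func buildMethodLogger() iblog.MethodLogger {
    cfg := iblog.LoggerConfig{
        // Log every method, truncating headers to 1 KiB and messages to 4 KiB.
        All: &iblog.MethodLoggerConfig{Header: 1024, Message: 4096},
    }
    l := iblog.NewLoggerFromConfig(cfg)
    // Returns a fresh MethodLogger per call, or nil if the method is filtered out.
    return l.GetMethodLogger("/example.Service/Method")
}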

View File

@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
return nil
@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
}
if m == "*" {
if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
} else {
if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
}

View File

@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() {
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
type MethodLogger struct {
type MethodLogger interface {
Log(LogEntryConfig)
}
type methodLogger struct {
headerMaxLen, messageMaxLen uint64
callID uint64
@ -57,8 +61,8 @@ type MethodLogger struct {
sink Sink // TODO(blog): make this plugable.
}
func newMethodLogger(h, m uint64) *MethodLogger {
return &MethodLogger{
func newMethodLogger(h, m uint64) *methodLogger {
return &methodLogger{
headerMaxLen: h,
messageMaxLen: m,
@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
}
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *MethodLogger) Log(c LogEntryConfig) {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other library to reuse as much logic
// in methodLogger as possible.
func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
m := c.toProto()
timestamp, _ := ptypes.TimestampProto(time.Now())
m.Timestamp = timestamp
@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
case *pb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
ml.sink.Write(m)
return m
}
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *methodLogger) Log(c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
return truncated
}
func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}

View File

@ -25,6 +25,7 @@ package channelz
import (
"context"
"errors"
"fmt"
"sort"
"sync"
@ -184,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
return db.get().GetServer(id)
}
// RegisterChannel registers the given channel c in channelz database with ref
// as its reference name, and add it to the child list of its parent (identified
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
// assigned to this channel.
func RegisterChannel(c Channel, pid int64, ref string) int64 {
// RegisterChannel registers the given channel c in the channelz database with
// ref as its reference name, and adds it to the child list of its parent
// (identified by pid). pid == nil means no parent.
//
// Returns a unique channelz identifier assigned to this channel.
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
id := idGen.genID()
var parent int64
isTopChannel := true
if pid != nil {
isTopChannel = false
parent = pid.Int()
}
if !IsOn() {
return newIdentifer(RefChannel, id, pid)
}
cn := &channel{
refName: ref,
c: c,
subChans: make(map[int64]string),
nestedChans: make(map[int64]string),
id: id,
pid: pid,
pid: parent,
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
if pid == 0 {
db.get().addChannel(id, cn, true, pid)
} else {
db.get().addChannel(id, cn, false, pid)
}
return id
db.get().addChannel(id, cn, isTopChannel, parent)
return newIdentifer(RefChannel, id, pid)
}
// RegisterSubChannel registers the given channel c in channelz database with ref
// as its reference name, and add it to the child list of its parent (identified
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a SubChannel's parent id cannot be 0")
return 0
// RegisterSubChannel registers the given subChannel c in the channelz database
// with ref as its reference name, and adds it to the child list of its parent
// (identified by pid).
//
// Returns a unique channelz identifier assigned to this subChannel.
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
if pid == nil {
return nil, errors.New("a SubChannel's parent id cannot be nil")
}
id := idGen.genID()
if !IsOn() {
return newIdentifer(RefSubChannel, id, pid), nil
}
sc := &subChannel{
refName: ref,
c: c,
sockets: make(map[int64]string),
id: id,
pid: pid,
pid: pid.Int(),
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
}
db.get().addSubChannel(id, sc, pid)
return id
db.get().addSubChannel(id, sc, pid.Int())
return newIdentifer(RefSubChannel, id, pid), nil
}
// RegisterServer registers the given server s in channelz database. It returns
// the unique channelz tracking id assigned to this server.
func RegisterServer(s Server, ref string) int64 {
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterServer(s Server, ref string) *Identifier {
id := idGen.genID()
if !IsOn() {
return newIdentifer(RefServer, id, nil)
}
svr := &server{
refName: ref,
s: s,
@ -240,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
id: id,
}
db.get().addServer(id, svr)
return id
return newIdentifer(RefServer, id, nil)
}
// RegisterListenSocket registers the given listen socket s in channelz database
// with ref as its reference name, and add it to the child list of its parent
// (identified by pid). It returns the unique channelz tracking id assigned to
// this listen socket.
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a ListenSocket's parent id cannot be 0")
return 0
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
if pid == nil {
return nil, errors.New("a ListenSocket's parent id cannot be 0")
}
id := idGen.genID()
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
db.get().addListenSocket(id, ls, pid)
return id
if !IsOn() {
return newIdentifer(RefListenSocket, id, pid), nil
}
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
db.get().addListenSocket(id, ls, pid.Int())
return newIdentifer(RefListenSocket, id, pid), nil
}
// RegisterNormalSocket registers the given normal socket s in channelz database
// with ref as its reference name, and add it to the child list of its parent
// with ref as its reference name, and adds it to the child list of its parent
// (identified by pid). It returns the unique channelz tracking id assigned to
// this normal socket.
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
if pid == 0 {
logger.Error("a NormalSocket's parent id cannot be 0")
return 0
//
// If channelz is not turned ON, the channelz database is not mutated.
func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
if pid == nil {
return nil, errors.New("a NormalSocket's parent id cannot be 0")
}
id := idGen.genID()
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
db.get().addNormalSocket(id, ns, pid)
return id
if !IsOn() {
return newIdentifer(RefNormalSocket, id, pid), nil
}
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
db.get().addNormalSocket(id, ns, pid.Int())
return newIdentifer(RefNormalSocket, id, pid), nil
}
// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database.
func RemoveEntry(id int64) {
db.get().removeEntry(id)
//
// If channelz is not turned ON, this function is a no-op.
func RemoveEntry(id *Identifier) {
if !IsOn() {
return
}
db.get().removeEntry(id.Int())
}
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
// to the channel trace.
// The Parent field is optional. It is used for event that will be recorded in the entity's parent
// trace also.
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
// the event to be added to the channel trace.
//
// The Parent field is optional. It is used for an event that will be recorded
// in the entity's parent trace.
type TraceEventDesc struct {
Desc string
Severity Severity
Parent *TraceEventDesc
}
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
for d := desc; d != nil; d = d.Parent {
switch d.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d.Desc)
case CtWarning:
l.WarningDepth(depth+1, d.Desc)
case CtError:
l.ErrorDepth(depth+1, d.Desc)
}
// AddTraceEvent adds trace related to the entity with specified id, using the
// provided TraceEventDesc.
//
// If channelz is not turned ON, this will simply log the event descriptions.
func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
// Log only the trace description associated with the bottom most entity.
switch desc.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, withParens(id)+desc.Desc)
case CtWarning:
l.WarningDepth(depth+1, withParens(id)+desc.Desc)
case CtError:
l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
}
if getMaxTraceEntry() == 0 {
return
}
db.get().traceEvent(id, desc)
if IsOn() {
db.get().traceEvent(id.Int(), desc)
}
}
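A hedged sketch of the new registration flow; the channelz package is internal, and ch stands for any value that implements the package's Channel interface:

package example

import (
    "google.golang.org/grpc/internal/channelz" // internal; assumed usable here
)

func register(ch channelz.Channel) (cleanup func()) {
    // A nil parent marks a top-level channel; a usable *Identifier is returned
    // whether or not channelz is turned on.
    chanID := channelz.RegisterChannel(ch, nil, "example-channel")

    subID, err := channelz.RegisterSubChannel(ch, chanID, "example-subchannel")
    if err != nil {
        // Only possible when the parent identifier is nil.
        channelz.RemoveEntry(chanID)
        return func() {}
    }
    // Entries must be removed explicitly when the entities go away.
    return func() {
        channelz.RemoveEntry(subID)
        channelz.RemoveEntry(chanID)
    }
}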
// channelMap is the storage data structure for channelz.

vendor/google.golang.org/grpc/internal/channelz/id.go (generated, vendored, new file, 75 additions)
View File

@ -0,0 +1,75 @@
/*
*
* Copyright 2022 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package channelz
import "fmt"
// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier struct {
typ RefChannelType
id int64
str string
pid *Identifier
}
// Type returns the entity type corresponding to id.
func (id *Identifier) Type() RefChannelType {
return id.typ
}
// Int returns the integer identifier corresponding to id.
func (id *Identifier) Int() int64 {
return id.id
}
// String returns a string representation of the entity corresponding to id.
//
// This includes some information about the parent as well. Examples:
// Top-level channel: [Channel #channel-number]
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
func (id *Identifier) String() string {
return id.str
}
// Equal returns true if other is the same as id.
func (id *Identifier) Equal(other *Identifier) bool {
if (id != nil) != (other != nil) {
return false
}
if id == nil && other == nil {
return true
}
return id.typ == other.typ && id.id == other.id && id.pid == other.pid
}
// NewIdentifierForTesting returns a new opaque identifier to be used only for
// testing purposes.
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
return newIdentifer(typ, id, pid)
}
func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
str := fmt.Sprintf("%s #%d", typ, id)
if pid != nil {
str = fmt.Sprintf("%s %s", pid, str)
}
return &Identifier{typ: typ, id: id, str: str, pid: pid}
}
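A small, hedged illustration of the identifier strings produced above, using the testing constructor exported by this file; the IDs are arbitrary:

package main

import (
    "fmt"

    "google.golang.org/grpc/internal/channelz" // internal; assumed usable here
)

func main() {
    parent := channelz.NewIdentifierForTesting(channelz.RefChannel, 1, nil)
    child := channelz.NewIdentifierForTesting(channelz.RefSubChannel, 7, parent)

    fmt.Println(parent.String()) // "Channel #1"
    fmt.Println(child.String())  // "Channel #1 SubChannel #7"
    fmt.Println(child.Equal(parent), child.Equal(child)) // false true
}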

View File

@ -26,77 +26,54 @@ import (
var logger = grpclog.Component("channelz")
func withParens(id *Identifier) string {
return "[" + id.String() + "] "
}
// Info logs and adds a trace event if channelz is on.
func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtInfo,
})
} else {
l.InfoDepth(1, args...)
}
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtInfo,
})
}
// Infof logs and adds a trace event if channelz is on.
func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtInfo,
})
} else {
l.InfoDepth(1, msg)
}
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...),
Severity: CtInfo,
})
}
// Warning logs and adds a trace event if channelz is on.
func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
} else {
l.WarningDepth(1, args...)
}
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
}
// Warningf logs and adds a trace event if channelz is on.
func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtWarning,
})
} else {
l.WarningDepth(1, msg)
}
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...),
Severity: CtWarning,
})
}
// Error logs and adds a trace event if channelz is on.
func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
} else {
l.ErrorDepth(1, args...)
}
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
}
// Errorf logs and adds a trace event if channelz is on.
func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
if IsOn() {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: msg,
Severity: CtError,
})
} else {
l.ErrorDepth(1, msg)
}
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...),
Severity: CtError,
})
}
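For completeness, a hedged sketch of calling these helpers with the new identifier type; the component name and message are placeholders:

package example

import (
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/channelz" // internal; assumed usable here
)

var logger = grpclog.Component("example") // satisfies grpclog.DepthLoggerV2

func traceConnEvent(id *channelz.Identifier, addr string) {
    // Always logs; additionally records a trace event against id when
    // channelz is on and trace collection is enabled.
    channelz.Infof(logger, id, "connected to %s", addr)
}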

View File

@ -686,12 +686,33 @@ const (
type RefChannelType int
const (
// RefUnknown indicates an unknown entity type, the zero value for this type.
RefUnknown RefChannelType = iota
// RefChannel indicates the referenced entity is a Channel.
RefChannel RefChannelType = iota
RefChannel
// RefSubChannel indicates the referenced entity is a SubChannel.
RefSubChannel
// RefServer indicates the referenced entity is a Server.
RefServer
// RefListenSocket indicates the referenced entity is a ListenSocket.
RefListenSocket
// RefNormalSocket indicates the referenced entity is a NormalSocket.
RefNormalSocket
)
var refChannelTypeToString = map[RefChannelType]string{
RefUnknown: "Unknown",
RefChannel: "Channel",
RefSubChannel: "SubChannel",
RefServer: "Server",
RefListenSocket: "ListenSocket",
RefNormalSocket: "NormalSocket",
}
func (r RefChannelType) String() string {
return refChannelTypeToString[r]
}
func (c *channelTrace) dumpData() *ChannelTrace {
c.mu.Lock()
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}

View File

@ -85,3 +85,9 @@ const (
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)
// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
//
// It currently has an experimental suffix which would be removed once
// end-to-end testing of the policy is completed.
const RLSLoadBalancingPolicyName = "rls_experimental"

View File

@ -22,6 +22,9 @@
package metadata
import (
"fmt"
"strings"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
return addr
}
// Validate returns an error if the input md contains invalid keys or values.
//
// If the header is not a pseudo-header, the following items are checked:
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
func Validate(md metadata.MD) error {
for k, vals := range md {
// pseudo-header will be ignored
if k[0] == ':' {
continue
}
// check the key byte-by-byte; indexing avoids the rune decoding a for range over the string would do
for i := 0; i < len(k); i++ {
r := k[i]
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
}
}
if strings.HasSuffix(k, "-bin") {
continue
}
// check value
for _, val := range vals {
if hasNotPrintable(val) {
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
}
}
}
return nil
}
// hasNotPrintable returns true if msg contains any character outside the printable ASCII range %x20-%x7E.
func hasNotPrintable(msg string) bool {
// Iterate by byte index; this avoids the rune decoding a for range over the string would do.
for i := 0; i < len(msg); i++ {
if msg[i] < 0x20 || msg[i] > 0x7E {
return true
}
}
return false
}
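A hedged example of the validation rules described above; the keys and values are made up:

package main

import (
    "fmt"

    imetadata "google.golang.org/grpc/internal/metadata" // internal; assumed usable here
    "google.golang.org/grpc/metadata"
)

func main() {
    ok := metadata.MD{
        "content-type": {"application/grpc"},
        "trace-bin":    {string([]byte{0x00, 0xff})}, // "-bin" values are not checked
    }
    fmt.Println(imetadata.Validate(ok)) // <nil>

    bad := metadata.MD{"bad key": {"x"}} // the space is outside [0-9a-z-_.]
    fmt.Println(imetadata.Validate(bad)) // non-nil error
}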

View File

@ -0,0 +1,82 @@
/*
*
* Copyright 2021 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package pretty defines helper functions to pretty-print structs for logging.
package pretty
import (
"bytes"
"encoding/json"
"fmt"
"github.com/golang/protobuf/jsonpb"
protov1 "github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protov2 "google.golang.org/protobuf/proto"
)
const jsonIndent = " "
// ToJSON marshals the input into a json string.
//
// If marshal fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e interface{}) string {
switch ee := e.(type) {
case protov1.Message:
mm := jsonpb.Marshaler{Indent: jsonIndent}
ret, err := mm.MarshalToString(ee)
if err != nil {
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
// messages are not imported, and this will fail because the message
// is not found.
return fmt.Sprintf("%+v", ee)
}
return ret
case protov2.Message:
mm := protojson.MarshalOptions{
Multiline: true,
Indent: jsonIndent,
}
ret, err := mm.Marshal(ee)
if err != nil {
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
// messages are not imported, and this will fail because the message
// is not found.
return fmt.Sprintf("%+v", ee)
}
return string(ret)
default:
ret, err := json.MarshalIndent(ee, "", jsonIndent)
if err != nil {
return fmt.Sprintf("%+v", ee)
}
return string(ret)
}
}
// FormatJSON formats the input json bytes with indentation.
//
// If Indent fails, it returns the unchanged input as string.
func FormatJSON(b []byte) string {
var out bytes.Buffer
err := json.Indent(&out, b, "", jsonIndent)
if err != nil {
return string(b)
}
return out.String()
}
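A quick, hedged illustration of the two helpers; the map literal is arbitrary, and proto messages take the jsonpb/protojson paths instead:

package main

import (
    "fmt"

    "google.golang.org/grpc/internal/pretty" // internal; assumed usable here
)

func main() {
    // Non-proto values fall through to encoding/json with two-space indent.
    fmt.Println(pretty.ToJSON(map[string]interface{}{"name": "example", "count": 3}))

    // FormatJSON re-indents already-marshaled JSON bytes.
    fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":[2,3]}`)))
}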

View File

@ -137,6 +137,7 @@ type earlyAbortStream struct {
streamID uint32
contentSubtype string
status *status.Status
rst bool
}
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
return err
}
if eas.rst {
if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
return err
}
}
return nil
}

View File

@ -132,7 +132,7 @@ type http2Client struct {
kpDormant bool
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
channelzID *channelz.Identifier
czData *channelzData
onGoAway func(GoAwayReason)
@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
t.statsHandler.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
if err != nil {
return nil, err
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
// the wire. However, there are two notable exceptions:
//
// 1. If the stream headers violate the max header list size allowed by the
// server. In this case there is no reason to retry at all, as it is
// assumed the RPC would continue to fail on subsequent attempts.
// server. It's possible this could succeed on another transport, even if
// it's unlikely, but do not transparently retry.
// 2. If the credentials errored when requesting their headers. In this case,
// it's possible a retry can fix the problem, but indefinitely transparently
// retrying is not appropriate as it is likely the credentials, if they can
@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
type NewStreamError struct {
Err error
DoNotRetry bool
DoNotTransparentRetry bool
AllowTransparentRetry bool
}
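A hedged sketch of how a caller could act on the reshaped error; this is not the retry code from this change set, transport is an internal package, and the helper below is hypothetical:

package example

import (
    "context"

    "google.golang.org/grpc/internal/transport" // internal; assumed usable here
)

// newStreamWithRetryHint reports whether a failed NewStream may be retried
// transparently, based on the AllowTransparentRetry field introduced above.
func newStreamWithRetryHint(ctx context.Context, t transport.ClientTransport, hdr *transport.CallHdr) (*transport.Stream, bool, error) {
    s, err := t.NewStream(ctx, hdr)
    if err == nil {
        return s, false, nil
    }
    if nse, ok := err.(*transport.NewStreamError); ok {
        // AllowTransparentRetry means the RPC never made it onto this
        // connection, or the connection was draining/closing.
        return nil, nse.AllowTransparentRetry, nse.Err
    }
    return nil, false, err
}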
func (e NewStreamError) Error() string {
@ -649,11 +649,11 @@ func (e NewStreamError) Error() string {
// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true}
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return true
}, hdr)
if err != nil {
return nil, &NewStreamError{Err: err}
// Connection closed.
return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
}
if success {
break
}
if hdrListSizeErr != nil {
return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
return nil, &NewStreamError{Err: hdrListSizeErr}
}
firstTry = false
select {
@ -767,9 +768,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
case <-ctx.Done():
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
case <-t.goAway:
return nil, &NewStreamError{Err: errStreamDrain}
return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
case <-t.ctx.Done():
return nil, &NewStreamError{Err: ErrConnClosing}
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
if t.statsHandler != nil {
@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) {
t.controlBuf.finish()
t.cancel()
t.conn.Close()
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
channelz.RemoveEntry(t.channelzID)
// Append info about previous goaways if there were any, since this may be important
// for understanding the root cause for this connection to be closed.
_, goAwayDebugMessage := t.GetGoAwayReason()

View File

@ -21,7 +21,6 @@ package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
@ -36,6 +35,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@ -52,10 +52,10 @@ import (
var (
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
)
// serverConnectionCounter counts the number of connections a server has seen
@ -117,7 +117,7 @@ type http2Server struct {
idle time.Time
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
channelzID *channelz.Identifier
czData *channelzData
bufferPool *bufferPool
@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
if kp.Time != infinity {
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
@ -275,12 +280,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
if err != nil {
return nil, err
}
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
t.framer.writer.Flush()
defer func() {
@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
rst: !frame.StreamEnded(),
})
return false
}
@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if httpMethod != http.MethodPost {
t.mu.Unlock()
errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
if logger.V(logLevel) {
logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
logger.Infof("transport: %v", errMsg)
}
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeProtocol,
onWrite: func() {},
t.controlBuf.put(&earlyAbortStream{
httpStatus: 405,
streamID: streamID,
contentSubtype: s.contentSubtype,
status: status.New(codes.Internal, errMsg),
rst: !frame.StreamEnded(),
})
s.cancel()
return false
@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
streamID: s.id,
contentSubtype: s.contentSubtype,
status: stat,
rst: !frame.StreamEnded(),
})
return false
}
@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
return true
}
func (t *http2Server) streamContextErr(s *Stream) error {
select {
case <-t.done:
return ErrConnClosing
default:
}
return ContextErr(s.ctx.Err())
}
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
if s.updateHeaderSent() || s.getState() == streamDone {
if s.updateHeaderSent() {
return ErrIllegalHeaderWrite
}
if s.getState() == streamDone {
return t.streamContextErr(s)
}
s.hdrMu.Lock()
if md.Len() > 0 {
if s.header.Len() > 0 {
@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
if err := t.writeHeaderLocked(s); err != nil {
s.hdrMu.Unlock()
return err
return status.Convert(err).Err()
}
s.hdrMu.Unlock()
return nil
@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
if _, ok := err.(ConnectionError); ok {
return err
}
// TODO(mmukhi, dfawley): Make sure this is the right code to return.
return status.Errorf(codes.Internal, "transport: %v", err)
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
s.cancel()
select {
case <-t.done:
return ErrConnClosing
default:
}
return ContextErr(s.ctx.Err())
return t.streamContextErr(s)
}
}
df := &dataFrame{
@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select {
case <-t.done:
return ErrConnClosing
default:
}
return ContextErr(s.ctx.Err())
return t.streamContextErr(s)
}
return t.controlBuf.put(df)
}
@ -1210,9 +1217,7 @@ func (t *http2Server) Close() {
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
logger.Infof("transport: error closing conn during Close: %v", err)
}
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
channelz.RemoveEntry(t.channelzID)
// Cancel all active streams.
for _, s := range streams {
s.cancel()
@ -1225,10 +1230,6 @@ func (t *http2Server) Close() {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
s.swapState(streamDone)
t.deleteStream(s, eosReceived)

View File

@ -34,6 +34,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@ -529,7 +530,7 @@ type ServerConfig struct {
InitialConnWindowSize int32
WriteBufferSize int
ReadBufferSize int
ChannelzParentID int64
ChannelzParentID *channelz.Identifier
MaxHeaderListSize *uint32
HeaderTableSize *uint32
}
@ -563,7 +564,7 @@ type ConnectOptions struct {
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
ChannelzParentID int64
ChannelzParentID *channelz.Identifier
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.

View File

@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
out[key] = v
s := make([]string, len(v))
copy(s, v)
out[key] = s
}
return out, true
}
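
Illustrative sketch, not part of the diff: FromIncomingContext and FromOutgoingContext already copied the map, and now copy each value slice as well, so callers can modify the returned MD without mutating the metadata stored in the context. The key below is arbitrary.

    package example // illustrative only

    import (
        "context"

        "google.golang.org/grpc/metadata"
    )

    // annotate appends to metadata read from the context; after this change the
    // append operates on a copied slice, not the one held by the context.
    func annotate(ctx context.Context) metadata.MD {
        md, ok := metadata.FromIncomingContext(ctx)
        if !ok {
            md = metadata.MD{}
        }
        md["x-trace"] = append(md["x-trace"], "edge-proxy")
        return md
    }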
@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) {
// map, and there's no guarantee that the MD attached to the context is
// created using our helper functions.
key := strings.ToLower(k)
out[key] = v
s := make([]string, len(v))
copy(s, v)
out[key] = s
}
for _, added := range raw.added {
if len(added)%2 == 1 {

View File

@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
if _, ok := status.FromError(err); ok {
// Status error: end the RPC unconditionally with this status.
return nil, nil, err
return nil, nil, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.
@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() {
pw.done = true
close(pw.blockingCh)
}
// dropError is a wrapper error that indicates the LB policy wishes to drop the
// RPC and not retry it.
type dropError struct {
error
}
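
Illustrative sketch, not part of the diff: a status error returned from a picker's Pick now comes back wrapped in dropError, which the stream code (see the csAttempt.drop field later in this commit) uses to end the RPC with that status and skip retries. The picker below is a hypothetical LB policy that drops every RPC.

    package example // illustrative only

    import (
        "google.golang.org/grpc/balancer"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // dropAllPicker returns a status error from Pick; the RPC ends with that
    // status and, via dropError, is treated as a drop rather than retried.
    type dropAllPicker struct{}

    func (dropAllPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
        return balancer.PickResult{}, status.Error(codes.Unavailable, "dropped by LB policy")
    }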

View File

@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string {
}
type pickfirstBalancer struct {
state connectivity.State
cc balancer.ClientConn
sc balancer.SubConn
state connectivity.State
cc balancer.ClientConn
subConn balancer.SubConn
}
func (b *pickfirstBalancer) ResolverError(err error) {
switch b.state {
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
// Set a failing picker if we don't have a good picker.
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
if logger.V(2) {
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
}
if b.subConn == nil {
b.state = connectivity.TransientFailure
}
if b.state != connectivity.TransientFailure {
// The picker will not change since the balancer does not currently
// report an error.
return
}
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
if len(cs.ResolverState.Addresses) == 0 {
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
if len(state.ResolverState.Addresses) == 0 {
// The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError.
if b.subConn != nil {
// Remove the old subConn. All addresses were removed, so it is no longer
// valid.
b.cc.RemoveSubConn(b.subConn)
b.subConn = nil
}
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
if b.sc == nil {
var err error
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
if err != nil {
if logger.V(2) {
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
})
return balancer.ErrBadResolverState
}
b.state = connectivity.Idle
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
b.sc.Connect()
} else {
b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses)
b.sc.Connect()
if b.subConn != nil {
b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
return nil
}
subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
if err != nil {
if logger.V(2) {
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
})
return balancer.ErrBadResolverState
}
b.subConn = subConn
b.state = connectivity.Idle
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Idle,
Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
})
b.subConn.Connect()
return nil
}
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
if logger.V(2) {
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
}
if b.sc != sc {
if b.subConn != subConn {
if logger.V(2) {
logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
}
return
}
b.state = s.ConnectivityState
if s.ConnectivityState == connectivity.Shutdown {
b.sc = nil
b.state = state.ConnectivityState
if state.ConnectivityState == connectivity.Shutdown {
b.subConn = nil
return
}
switch s.ConnectivityState {
switch state.ConnectivityState {
case connectivity.Ready:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
b.cc.UpdateState(balancer.State{
ConnectivityState: state.ConnectivityState,
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
})
case connectivity.Connecting:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
b.cc.UpdateState(balancer.State{
ConnectivityState: state.ConnectivityState,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
case connectivity.Idle:
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
b.cc.UpdateState(balancer.State{
ConnectivityState: state.ConnectivityState,
Picker: &idlePicker{subConn: subConn},
})
case connectivity.TransientFailure:
b.cc.UpdateState(balancer.State{
ConnectivityState: s.ConnectivityState,
Picker: &picker{err: s.ConnectionError},
ConnectivityState: state.ConnectivityState,
Picker: &picker{err: state.ConnectionError},
})
}
}
@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() {
}
func (b *pickfirstBalancer) ExitIdle() {
if b.sc != nil && b.state == connectivity.Idle {
b.sc.Connect()
if b.subConn != nil && b.state == connectivity.Idle {
b.subConn.Connect()
}
}
@ -135,18 +163,18 @@ type picker struct {
err error
}
func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return p.result, p.err
}
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
sc balancer.SubConn
subConn balancer.SubConn
}
func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
i.sc.Connect()
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
i.subConn.Connect()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
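
Illustrative sketch, not part of the diff: when pick_first reports IDLE it installs idlePicker, so the first RPC on an idle ClientConn kicks the SubConn back into CONNECTING; grpc.WaitForReady keeps the RPC blocked across connection failures instead of failing fast. The method name and message values below are hypothetical.

    package example // illustrative only

    import (
        "context"

        "google.golang.org/grpc"
    )

    // callOnIdleChannel issues an RPC on a possibly idle connection; the pick
    // itself triggers reconnection, and WaitForReady makes the RPC wait for it.
    func callOnIdleChannel(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) error {
        return cc.Invoke(ctx, "/example.Service/Method", req, reply, grpc.WaitForReady(true))
    }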

View File

@ -27,6 +27,7 @@ import (
"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/serviceconfig"
)
@ -139,13 +140,18 @@ type Address struct {
// Equal returns whether a and o are identical. Metadata is compared directly,
// not with any recursive introspection.
func (a *Address) Equal(o Address) bool {
func (a Address) Equal(o Address) bool {
return a.Addr == o.Addr && a.ServerName == o.ServerName &&
a.Attributes.Equal(o.Attributes) &&
a.BalancerAttributes.Equal(o.BalancerAttributes) &&
a.Type == o.Type && a.Metadata == o.Metadata
}
// String returns JSON formatted string representation of the address.
func (a Address) String() string {
return pretty.ToJSON(a)
}
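
Illustrative sketch, not part of the diff: Equal now uses a value receiver and the new String method renders the address as JSON via the internal pretty package, so addresses can be compared and logged by value. The addresses below are made up.

    package example // illustrative only

    import (
        "fmt"

        "google.golang.org/grpc/resolver"
    )

    func compareAddrs() {
        a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend"}
        b := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend"}
        fmt.Println(a.Equal(b)) // true; Equal is callable directly on values
        fmt.Println(a)          // printed as JSON through the new String method
    }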
// BuildOptions includes additional information for the builder to create
// the resolver.
type BuildOptions struct {

View File

@ -19,7 +19,6 @@
package grpc
import (
"fmt"
"strings"
"sync"
@ -27,6 +26,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
if ccr.done.HasFired() {
return nil
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
return balancer.ErrBadResolverState
@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
if ccr.done.HasFired() {
return
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
}
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
ccr.curState.Addresses = addrs
ccr.cc.updateResolverState(ccr.curState, nil)
}
@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
if ccr.done.HasFired() {
return
}
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc)
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
if ccr.cc.dopts.disableServiceConfig {
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
return
@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
return
}
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
}
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
ccr.curState.ServiceConfig = scpr
ccr.cc.updateResolverState(ccr.curState, nil)
}
@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
Severity: channelz.CtInfo,
})
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}

View File

@ -134,7 +134,7 @@ type Server struct {
channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
channelzID int64 // channelz unique identification number
channelzID *channelz.Identifier
czData *channelzData
serverWorkerChannels []chan *serverWorkerData
@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server {
s.initServerWorkers()
}
if channelz.IsOn() {
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
}
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
channelz.Info(logger, s.channelzID, "Server created")
return s
}
@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
type listenSocket struct {
net.Listener
channelzID int64
channelzID *channelz.Identifier
}
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
func (l *listenSocket) Close() error {
err := l.Listener.Close()
if channelz.IsOn() {
channelz.RemoveEntry(l.channelzID)
}
channelz.RemoveEntry(l.channelzID)
channelz.Info(logger, l.channelzID, "ListenSocket deleted")
return err
}
@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error {
ls := &listenSocket{Listener: lis}
s.lis[ls] = true
if channelz.IsOn() {
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
}
s.mu.Unlock()
defer func() {
s.mu.Lock()
if s.lis != nil && s.lis[ls] {
@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Unlock()
}()
var tempDelay time.Duration // how long to sleep on accept failure
var err error
ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
if err != nil {
s.mu.Unlock()
return err
}
s.mu.Unlock()
channelz.Info(logger, ls.channelzID, "ListenSocket created")
var tempDelay time.Duration // how long to sleep on accept failure
for {
rawConn, err := lis.Accept()
if err != nil {
@ -1709,11 +1710,7 @@ func (s *Server) Stop() {
s.done.Fire()
}()
s.channelzRemoveOnce.Do(func() {
if channelz.IsOn() {
channelz.RemoveEntry(s.channelzID)
}
})
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
s.mu.Lock()
listeners := s.lis
@ -1751,11 +1748,7 @@ func (s *Server) GracefulStop() {
s.quit.Fire()
defer s.done.Fire()
s.channelzRemoveOnce.Do(func() {
if channelz.IsOn() {
channelz.RemoveEntry(s.channelzID)
}
})
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
@ -1808,12 +1801,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return codec
}
// SetHeader sets the header metadata.
// When called multiple times, all the provided metadata will be merged.
// All the metadata will be sent out when one of the following happens:
// - grpc.SendHeader() is called;
// - The first response is sent out;
// - An RPC status is sent out (error or success).
// SetHeader sets the header metadata to be sent from the server to the client.
// The context provided must be the context passed to the server's handler.
//
// Streaming RPCs should prefer the SetHeader method of the ServerStream.
//
// When called multiple times, all the provided metadata will be merged. All
// the metadata will be sent out when one of the following happens:
//
// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
// - The first response message is sent. For unary handlers, this occurs when
// the handler returns; for streaming handlers, this can happen when stream's
// SendMsg method is called.
// - An RPC status is sent out (error or success). This occurs when the handler
// returns.
//
// SetHeader will fail if called after any of the events above.
//
// The error returned is compatible with the status package. However, the
// status code will often not match the RPC status as seen by the client
// application, and therefore, should not be relied upon for this purpose.
func SetHeader(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
@ -1825,8 +1832,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
return stream.SetHeader(md)
}
// SendHeader sends header metadata. It may be called at most once.
// The provided md and headers set by SetHeader() will be sent.
// SendHeader sends header metadata. It may be called at most once, and may not
// be called after any event that causes headers to be sent (see SetHeader for
// a complete list). The provided md and headers set by SetHeader() will be
// sent.
//
// The error returned is compatible with the status package. However, the
// status code will often not match the RPC status as seen by the client
// application, and therefore, should not be relied upon for this purpose.
func SendHeader(ctx context.Context, md metadata.MD) error {
stream := ServerTransportStreamFromContext(ctx)
if stream == nil {
@ -1840,6 +1853,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
// When called more than once, all the provided metadata will be merged.
//
// The error returned is compatible with the status package. However, the
// status code will often not match the RPC status as seen by the client
// application, and therefore, should not be relied upon for this purpose.
func SetTrailer(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
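
Illustrative sketch, not part of the diff: a unary handler using the header and trailer helpers documented in this file. Metadata passed to SetHeader is merged and flushed when SendHeader is called or when the handler returns; SetTrailer metadata is sent alongside the final RPC status. The handler, message types, and metadata keys are hypothetical.

    package example // illustrative only

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    type PingRequest struct{} // hypothetical messages
    type PingReply struct{}

    func Ping(ctx context.Context, _ *PingRequest) (*PingReply, error) {
        if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "abc123")); err != nil {
            return nil, err // e.g. the headers were already sent
        }
        // Optional: force the headers onto the wire before the response message.
        if err := grpc.SendHeader(ctx, metadata.Pairs("x-early", "1")); err != nil {
            return nil, err
        }
        if err := grpc.SetTrailer(ctx, metadata.Pairs("x-served-by", "node-1")); err != nil {
            return nil, err
        }
        return &PingReply{}, nil
    }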

View File

@ -381,6 +381,9 @@ func init() {
//
// If any of them is NOT *ServiceConfig, return false.
func equalServiceConfig(a, b serviceconfig.Config) bool {
if a == nil && b == nil {
return true
}
aa, ok := a.(*ServiceConfig)
if !ok {
return false

View File

@ -36,6 +36,7 @@ import (
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/internal/transport"
@ -166,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
if channelz.IsOn() {
cc.incrCallsStarted()
defer func() {
@ -297,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}
cs.binlog = binarylog.GetMethodLogger(method)
if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */)
if err != nil {
cs.finish(err)
return nil, err
}
op := func(a *csAttempt) error { return a.newStream() }
// Pick the transport to use and create a new stream on the transport.
// Assign cs.attempt upon success.
op := func(a *csAttempt) error {
if err := a.getTransport(); err != nil {
return err
}
if err := a.newStream(); err != nil {
return err
}
// Because this operation is always called either here (while creating
// the clientStream) or by the retry code while locked when replaying
// the operation, it is safe to access cs.attempt directly.
cs.attempt = a
return nil
}
if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
cs.finish(err)
return nil, err
}
@ -343,9 +363,15 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
return cs, nil
}
// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
// newAttemptLocked creates a new csAttempt without a transport or stream.
func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
if err := cs.ctx.Err(); err != nil {
return nil, toRPCErr(err)
}
if err := cs.cc.ctx.Err(); err != nil {
return nil, ErrClientConnClosing
}
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
method := cs.callHdr.Method
sh := cs.cc.dopts.copts.StatsHandler
@ -379,27 +405,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
ctx = trace.NewContext(ctx, trInfo.tr)
}
newAttempt := &csAttempt{
ctx: ctx,
beginTime: beginTime,
cs: cs,
dc: cs.cc.dopts.dc,
statsHandler: sh,
trInfo: trInfo,
}
defer func() {
if retErr != nil {
// This attempt is not set in the clientStream, so it's finish won't
// be called. Call it here for stats and trace in case they are not
// nil.
newAttempt.finish(retErr)
}
}()
if err := ctx.Err(); err != nil {
return toRPCErr(err)
}
if cs.cc.parsedTarget.Scheme == "xds" {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
@ -407,16 +412,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
))
}
t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
return &csAttempt{
ctx: ctx,
beginTime: beginTime,
cs: cs,
dc: cs.cc.dopts.dc,
statsHandler: sh,
trInfo: trInfo,
}, nil
}
func (a *csAttempt) getTransport() error {
cs := a.cs
var err error
a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
a.drop = true
}
return err
}
if trInfo != nil {
trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
if a.trInfo != nil {
a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
}
newAttempt.t = t
newAttempt.done = done
cs.attempt = newAttempt
return nil
}
@ -425,12 +446,21 @@ func (a *csAttempt) newStream() error {
cs.callHdr.PreviousAttempts = cs.numRetries
s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
// Return without converting to an RPC error so retry code can
// inspect.
return err
nse, ok := err.(*transport.NewStreamError)
if !ok {
// Unexpected.
return err
}
if nse.AllowTransparentRetry {
a.allowTransparentRetry = true
}
// Unwrap and convert error.
return toRPCErr(nse.Err)
}
cs.attempt.s = s
cs.attempt.p = &parser{r: s}
a.s = s
a.p = &parser{r: s}
return nil
}
@ -456,7 +486,7 @@ type clientStream struct {
retryThrottler *retryThrottler // The throttler active when the RPC began.
binlog *binarylog.MethodLogger // Binary logger, can be nil.
binlog binarylog.MethodLogger // Binary logger, can be nil.
// serverHeaderBinlogged is a boolean for whether server header has been
// logged. Server header will be logged when the first time one of those
// happens: stream.Header(), stream.Recv().
@ -508,6 +538,11 @@ type csAttempt struct {
statsHandler stats.Handler
beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
// set for pick errors that are returned as a status
drop bool
}
func (cs *clientStream) commitAttemptLocked() {
@ -527,41 +562,21 @@ func (cs *clientStream) commitAttempt() {
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
// the error that should be returned by the operation. If the RPC should be
// retried, the bool indicates whether it is being retried transparently.
func (cs *clientStream) shouldRetry(err error) (bool, error) {
if cs.attempt.s == nil {
// Error from NewClientStream.
nse, ok := err.(*transport.NewStreamError)
if !ok {
// Unexpected, but assume no I/O was performed and the RPC is not
// fatal, so retry indefinitely.
return true, nil
}
func (a *csAttempt) shouldRetry(err error) (bool, error) {
cs := a.cs
// Unwrap and convert error.
err = toRPCErr(nse.Err)
// Never retry DoNotRetry errors, which indicate the RPC should not be
// retried due to max header list size violation, etc.
if nse.DoNotRetry {
return false, err
}
// In the event of a non-IO operation error from NewStream, we never
// attempted to write anything to the wire, so we can retry
// indefinitely.
if !nse.DoNotTransparentRetry {
return true, nil
}
}
if cs.finished || cs.committed {
// RPC is finished or committed; cannot retry.
if cs.finished || cs.committed || a.drop {
// RPC is finished or committed or was dropped by the picker; cannot retry.
return false, err
}
if a.s == nil && a.allowTransparentRetry {
return true, nil
}
// Wait for the trailers.
unprocessed := false
if cs.attempt.s != nil {
<-cs.attempt.s.Done()
unprocessed = cs.attempt.s.Unprocessed()
if a.s != nil {
<-a.s.Done()
unprocessed = a.s.Unprocessed()
}
if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
@ -573,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
pushback := 0
hasPushback := false
if cs.attempt.s != nil {
if !cs.attempt.s.TrailersOnly() {
if a.s != nil {
if !a.s.TrailersOnly() {
return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
// before considering this a failure for throttling.
sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
sps := a.s.Trailer()["grpc-retry-pushback-ms"]
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
@ -597,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
}
var code codes.Code
if cs.attempt.s != nil {
code = cs.attempt.s.Status().Code()
if a.s != nil {
code = a.s.Status().Code()
} else {
code = status.Convert(err).Code()
code = status.Code(err)
}
rp := cs.methodConfig.RetryPolicy
@ -645,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) {
}
// Returns nil if a retry was performed and succeeded; error otherwise.
func (cs *clientStream) retryLocked(lastErr error) error {
func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
for {
cs.attempt.finish(toRPCErr(lastErr))
isTransparent, err := cs.shouldRetry(lastErr)
attempt.finish(toRPCErr(lastErr))
isTransparent, err := attempt.shouldRetry(lastErr)
if err != nil {
cs.commitAttemptLocked()
return err
}
cs.firstAttempt = false
if err := cs.newAttemptLocked(isTransparent); err != nil {
attempt, err = cs.newAttemptLocked(isTransparent)
if err != nil {
// Only returns error if the clientconn is closed or the context of
// the stream is canceled.
return err
}
if lastErr = cs.replayBufferLocked(); lastErr == nil {
// Note that the first op in the replay buffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
}
}
@ -667,7 +687,10 @@ func (cs *clientStream) Context() context.Context {
cs.commitAttempt()
// No need to lock before using attempt, since we know it is committed and
// cannot change.
return cs.attempt.s.Context()
if cs.attempt.s != nil {
return cs.attempt.s.Context()
}
return cs.ctx
}
func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
@ -697,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
cs.mu.Unlock()
return err
}
if err := cs.retryLocked(err); err != nil {
if err := cs.retryLocked(a, err); err != nil {
cs.mu.Unlock()
return err
}
@ -728,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
cs.binlog.Log(logEntry)
cs.serverHeaderBinlogged = true
}
return m, err
return m, nil
}
func (cs *clientStream) Trailer() metadata.MD {
@ -746,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD {
return cs.attempt.s.Trailer()
}
func (cs *clientStream) replayBufferLocked() error {
a := cs.attempt
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
for _, f := range cs.buffer {
if err := f(a); err != nil {
if err := f(attempt); err != nil {
return err
}
}
@ -797,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
}
msgBytes := data // Store the pointer before setting to nil. For binary logging.
op := func(a *csAttempt) error {
err := a.sendMsg(m, hdr, payload, data)
// nil out the message and uncomp when replaying; they are only needed for
// stats which is disabled for subsequent attempts.
m, data = nil, nil
return err
return a.sendMsg(m, hdr, payload, data)
}
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if cs.binlog != nil && err == nil {
cs.binlog.Log(&binarylog.ClientMessage{
OnClientSide: true,
Message: msgBytes,
Message: data,
})
}
return
return err
}
func (cs *clientStream) RecvMsg(m interface{}) error {
@ -1364,8 +1381,10 @@ func (as *addrConnStream) finish(err error) {
// ServerStream defines the server-side behavior of a streaming RPC.
//
// All errors returned from ServerStream methods are compatible with the
// status package.
// Errors returned from ServerStream methods are compatible with the status
// package. However, the status code will often not match the RPC status as
// seen by the client application, and therefore, should not be relied upon for
// this purpose.
type ServerStream interface {
// SetHeader sets the header metadata. It may be called multiple times.
// When called multiple times, all the provided metadata will be merged.
@ -1428,7 +1447,7 @@ type serverStream struct {
statsHandler stats.Handler
binlog *binarylog.MethodLogger
binlog binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
// will happen when one of the following two happens: stream.SendHeader(),
// stream.Send().
@ -1448,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
err := imetadata.Validate(md)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
return ss.s.SetHeader(md)
}
func (ss *serverStream) SendHeader(md metadata.MD) error {
err := ss.t.WriteHeader(ss.s, md)
err := imetadata.Validate(md)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
err = ss.t.WriteHeader(ss.s, md)
if ss.binlog != nil && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
ss.binlog.Log(&binarylog.ServerHeader{
@ -1467,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
if md.Len() == 0 {
return
}
if err := imetadata.Validate(md); err != nil {
logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
}
ss.s.SetTrailer(md)
}
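
Illustrative sketch, not part of the diff: with imetadata.Validate wired into newClientStream and the serverStream header/trailer setters, metadata carrying an illegal key or value is rejected up front with codes.Internal rather than failing later in the transport. The connection, method name, and messages below are hypothetical.

    package example // illustrative only

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/metadata"
        "google.golang.org/grpc/status"
    )

    // invalidMetadata shows the client-side check: a key containing a space is
    // not a valid gRPC metadata key, so the RPC fails before reaching the wire.
    func invalidMetadata(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) bool {
        md := metadata.Pairs("bad key", "v") // the space makes the key invalid
        ctx = metadata.NewOutgoingContext(ctx, md)
        err := cc.Invoke(ctx, "/example.Service/Method", req, reply)
        return status.Code(err) == codes.Internal // true: rejected by validation
    }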

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.45.0"
const Version = "1.47.0"

vendor/modules.txt vendored
View File

@ -332,11 +332,12 @@ github.com/inconshreveable/mousetrap
github.com/json-iterator/go
# github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
## explicit
# github.com/klauspost/compress v1.15.1
## explicit; go 1.15
# github.com/klauspost/compress v1.15.7
## explicit; go 1.16
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@ -357,7 +358,7 @@ github.com/mitchellh/go-wordwrap
# github.com/mitchellh/mapstructure v1.5.0
## explicit; go 1.14
github.com/mitchellh/mapstructure
# github.com/moby/buildkit v0.10.1-0.20220721175135-c75998aec3d4
# github.com/moby/buildkit v0.10.1-0.20220809151411-8488654e899b
## explicit; go 1.17
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
@ -374,6 +375,9 @@ github.com/moby/buildkit/exporter/containerimage/exptypes
github.com/moby/buildkit/frontend/gateway/client
github.com/moby/buildkit/frontend/gateway/grpcclient
github.com/moby/buildkit/frontend/gateway/pb
github.com/moby/buildkit/frontend/subrequests
github.com/moby/buildkit/frontend/subrequests/outline
github.com/moby/buildkit/frontend/subrequests/targets
github.com/moby/buildkit/identity
github.com/moby/buildkit/session
github.com/moby/buildkit/session/auth
@ -460,7 +464,7 @@ github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v1.12.1
# github.com/prometheus/client_golang v1.12.2
## explicit; go 1.13
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
@ -663,7 +667,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
## explicit
golang.org/x/time/rate
# google.golang.org/appengine v1.6.7
@ -678,13 +682,13 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368
## explicit; go 1.11
# google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6
## explicit; go 1.15
google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.45.0
# google.golang.org/grpc v1.47.0
## explicit; go 1.14
google.golang.org/grpc
google.golang.org/grpc/attributes
@ -694,6 +698,7 @@ google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
@ -706,6 +711,7 @@ google.golang.org/grpc/health
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancer/gracefulswitch
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer
@ -717,6 +723,7 @@ google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/metadata
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/passthrough