Merge pull request #781 from crazy-max/vendor-buildkit

vendor: update buildkit
Tõnis Tiigi 2021-09-27 19:01:49 -07:00 committed by GitHub
commit c05a6eb2c1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
419 changed files with 20903 additions and 12229 deletions

14
go.mod

@@ -9,9 +9,9 @@ require (
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c
github.com/containerd/console v1.0.2
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.5.5
github.com/docker/cli v20.10.7+incompatible
github.com/docker/cli v20.10.8+incompatible
github.com/docker/cli-docs-tool v0.1.1
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
github.com/docker/distribution v2.7.1+incompatible
@@ -30,9 +30,9 @@ require (
github.com/jinzhu/gorm v1.9.2 // indirect
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/moby/buildkit v0.9.1-0.20210921052901-44891f4cb975
github.com/moby/buildkit v0.9.1-0.20210927192002-23c8affb1ffe
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.8.1
@@ -48,9 +48,9 @@ require (
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
k8s.io/api v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/client-go v0.21.2
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
k8s.io/client-go v0.22.1
)
replace (

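Note: a version bump like this can be checked programmatically. A minimal sketch using golang.org/x/mod/modfile (illustrative only, not part of this PR):

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/mod/modfile"
)

// Print the buildkit version pinned in go.mod, e.g. to confirm the bump
// above landed as expected.
func main() {
    data, err := os.ReadFile("go.mod")
    if err != nil {
        log.Fatal(err)
    }
    f, err := modfile.Parse("go.mod", data, nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range f.Require {
        if r.Mod.Path == "github.com/moby/buildkit" {
            fmt.Println(r.Mod.Path, r.Mod.Version)
        }
    }
}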
92
go.sum

@@ -80,7 +80,7 @@ github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@@ -88,6 +88,7 @@ github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMl
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -104,6 +105,7 @@ github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQ
github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -282,8 +284,9 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -299,7 +302,6 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.3/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
github.com/containerd/containerd v1.5.5 h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=
github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -333,8 +335,9 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
github.com/containerd/stargz-snapshotter v0.7.0/go.mod h1:otcOAgcOpY48CsSs0Kw/+wlIAGSNWS/+hlc/rLIGdIQ=
github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
github.com/containerd/stargz-snapshotter v0.8.1-0.20210910092506-a3ecdc9366fb/go.mod h1:VWbnkZwoaOVh9W1n+dQ9xxXNgTA14SrDNVhq269Odp8=
github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
github.com/containerd/stargz-snapshotter/estargz v0.8.1-0.20210910092506-a3ecdc9366fb/go.mod h1:ndCBLrMyXFyfgBEPHgdR0Ywe8O/kuTShyUsKOzIVeyw=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -461,6 +464,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -468,6 +472,7 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
@@ -560,8 +565,9 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -592,9 +598,6 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4-0.20210608040537-544b4180ac70 h1:yxuuMouxXYv9V1HprM9jTODJPGrTrC0FYVtPSnyIXxs=
github.com/golang/snappy v0.0.4-0.20210608040537-544b4180ac70/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@@ -618,6 +621,7 @@ github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUz
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw=
@@ -679,8 +683,10 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -815,8 +821,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -900,8 +906,8 @@ github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxd
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
github.com/moby/buildkit v0.9.1-0.20210921052901-44891f4cb975 h1:rWXpP0Tntk23XKnvY0hb6p858yTOJVHjG5q/9RI3KSI=
github.com/moby/buildkit v0.9.1-0.20210921052901-44891f4cb975/go.mod h1:b8cqWy+6iMXHywaEaIbD6bV+8wZ9bsV9QgBwe0z/FD4=
github.com/moby/buildkit v0.9.1-0.20210927192002-23c8affb1ffe h1:SA9l85GRngGomGpyDmVdziovizKPH8peFyUUtsluh34=
github.com/moby/buildkit v0.9.1-0.20210927192002-23c8affb1ffe/go.mod h1:BmktjI4PV4waDqQWB19oWso7X4t9VTcak6q7ynntmkQ=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -957,6 +963,7 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -965,6 +972,7 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -974,8 +982,9 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs=
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1009,8 +1018,9 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -1153,6 +1163,7 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -1215,11 +1226,13 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -1424,6 +1437,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -1435,11 +1449,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1534,6 +1548,7 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1565,6 +1580,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1582,16 +1598,17 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1765,6 +1782,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1805,8 +1823,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1864,6 +1882,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@@ -1888,16 +1907,16 @@ k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y=
k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc=
k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
@@ -1908,8 +1927,8 @@ k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0=
k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U=
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
@@ -1920,7 +1939,7 @@ k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/cri-api v0.21.2/go.mod h1:ukzeKnOkrG9/+ghKZA57WeZbQfRtqlGLF5GcF3RtHZ8=
k8s.io/cri-api v0.22.1/go.mod h1:mj5DGUtElRyErU5AZ8EM0ahxbElYsaLAMTPhLPQ40Eg=
k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1932,20 +1951,21 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
@@ -1967,8 +1987,8 @@ sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

vendor/github.com/containerd/console/console_windows.go generated vendored

@@ -17,10 +17,10 @@
package console
import (
"errors"
"fmt"
"os"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@@ -103,7 +103,7 @@ func (m *master) Reset() error {
{m.err, m.errMode},
} {
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
return errors.Wrap(err, "unable to restore console mode")
return fmt.Errorf("unable to restore console mode: %w", err)
}
}
@@ -114,7 +114,7 @@ func (m *master) Size() (WinSize, error) {
var info windows.ConsoleScreenBufferInfo
err := windows.GetConsoleScreenBufferInfo(m.out, &info)
if err != nil {
return WinSize{}, errors.Wrap(err, "unable to get console info")
return WinSize{}, fmt.Errorf("unable to get console info: %w", err)
}
winsize := WinSize{
@@ -139,7 +139,7 @@ func (m *master) DisableEcho() error {
mode |= windows.ENABLE_LINE_INPUT
if err := windows.SetConsoleMode(m.in, mode); err != nil {
return errors.Wrap(err, "unable to set console to disable echo")
return fmt.Errorf("unable to set console to disable echo: %w", err)
}
return nil
@@ -192,7 +192,7 @@ func makeInputRaw(fd windows.Handle, mode uint32) error {
}
if err := windows.SetConsoleMode(fd, mode); err != nil {
return errors.Wrap(err, "unable to set console to raw mode")
return fmt.Errorf("unable to set console to raw mode: %w", err)
}
return nil

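Note: the console_windows.go hunks above replace github.com/pkg/errors with standard-library wrapping. Since Go 1.13, fmt.Errorf with the %w verb keeps the wrapped cause reachable through errors.Is and errors.Unwrap, which is what lets the vendored package drop its pkg/errors dependency entirely (see the go.mod change further down). A minimal sketch; errSetMode is an illustrative sentinel, not from the diff:

package main

import (
    "errors"
    "fmt"
)

var errSetMode = errors.New("invalid console mode")

// Same intent as the old errors.Wrap(err, "unable to restore console mode").
func reset() error {
    return fmt.Errorf("unable to restore console mode: %w", errSetMode)
}

func main() {
    err := reset()
    fmt.Println(err)                        // unable to restore console mode: invalid console mode
    fmt.Println(errors.Is(err, errSetMode)) // true: %w keeps the cause inspectable
}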
163
vendor/github.com/containerd/console/console_zos.go generated vendored Normal file

@@ -0,0 +1,163 @@
// +build zos
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"golang.org/x/sys/unix"
)
// NewPty creates a new pty pair
// The master is returned as the first console and a string
// with the path to the pty slave is returned as the second
func NewPty() (Console, string, error) {
var f File
var err error
var slave string
for i := 0;; i++ {
ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
if err == nil {
slave = fmt.Sprintf("/dev/ttyp%04d", i)
break
}
if os.IsNotExist(err) {
return nil, "", err
}
// else probably Resource Busy
}
m, err := newMaster(f)
if err != nil {
return nil, "", err
}
return m, slave, nil
}
type master struct {
f File
original *unix.Termios
}
func (m *master) Read(b []byte) (int, error) {
return m.f.Read(b)
}
func (m *master) Write(b []byte) (int, error) {
return m.f.Write(b)
}
func (m *master) Close() error {
return m.f.Close()
}
func (m *master) Resize(ws WinSize) error {
return tcswinsz(m.f.Fd(), ws)
}
func (m *master) ResizeFrom(c Console) error {
ws, err := c.Size()
if err != nil {
return err
}
return m.Resize(ws)
}
func (m *master) Reset() error {
if m.original == nil {
return nil
}
return tcset(m.f.Fd(), m.original)
}
func (m *master) getCurrent() (unix.Termios, error) {
var termios unix.Termios
if err := tcget(m.f.Fd(), &termios); err != nil {
return unix.Termios{}, err
}
return termios, nil
}
func (m *master) SetRaw() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState = cfmakeraw(rawState)
rawState.Oflag = rawState.Oflag | unix.OPOST
return tcset(m.f.Fd(), &rawState)
}
func (m *master) DisableEcho() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState.Lflag = rawState.Lflag &^ unix.ECHO
return tcset(m.f.Fd(), &rawState)
}
func (m *master) Size() (WinSize, error) {
return tcgwinsz(m.f.Fd())
}
func (m *master) Fd() uintptr {
return m.f.Fd()
}
func (m *master) Name() string {
return m.f.Name()
}
// checkConsole checks if the provided file is a console
func checkConsole(f File) error {
var termios unix.Termios
if tcget(f.Fd(), &termios) != nil {
return ErrNotAConsole
}
return nil
}
func newMaster(f File) (Console, error) {
m := &master{
f: f,
}
t, err := m.getCurrent()
if err != nil {
return nil, err
}
m.original = &t
return m, nil
}
// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts normally. In particular, a not-very-well-known default of
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
// problem for terminal emulators, because we relay data from the terminal we
// also relay that funky line discipline.
func ClearONLCR(fd uintptr) error {
return setONLCR(fd, false)
}
// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts as intended for a terminal emulator.
func SetONLCR(fd uintptr) error {
return setONLCR(fd, true)
}

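Note: a usage sketch of the API this new file implements, relying only on the signatures shown above (NewPty, SetRaw, Size, Reset); untested, with error handling reduced to log.Fatal:

package main

import (
    "log"

    "github.com/containerd/console"
)

func main() {
    // NewPty returns the master Console and the path to the slave pty.
    master, slavePath, err := console.NewPty()
    if err != nil {
        log.Fatal(err)
    }
    defer master.Close()
    log.Println("slave pty:", slavePath)

    // Raw mode on the master; Reset restores the termios that newMaster saved.
    if err := master.SetRaw(); err != nil {
        log.Fatal(err)
    }
    defer master.Reset()

    ws, err := master.Size()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("window size: %+v", ws)
}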
vendor/github.com/containerd/console/go.mod generated vendored

@@ -2,7 +2,4 @@ module github.com/containerd/console
go 1.13
require (
github.com/pkg/errors v0.9.1
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
require golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c

vendor/github.com/containerd/console/go.sum generated vendored

@@ -1,4 +1,2 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

vendor/github.com/containerd/console/tc_unix.go generated vendored

@@ -1,4 +1,4 @@
// +build darwin freebsd linux netbsd openbsd solaris
// +build darwin freebsd linux netbsd openbsd solaris zos
/*
Copyright The containerd Authors.

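Note: that one changed constraint line is the whole port on the shared side: adding zos lets tc_unix.go build for z/OS, while the new tc_zos.go below supplies the platform's ioctl request constants. In current Go the same constraint would typically carry the //go:build form alongside the legacy comment:

//go:build darwin || freebsd || linux || netbsd || openbsd || solaris || zos
// +build darwin freebsd linux netbsd openbsd solaris zos

package console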
26
vendor/github.com/containerd/console/tc_zos.go generated vendored Normal file

@@ -0,0 +1,26 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)

vendor/github.com/golang/snappy/.gitignore generated vendored

@@ -1,16 +0,0 @@
cmd/snappytool/snappytool
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K

vendor/github.com/golang/snappy/AUTHORS generated vendored

@@ -1,18 +0,0 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Amazon.com, Inc
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post <klauspost@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/CONTRIBUTORS generated vendored

@@ -1,41 +0,0 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Alex Legg <alexlegg@google.com>
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney <jswinney@amazon.com>
Kai Backman <kaib@golang.org>
Klaus Post <klauspost@gmail.com>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/README generated vendored

@@ -1,107 +0,0 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
Benchmarks.
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
"go test -test.bench=."
_UFlat0-8 2.19GB/s ± 0% html
_UFlat1-8 1.41GB/s ± 0% urls
_UFlat2-8 23.5GB/s ± 2% jpg
_UFlat3-8 1.91GB/s ± 0% jpg_200
_UFlat4-8 14.0GB/s ± 1% pdf
_UFlat5-8 1.97GB/s ± 0% html4
_UFlat6-8 814MB/s ± 0% txt1
_UFlat7-8 785MB/s ± 0% txt2
_UFlat8-8 857MB/s ± 0% txt3
_UFlat9-8 719MB/s ± 1% txt4
_UFlat10-8 2.84GB/s ± 0% pb
_UFlat11-8 1.05GB/s ± 0% gaviota
_ZFlat0-8 1.04GB/s ± 0% html
_ZFlat1-8 534MB/s ± 0% urls
_ZFlat2-8 15.7GB/s ± 1% jpg
_ZFlat3-8 740MB/s ± 3% jpg_200
_ZFlat4-8 9.20GB/s ± 1% pdf
_ZFlat5-8 991MB/s ± 0% html4
_ZFlat6-8 379MB/s ± 0% txt1
_ZFlat7-8 352MB/s ± 0% txt2
_ZFlat8-8 396MB/s ± 1% txt3
_ZFlat9-8 327MB/s ± 1% txt4
_ZFlat10-8 1.33GB/s ± 1% pb
_ZFlat11-8 605MB/s ± 1% gaviota
"go test -test.bench=. -tags=noasm"
_UFlat0-8 621MB/s ± 2% html
_UFlat1-8 494MB/s ± 1% urls
_UFlat2-8 23.2GB/s ± 1% jpg
_UFlat3-8 1.12GB/s ± 1% jpg_200
_UFlat4-8 4.35GB/s ± 1% pdf
_UFlat5-8 609MB/s ± 0% html4
_UFlat6-8 296MB/s ± 0% txt1
_UFlat7-8 288MB/s ± 0% txt2
_UFlat8-8 309MB/s ± 1% txt3
_UFlat9-8 280MB/s ± 1% txt4
_UFlat10-8 753MB/s ± 0% pb
_UFlat11-8 400MB/s ± 0% gaviota
_ZFlat0-8 409MB/s ± 1% html
_ZFlat1-8 250MB/s ± 1% urls
_ZFlat2-8 12.3GB/s ± 1% jpg
_ZFlat3-8 132MB/s ± 0% jpg_200
_ZFlat4-8 2.92GB/s ± 0% pdf
_ZFlat5-8 405MB/s ± 1% html4
_ZFlat6-8 179MB/s ± 1% txt1
_ZFlat7-8 170MB/s ± 1% txt2
_ZFlat8-8 189MB/s ± 1% txt3
_ZFlat9-8 164MB/s ± 1% txt4
_ZFlat10-8 479MB/s ± 1% pb
_ZFlat11-8 270MB/s ± 1% gaviota
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
BM_UFlat/0 2.4GB/s html
BM_UFlat/1 1.4GB/s urls
BM_UFlat/2 21.8GB/s jpg
BM_UFlat/3 1.5GB/s jpg_200
BM_UFlat/4 13.3GB/s pdf
BM_UFlat/5 2.1GB/s html4
BM_UFlat/6 1.0GB/s txt1
BM_UFlat/7 959.4MB/s txt2
BM_UFlat/8 1.0GB/s txt3
BM_UFlat/9 864.5MB/s txt4
BM_UFlat/10 2.9GB/s pb
BM_UFlat/11 1.2GB/s gaviota
BM_ZFlat/0 944.3MB/s html (22.31 %)
BM_ZFlat/1 501.6MB/s urls (47.78 %)
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
BM_ZFlat/10 1.2GB/s pb (19.68 %)
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)

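Note: the deleted README above records output from the "go test -test.bench=." suite. For context, a minimal benchmark in that style (a stand-in input, not the testdata corpus the tables use):

package snappy_test

import (
    "testing"

    "github.com/golang/snappy"
)

func BenchmarkDecode(b *testing.B) {
    src := []byte("a repeating payload, a repeating payload, a repeating payload")
    enc := snappy.Encode(nil, src)
    b.SetBytes(int64(len(src))) // reports MB/s, as in the tables above
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if _, err := snappy.Decode(nil, enc); err != nil {
            b.Fatal(err)
        }
    }
}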
vendor/github.com/golang/snappy/decode_amd64.s generated vendored

@@ -1,490 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together mean that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
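// A pure-Go sketch of the slowForwardCopy path above, assuming (as the asm
// has already checked) that offset > 0 and dst has at least 10 bytes of
// slack past d+length; Go's copy is overlap-safe, like the 8-byte
// load-then-store the asm uses:
//
//	func slowForwardCopy(dst []byte, d, offset, length int) int {
//		for offset < 8 { // expand the pattern; d-offset stays fixed
//			copy(dst[d:d+8], dst[d-offset:])
//			length -= offset
//			d += offset
//			offset += offset
//		}
//		for i := 0; i < length; i += 8 { // may overrun by up to 10 bytes
//			copy(dst[d+i:d+i+8], dst[d-offset+i:])
//		}
//		return d + length // as in fixUpSlowForwardCopy, length may be negative here
//	}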


@ -1,494 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R2 scratch
// - R3 scratch
// - R4 length or x
// - R5 offset
// - R6 &src[s]
// - R7 &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7.
// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
TEXT ·decode(SB), NOSPLIT, $56-56
// Initialize R6, R7 and R8-R13.
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R7
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R6
MOVD R11, R13
ADD R12, R13, R13
loop:
// for s < len(src)
CMP R13, R6
BEQ end
// R4 = uint32(src[s])
//
// switch src[s] & 0x03
MOVBU (R6), R4
MOVW R4, R3
ANDW $3, R3
MOVW $1, R1
CMPW R1, R3
BGE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
// case x < 60:
// s++
ADD $1, R6, R6
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R4 == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R4 can hold 64 bits, so the increment cannot overflow.
ADD $1, R4, R4
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R2 = len(dst) - d
// R3 = len(src) - s
MOVD R10, R2
SUB R7, R2, R2
MOVD R13, R3
SUB R6, R3, R3
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMP $16, R4
BGT callMemmove
CMP $16, R2
BLT callMemmove
CMP $16, R3
BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R6), (R14, R15)
STP (R14, R15), 0(R7)
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMP R2, R4
BGT errCorrupt
CMP R3, R4
BGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVD R7, 8(RSP)
MOVD R6, 16(RSP)
MOVD R4, 24(RSP)
MOVD R7, 32(RSP)
MOVD R6, 40(RSP)
MOVD R4, 48(RSP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVD 32(RSP), R7
MOVD 40(RSP), R6
MOVD 48(RSP), R4
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R13
ADD R12, R13, R13
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADD R4, R6, R6
SUB $58, R6, R6
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// case x == 60:
MOVW $61, R1
CMPW R1, R4
BEQ tagLit61
BGT tagLit62Plus
// x = uint32(src[s-1])
MOVBU -1(R6), R4
B doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVHU -2(R6), R4
B doLit
tagLit62Plus:
CMPW $62, R4
BHI tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVHU -3(R6), R4
MOVBU -1(R6), R3
ORR R3<<16, R4
B doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVWU -4(R6), R4
B doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADD $5, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-5])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVWU -4(R6), R5
B doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADD $3, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-3])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVHU -2(R6), R5
B doCopy
tagCopy:
// We have a copy tag. We assume that:
// - R3 == src[s] & 0x03
// - R4 == src[s]
CMP $2, R3
BEQ tagCopy2
BGT tagCopy4
// case tagCopy1:
// s += 2
ADD $2, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVD R4, R5
AND $0xe0, R5
MOVBU -1(R6), R3
ORR R5<<3, R3, R5
// length = 4 + int(src[s-2])>>2&0x7
MOVD $7, R1
AND R4>>2, R1, R4
ADD $4, R4, R4
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - R4 == length && R4 > 0
// - R5 == offset
// if offset <= 0 { etc }
MOVD $0, R1
CMP R1, R5
BLE errCorrupt
// if d < offset { etc }
MOVD R7, R3
SUB R8, R3, R3
CMP R5, R3
BLT errCorrupt
// if length > len(dst)-d { etc }
MOVD R10, R3
SUB R7, R3, R3
CMP R3, R4
BGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVD R10, R14
SUB R7, R14, R14
MOVD R7, R15
SUB R5, R15, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMP $16, R4
BGT slowForwardCopy
CMP $8, R5
BLT slowForwardCopy
CMP $16, R14
BLT slowForwardCopy
MOVD 0(R15), R2
MOVD R2, 0(R7)
MOVD 8(R15), R3
MOVD R3, 8(R7)
ADD R4, R7, R7
B loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUB $10, R14, R14
CMP R14, R4
BGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together mean that d-offset, and therefore
// // R15, is unchanged.
// }
CMP $8, R5
BGE fixUpSlowForwardCopy
MOVD (R15), R3
MOVD R3, (R7)
SUB R5, R4, R4
ADD R5, R7, R7
ADD R5, R5, R5
B makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by R7 being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save R7 to R2 so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVD R7, R2
ADD R4, R7, R7
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
MOVD $0, R1
CMP R1, R4
BLE loop
MOVD (R15), R3
MOVD R3, (R2)
ADD $8, R15, R15
ADD $8, R2, R2
SUB $8, R4, R4
B finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), R3
MOVB R3, (R7)
ADD $1, R15, R15
ADD $1, R7, R7
SUB $1, R4, R4
CBNZ R4, verySlowForwardCopy
B loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMP R10, R7
BNE errCorrupt
// return 0
MOVD $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVD $1, R2
MOVD R2, ret+48(FP)
RET


@ -1,15 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int


@ -1,730 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
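// The tag encodings that the oneByte/twoBytes/threeBytes labels implement,
// as a Go sketch; tagLiteral is 0x00 in the pure Go code, 0xf0 == 60<<2 and
// 0xf4 == 61<<2:
//
//	func emitLiteralHeader(dst []byte, n int) int { // n = len(lit) - 1
//		switch {
//		case n < 60: // 1-byte header: length packed into the tag byte
//			dst[0] = uint8(n)<<2 | 0x00
//			return 1
//		case n < 1<<8: // 2-byte header
//			dst[0], dst[1] = 0xf0, uint8(n)
//			return 2
//		default: // 3-byte header, little-endian length
//			dst[0], dst[1], dst[2] = 0xf4, uint8(n), uint8(n>>8)
//			return 3
//		}
//	}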
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
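// The step2/step3 encodings as a Go sketch; by this point length is in
// [4, 64], and tagCopy1/tagCopy2 are 0x01/0x02 in the pure Go code:
//
//	func emitCopyTail(dst []byte, offset, length int) int {
//		if length < 12 && offset < 2048 { // tagCopy1: 2 bytes
//			dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | 0x01
//			dst[1] = uint8(offset)
//			return 2
//		}
//		dst[0] = uint8(length-1)<<2 | 0x02 // tagCopy2: 3 bytes
//		dst[1], dst[2] = uint8(offset), uint8(offset>>8)
//		return 3
//	}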
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
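// What cmp8/bsf compute, as a Go sketch (imports encoding/binary and
// math/bits assumed); bits.TrailingZeros64 plays the role of BSF, and /8
// converts the bit index of the first difference into a byte index:
//
//	func extendMatch(src []byte, i, j int) int {
//		for j+8 <= len(src) {
//			x := binary.LittleEndian.Uint64(src[i:])
//			y := binary.LittleEndian.Uint64(src[j:])
//			if x != y {
//				return j + bits.TrailingZeros64(x^y)/8
//			}
//			i, j = i+8, j+8
//		}
//		for j < len(src) && src[i] == src[j] { // 1 byte at a time in the tail
//			i, j = i+1, j+1
//		}
//		return j
//	}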
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET
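// The hash that the IMULL $0x1e35a7bd / SHRL CX pairs compute, written out
// in Go (it mirrors hash() in the pure Go encode_other.go):
//
//	func hash(u, shift uint32) uint32 {
//		return (u * 0x1e35a7bd) >> shift
//	}
//
// shift starts at 24 for the minimum 256-entry table, and calcShift
// decrements it each time tableSize doubles, so larger tables keep more of
// the high hash bits.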


@ -1,722 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - R3 len(lit)
// - R4 n
// - R6 return value
// - R8 &dst[i]
// - R10 &lit[0]
//
// The 32 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $32-56
MOVD dst_base+0(FP), R8
MOVD lit_base+24(FP), R10
MOVD lit_len+32(FP), R3
MOVD R3, R6
MOVW R3, R4
SUBW $1, R4, R4
CMPW $60, R4
BLT oneByte
CMPW $256, R4
BLT twoBytes
threeBytes:
MOVD $0xf4, R2
MOVB R2, 0(R8)
MOVW R4, 1(R8)
ADD $3, R8, R8
ADD $3, R6, R6
B memmove
twoBytes:
MOVD $0xf0, R2
MOVB R2, 0(R8)
MOVB R4, 1(R8)
ADD $2, R8, R8
ADD $2, R6, R6
B memmove
oneByte:
LSLW $2, R4, R4
MOVB R4, 0(R8)
ADD $1, R8, R8
ADD $1, R6, R6
memmove:
MOVD R6, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// R8, R10 and R3 as arguments.
MOVD R8, 8(RSP)
MOVD R10, 16(RSP)
MOVD R3, 24(RSP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - R3 length
// - R7 &dst[0]
// - R8 &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVD dst_base+0(FP), R8
MOVD R8, R7
MOVD offset+24(FP), R11
MOVD length+32(FP), R3
loop0:
// for length >= 68 { etc }
CMPW $68, R3
BLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVD $0xfe, R2
MOVB R2, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUB $64, R3, R3
B loop0
step1:
// if length > 64 { etc }
CMP $64, R3
BLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R2
MOVB R2, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUB $60, R3, R3
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMP $12, R3
BGE step3
CMPW $2048, R11
BGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(R8)
LSRW $3, R11, R11
AND $0xe0, R11, R11
SUB $4, R3, R3
LSLW $2, R3
AND $0xff, R3, R3
ORRW R3, R11, R11
ORRW $1, R11, R11
MOVB R11, 0(R8)
ADD $2, R8, R8
// Return the number of bytes written.
SUB R7, R8, R8
MOVD R8, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUB $1, R3, R3
AND $0xff, R3, R3
LSLW $2, R3, R3
ORRW $2, R3, R3
MOVB R3, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
// Return the number of bytes written.
SUB R7, R8, R8
MOVD R8, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - R6 &src[0]
// - R7 &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVD src_base+0(FP), R6
MOVD src_len+8(FP), R14
MOVD i+24(FP), R15
MOVD j+32(FP), R7
ADD R6, R14, R14
ADD R6, R15, R15
ADD R6, R7, R7
MOVD R14, R13
SUB $8, R13, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMP R13, R7
BHI cmp1
MOVD (R15), R3
MOVD (R7), R4
CMP R4, R3
BNE bsf
ADD $8, R15, R15
ADD $8, R7, R7
B cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs.
// RBIT reverses the bit order, then CLZ counts the leading zeros, the
// combination of which finds the least significant bit which is set.
// The arm64 architecture is little-endian, and the shift by 3 converts
// a bit index to a byte index.
EOR R3, R4, R4
RBIT R4, R4
CLZ R4, R4
ADD R4>>3, R7, R7
// Convert from &src[ret] to ret.
SUB R6, R7, R7
MOVD R7, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMP R7, R14
BLS extendMatchEnd
MOVB (R15), R3
MOVB (R7), R4
CMP R4, R3
BNE extendMatchEnd
ADD $1, R15, R15
ADD $1, R7, R7
B cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUB R6, R7, R7
MOVD R7, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - R3 . .
// - R4 . .
// - R5 64 shift
// - R6 72 &src[0], tableSize
// - R7 80 &src[s]
// - R8 88 &dst[d]
// - R9 96 sLimit
// - R10 . &src[nextEmit]
// - R11 104 prevHash, currHash, nextHash, offset
// - R12 112 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 120 candidate
// - R16 . hash constant, 0x1e35a7bd
// - R17 . &table
// - . 128 table
//
// The second column (64, 72, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
TEXT ·encodeBlock(SB), 0, $32896-56
MOVD dst_base+0(FP), R8
MOVD src_base+24(FP), R7
MOVD src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVD $24, R5
MOVD $256, R6
MOVW $0xa7bd, R16
MOVKW $(0x1e35<<16), R16
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
MOVD $16384, R2
CMP R2, R6
BGE varTable
CMP R14, R6
BGE varTable
SUB $1, R5, R5
LSL $1, R6, R6
B calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each
// iteration writes 64 bytes, so we can do only tableSize/32 writes
// instead of the 2048 writes that would zero-initialize all of table's
// 32768 bytes. This clear could overrun the first tableSize elements, but
// it won't overrun the allocated stack size.
ADD $128, RSP, R17
MOVD R17, R4
// !!! R6 = &src[tableSize]
ADD R6<<1, R17, R6
memclr:
STP.P (ZR, ZR), 64(R4)
STP (ZR, ZR), -48(R4)
STP (ZR, ZR), -32(R4)
STP (ZR, ZR), -16(R4)
CMP R4, R6
BHI memclr
// !!! R6 = &src[0]
MOVD R7, R6
// sLimit := len(src) - inputMargin
MOVD R14, R9
SUB $15, R9, R9
// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
// change for the rest of the function.
MOVD R5, 64(RSP)
MOVD R6, 72(RSP)
MOVD R9, 96(RSP)
// nextEmit := 0
MOVD R6, R10
// s := 1
ADD $1, R7, R7
// nextHash := hash(load32(src, s), shift)
MOVW 0(R7), R11
MULW R16, R11, R11
LSRW R5, R11, R11
outer:
// for { etc }
// skip := 32
MOVD $32, R12
// nextS := s
MOVD R7, R13
// candidate := 0
MOVD $0, R15
inner0:
// for { etc }
// s := nextS
MOVD R13, R7
// bytesBetweenHashLookups := skip >> 5
MOVD R12, R14
LSR $5, R14, R14
// nextS = s + bytesBetweenHashLookups
ADD R14, R13, R13
// skip += bytesBetweenHashLookups
ADD R14, R12, R12
// if nextS > sLimit { goto emitRemainder }
MOVD R13, R3
SUB R6, R3, R3
CMP R9, R3
BHI emitRemainder
// candidate = int(table[nextHash])
MOVHU 0(R17)(R11<<1), R15
// table[nextHash] = uint16(s)
MOVD R7, R3
SUB R6, R3, R3
MOVH R3, 0(R17)(R11<<1)
// nextHash = hash(load32(src, nextS), shift)
MOVW 0(R13), R11
MULW R16, R11
LSRW R5, R11, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVW 0(R7), R3
MOVW (R6)(R15), R4
CMPW R4, R3
BNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVD R7, R3
SUB R10, R3, R3
CMP $16, R3
BLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVW R3, R4
SUBW $1, R4, R4
MOVW $60, R2
CMPW R2, R4
BLT inlineEmitLiteralOneByte
MOVW $256, R2
CMPW R2, R4
BLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVD $0xf4, R1
MOVB R1, 0(R8)
MOVW R4, 1(R8)
ADD $3, R8, R8
B inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVD $0xf0, R1
MOVB R1, 0(R8)
MOVB R4, 1(R8)
ADD $2, R8, R8
B inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
LSLW $2, R4, R4
MOVB R4, 0(R8)
ADD $1, R8, R8
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// R8, R10 and R3 as arguments.
MOVD R8, 8(RSP)
MOVD R10, 16(RSP)
MOVD R3, 24(RSP)
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADD R3, R8, R8
MOVD R7, 80(RSP)
MOVD R8, 88(RSP)
MOVD R15, 120(RSP)
CALL runtime·memmove(SB)
MOVD 64(RSP), R5
MOVD 72(RSP), R6
MOVD 80(RSP), R7
MOVD 88(RSP), R8
MOVD 96(RSP), R9
MOVD 120(RSP), R15
ADD $128, RSP, R17
MOVW $0xa7bd, R16
MOVKW $(0x1e35<<16), R16
B inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB R3, R4
SUBW $1, R4, R4
AND $0xff, R4, R4
LSLW $2, R4, R4
MOVB R4, (R8)
ADD $1, R8, R8
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R10), (R0, R1)
STP (R0, R1), 0(R8)
ADD R3, R8, R8
inner1:
// for { etc }
// base := s
MOVD R7, R12
// !!! offset := base - candidate
MOVD R12, R11
SUB R15, R11, R11
SUB R6, R11, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVD src_len+32(FP), R14
ADD R6, R14, R14
// !!! R13 = &src[len(src) - 8]
MOVD R14, R13
SUB $8, R13, R13
// !!! R15 = &src[candidate + 4]
ADD $4, R15, R15
ADD R6, R15, R15
// !!! s += 4
ADD $4, R7, R7
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMP R13, R7
BHI inlineExtendMatchCmp1
MOVD (R15), R3
MOVD (R7), R4
CMP R4, R3
BNE inlineExtendMatchBSF
ADD $8, R15, R15
ADD $8, R7, R7
B inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs.
// RBIT reverses the bit order, then CLZ counts the leading zeros, the
// combination of which finds the least significant bit which is set.
// The arm64 architecture is little-endian, and the shift by 3 converts
// a bit index to a byte index.
EOR R3, R4, R4
RBIT R4, R4
CLZ R4, R4
ADD R4>>3, R7, R7
B inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMP R7, R14
BLS inlineExtendMatchEnd
MOVB (R15), R3
MOVB (R7), R4
CMP R4, R3
BNE inlineExtendMatchEnd
ADD $1, R15, R15
ADD $1, R7, R7
B inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVD R7, R3
SUB R12, R3, R3
inlineEmitCopyLoop0:
// for length >= 68 { etc }
MOVW $68, R2
CMPW R2, R3
BLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVD $0xfe, R1
MOVB R1, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUBW $64, R3, R3
B inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
MOVW $64, R2
CMPW R2, R3
BLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R1
MOVB R1, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
SUBW $60, R3, R3
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
MOVW $12, R2
CMPW R2, R3
BGE inlineEmitCopyStep3
MOVW $2048, R2
CMPW R2, R11
BGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(R8)
LSRW $8, R11, R11
LSLW $5, R11, R11
SUBW $4, R3, R3
AND $0xff, R3, R3
LSLW $2, R3, R3
ORRW R3, R11, R11
ORRW $1, R11, R11
MOVB R11, 0(R8)
ADD $2, R8, R8
B inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBW $1, R3, R3
LSLW $2, R3, R3
ORRW $2, R3, R3
MOVB R3, 0(R8)
MOVW R11, 1(R8)
ADD $3, R8, R8
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVD R7, R10
// if s >= sLimit { goto emitRemainder }
MOVD R7, R3
SUB R6, R3, R3
CMP R3, R9
BLS emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVD -1(R7), R14
// prevHash := hash(uint32(x>>0), shift)
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// table[prevHash] = uint16(s-1)
MOVD R7, R3
SUB R6, R3, R3
SUB $1, R3, R3
MOVHU R3, 0(R17)(R11<<1)
// currHash := hash(uint32(x>>8), shift)
LSR $8, R14, R14
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// candidate = int(table[currHash])
MOVHU 0(R17)(R11<<1), R15
// table[currHash] = uint16(s)
ADD $1, R3, R3
MOVHU R3, 0(R17)(R11<<1)
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVW (R6)(R15), R4
CMPW R4, R14
BEQ inner1
// nextHash = hash(uint32(x>>16), shift)
LSR $8, R14, R14
MOVW R14, R11
MULW R16, R11, R11
LSRW R5, R11, R11
// s++
ADD $1, R7, R7
// break out of the inner1 for loop, i.e. continue the outer loop.
B outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVD src_len+32(FP), R3
ADD R6, R3, R3
CMP R3, R10
BEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVD R8, 8(RSP)
MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative.
MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative.
MOVD R10, 32(RSP)
SUB R10, R3, R3
MOVD R3, 40(RSP)
MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVD R8, 88(RSP)
CALL ·emitLiteral(SB)
MOVD 88(RSP), R8
// Finish the "d +=" part of "d += emitLiteral(etc)".
MOVD 56(RSP), R1
ADD R1, R8, R8
encodeBlockEnd:
MOVD dst_base+0(FP), R3
SUB R3, R8, R8
MOVD R8, d+48(FP)
RET


@ -1,30 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)


@ -1 +0,0 @@
module github.com/golang/snappy


@ -1,3 +1,4 @@
# Compiler support code
This directory contains compiler support code used by Gnostic and Gnostic extensions.
This directory contains compiler support code used by Gnostic and Gnostic
extensions.


@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,30 +14,36 @@
package compiler
import (
yaml "gopkg.in/yaml.v3"
)
// Context contains state of the compiler as it traverses a document.
type Context struct {
Parent *Context
Name string
Node *yaml.Node
ExtensionHandlers *[]ExtensionHandler
}
// NewContextWithExtensions returns a new object representing the compiler state
func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers}
}
// NewContext returns a new object representing the compiler state
func NewContext(name string, parent *Context) *Context {
func NewContext(name string, node *yaml.Node, parent *Context) *Context {
if parent != nil {
return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
}
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
}
// Description returns a text description of the compiler state
func (context *Context) Description() string {
name := context.Name
if context.Parent != nil {
return context.Parent.Description() + "." + context.Name
name = context.Parent.Description() + "." + name
}
return context.Name
return name
}
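// A hypothetical usage sketch (not part of this file): each nested value
// gets a child context carrying its *yaml.Node, and Description() joins the
// names from the root down. docNode and componentsNode stand in for nodes
// obtained from yaml.Unmarshal:
//
//	root := NewContext("$root", docNode, nil)
//	ctx := NewContext("components", componentsNode, root)
//	_ = ctx.Description() // "$root.components"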


@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@
package compiler
import "fmt"
// Error represents compiler errors and their location in the document.
type Error struct {
Context *Context
@ -25,12 +27,19 @@ func NewError(context *Context, message string) *Error {
return &Error{Context: context, Message: message}
}
func (err *Error) locationDescription() string {
if err.Context.Node != nil {
return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description())
}
return err.Context.Description()
}
// Error returns the string value of an Error.
func (err *Error) Error() string {
if err.Context == nil {
return "ERROR " + err.Message
return err.Message
}
return "ERROR " + err.Context.Description() + " " + err.Message
return err.locationDescription() + " " + err.Message
}
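// A hypothetical sketch of the effect (ctx is assumed to be a *Context whose
// Node is set): with a Node attached, errors now carry the YAML source
// position.
//
//	err := NewError(ctx, "unknown property")
//	_ = err.Error() // e.g. "[12,3] $root.components unknown property",
//	                // where 12 and 3 stand in for the node's Line and Column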
// ErrorGroup is a container for groups of Error values.


@ -1,101 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"errors"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
ext_plugin "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v2"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// HandleExtension calls a binary extension handler.
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
handled := false
var errFromPlugin error
var outFromPlugin *any.Any
if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
if outFromPlugin == nil {
continue
} else {
handled = true
break
}
}
}
return handled, outFromPlugin, errFromPlugin
}
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
binary, _ := yaml.Marshal(in)
request := &ext_plugin.ExtensionHandlerRequest{}
version := &ext_plugin.Version{}
version.Major = 0
version.Minor = 1
version.Patch = 0
request.CompilerVersion = version
request.Wrapper = &ext_plugin.Wrapper{}
request.Wrapper.Version = "v2"
request.Wrapper.Yaml = string(binary)
request.Wrapper.ExtensionName = extensionName
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
fmt.Printf("Error: %+v\n", err)
return nil, err
}
response := &ext_plugin.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil {
fmt.Printf("Error: %+v\n", err)
fmt.Printf("%s\n", string(output))
return nil, err
}
if !response.Handled {
return nil, nil
}
if len(response.Error) != 0 {
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
return nil, errors.New(message)
}
return response.Value, nil
}
return nil, nil
}


@ -0,0 +1,85 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
extensions "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v3"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// CallExtension calls a binary extension handler.
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
if context == nil || context.ExtensionHandlers == nil {
return false, nil, nil
}
handled = false
for _, handler := range *(context.ExtensionHandlers) {
response, err = handler.handle(in, extensionName)
if response == nil {
continue
} else {
handled = true
break
}
}
return handled, response, err
}
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
yamlData, _ := yaml.Marshal(in)
request := &extensions.ExtensionHandlerRequest{
CompilerVersion: &extensions.Version{
Major: 0,
Minor: 1,
Patch: 0,
},
Wrapper: &extensions.Wrapper{
Version: "unknown", // TODO: set this to the type/version of spec being parsed.
Yaml: string(yamlData),
ExtensionName: extensionName,
},
}
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
return nil, err
}
response := &extensions.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil || !response.Handled {
return nil, err
}
if len(response.Errors) != 0 {
return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ","))
}
return response.Value, nil
}
return nil, nil
}
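// A hypothetical call site (ctx and node are assumed inputs, not part of
// this file):
//
//	handled, value, err := CallExtension(ctx, node, "x-sample-extension")
//	if err == nil && handled {
//		_ = value // the *any.Any produced by the handler binary
//	}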


@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,56 +16,63 @@ package compiler
import (
"fmt"
"gopkg.in/yaml.v2"
"regexp"
"sort"
"strconv"
"github.com/googleapis/gnostic/jsonschema"
"gopkg.in/yaml.v3"
)
// compiler helper functions, usually called from generated code
// UnpackMap gets a yaml.MapSlice if possible.
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
m, ok := in.(yaml.MapSlice)
if ok {
return m, true
// UnpackMap gets a *yaml.Node if possible.
func UnpackMap(in *yaml.Node) (*yaml.Node, bool) {
if in == nil {
return nil, false
}
// do we have an empty array?
a, ok := in.([]interface{})
if ok && len(a) == 0 {
// if so, return an empty map
return yaml.MapSlice{}, true
}
return nil, false
return in, true
}
// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
func SortedKeysForMap(m yaml.MapSlice) []string {
// SortedKeysForMap returns the sorted keys of a mapping *yaml.Node.
func SortedKeysForMap(m *yaml.Node) []string {
keys := make([]string, 0)
for _, item := range m {
keys = append(keys, item.Key.(string))
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
keys = append(keys, m.Content[i].Value)
}
}
sort.Strings(keys)
return keys
}
// MapHasKey returns true if a yaml.MapSlice contains a specified key.
func MapHasKey(m yaml.MapSlice, key string) bool {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return true
// MapHasKey returns true if a mapping *yaml.Node contains a specified key.
func MapHasKey(m *yaml.Node, key string) bool {
if m == nil {
return false
}
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return true
}
}
}
return false
}
// MapValueForKey gets the value of a map value for a specified key.
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return item.Value
func MapValueForKey(m *yaml.Node, key string) *yaml.Node {
if m == nil {
return nil
}
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return m.Content[i+1]
}
}
}
return nil
@ -83,8 +90,118 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
return stringArray
}
// SequenceNodeForNode returns a node if it is a SequenceNode.
func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) {
if node.Kind != yaml.SequenceNode {
return nil, false
}
return node, true
}
// BoolForScalarNode returns the bool value of a node.
func BoolForScalarNode(node *yaml.Node) (bool, bool) {
if node == nil {
return false, false
}
if node.Kind == yaml.DocumentNode {
return BoolForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return false, false
}
if node.Tag != "!!bool" {
return false, false
}
v, err := strconv.ParseBool(node.Value)
if err != nil {
return false, false
}
return v, true
}
// IntForScalarNode returns the integer value of a node.
func IntForScalarNode(node *yaml.Node) (int64, bool) {
if node == nil {
return 0, false
}
if node.Kind == yaml.DocumentNode {
return IntForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0, false
}
if node.Tag != "!!int" {
return 0, false
}
v, err := strconv.ParseInt(node.Value, 10, 64)
if err != nil {
return 0, false
}
return v, true
}
// FloatForScalarNode returns the float value of a node.
func FloatForScalarNode(node *yaml.Node) (float64, bool) {
if node == nil {
return 0.0, false
}
if node.Kind == yaml.DocumentNode {
return FloatForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0.0, false
}
if (node.Tag != "!!int") && (node.Tag != "!!float") {
return 0.0, false
}
v, err := strconv.ParseFloat(node.Value, 64)
if err != nil {
return 0.0, false
}
return v, true
}
// StringForScalarNode returns the string value of a node.
func StringForScalarNode(node *yaml.Node) (string, bool) {
if node == nil {
return "", false
}
if node.Kind == yaml.DocumentNode {
return StringForScalarNode(node.Content[0])
}
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!int":
return node.Value, true
case "!!str":
return node.Value, true
case "!!timestamp":
return node.Value, true
case "!!null":
return "", true
default:
return "", false
}
default:
return "", false
}
}
// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible.
func StringArrayForSequenceNode(node *yaml.Node) []string {
stringArray := make([]string, 0)
for _, item := range node.Content {
v, ok := StringForScalarNode(item)
if ok {
stringArray = append(stringArray, v)
}
}
return stringArray
}
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string {
missingKeys := make([]string, 0)
for _, k := range requiredKeys {
if !MapHasKey(m, k) {
@ -95,64 +212,109 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
}
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
invalidKeys := make([]string, 0)
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok {
key := itemKey
found := false
// does the key match an allowed key?
for _, allowedKey := range allowedKeys {
if key == allowedKey {
if m == nil || m.Kind != yaml.MappingNode {
return invalidKeys
}
for i := 0; i < len(m.Content); i += 2 {
key := m.Content[i].Value
found := false
// does the key match an allowed key?
for _, allowedKey := range allowedKeys {
if key == allowedKey {
found = true
break
}
}
if !found {
// does the key match an allowed pattern?
for _, allowedPattern := range allowedPatterns {
if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
// does the key match an allowed pattern?
for _, allowedPattern := range allowedPatterns {
if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
invalidKeys = append(invalidKeys, key)
}
invalidKeys = append(invalidKeys, key)
}
}
}
return invalidKeys
}
// DescribeMap describes a map (for debugging purposes).
func DescribeMap(in interface{}, indent string) string {
description := ""
m, ok := in.(map[string]interface{})
if ok {
keys := make([]string, 0)
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := m[k]
description += fmt.Sprintf("%s%s:\n", indent, k)
description += DescribeMap(v, indent+" ")
}
return description
// NewNullNode creates a new Null node.
func NewNullNode() *yaml.Node {
node := &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!null",
}
a, ok := in.([]interface{})
if ok {
for i, v := range a {
description += fmt.Sprintf("%s%d:\n", indent, i)
description += DescribeMap(v, indent+" ")
}
return description
return node
}
// NewMappingNode creates a new Mapping node.
func NewMappingNode() *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: make([]*yaml.Node, 0),
}
}
// NewSequenceNode creates a new Sequence node.
func NewSequenceNode() *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
}
return node
}
// NewScalarNodeForString creates a new node to hold a string.
func NewScalarNodeForString(s string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: s,
}
}
// NewSequenceNodeForStringArray creates a new node to hold an array of strings.
func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
}
for _, s := range strings {
node.Content = append(node.Content, NewScalarNodeForString(s))
}
return node
}
// NewScalarNodeForBool creates a new node to hold a bool.
func NewScalarNodeForBool(b bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", b),
}
}
// NewScalarNodeForFloat creates a new node to hold a float.
func NewScalarNodeForFloat(f float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%g", f),
}
}
// NewScalarNodeForInt creates a new node to hold an integer.
func NewScalarNodeForInt(i int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", i),
}
description += fmt.Sprintf("%s%+v\n", indent, in)
return description
}
// PluralProperties returns the string "properties" pluralized.
@ -195,3 +357,40 @@ func StringValue(item interface{}) (value string, ok bool) {
}
return "", false
}
// Description returns a human-readable representation of an item.
func Description(item interface{}) string {
value, ok := item.(*yaml.Node)
if ok {
return jsonschema.Render(value)
}
return fmt.Sprintf("%+v", item)
}
// Display returns a description of a node for use in error messages.
func Display(node *yaml.Node) string {
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!str":
return fmt.Sprintf("%s (string)", node.Value)
}
}
return fmt.Sprintf("%+v (%T)", node, node)
}
// Marshal creates a yaml version of a structure in our preferred style
func Marshal(in *yaml.Node) []byte {
clearStyle(in)
//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
bytes, _ := yaml.Marshal(in)
return bytes
}
func clearStyle(node *yaml.Node) {
node.Style = 0
for _, c := range node.Content {
clearStyle(c)
}
}
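
Taken together, the constructors and accessors above form a small API for building and reading yaml.Node trees. A self-contained sketch using only functions defined in this file:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Build a mapping node equivalent to the YAML document "count: 3".
	// Keys of a mapping node live at even indices of Content and values at odd
	// indices, which is the layout MapValueForKey and SortedKeysForMap walk.
	m := compiler.NewMappingNode()
	m.Content = append(m.Content,
		compiler.NewScalarNodeForString("count"),
		compiler.NewScalarNodeForInt(3))

	if v := compiler.MapValueForKey(m, "count"); v != nil {
		if n, ok := compiler.IntForScalarNode(v); ok {
			fmt.Println("count =", n) // count = 3
		}
	}
	fmt.Print(string(compiler.Marshal(m))) // count: 3
}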

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,7 +15,6 @@
package compiler
import (
"errors"
"fmt"
"io/ioutil"
"log"
@ -23,18 +22,30 @@ import (
"net/url"
"path/filepath"
"strings"
"sync"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
)
var fileCache map[string][]byte
var infoCache map[string]interface{}
var count int64
var verboseReader = false
var fileCache map[string][]byte
var infoCache map[string]*yaml.Node
var fileCacheEnable = true
var infoCacheEnable = true
// These locks are used to synchronize accesses to the fileCache and infoCache
// maps (above). They are global state and can cause fatal concurrent-map-access
// errors when modified from separate goroutines. The general strategy is to protect
// all public functions in this file with mutex Lock() calls. As a result, to
// avoid deadlock, these public functions should not call other public
// functions, so some public functions have private equivalents.
// In the future, we might consider replacing the maps with sync.Map and
// eliminating these mutexes.
var fileCacheMutex sync.Mutex
var infoCacheMutex sync.Mutex
func initializeFileCache() {
if fileCache == nil {
fileCache = make(map[string][]byte, 0)
@ -43,27 +54,42 @@ func initializeFileCache() {
func initializeInfoCache() {
if infoCache == nil {
infoCache = make(map[string]interface{}, 0)
infoCache = make(map[string]*yaml.Node, 0)
}
}
// EnableFileCache turns on file caching.
func EnableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = true
}
// EnableInfoCache turns on parsed info caching.
func EnableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = true
}
// DisableFileCache turns off file caching.
func DisableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = false
}
// DisableInfoCache turns off parsed info caching.
func DisableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = false
}
// RemoveFromFileCache removes an entry from the file cache.
func RemoveFromFileCache(fileurl string) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
if !fileCacheEnable {
return
}
@ -71,7 +97,10 @@ func RemoveFromFileCache(fileurl string) {
delete(fileCache, fileurl)
}
// RemoveFromInfoCache removes an entry from the info cache.
func RemoveFromInfoCache(filename string) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if !infoCacheEnable {
return
}
@ -79,21 +108,31 @@ func RemoveFromInfoCache(filename string) {
delete(infoCache, filename)
}
func GetInfoCache() map[string]interface{} {
// GetInfoCache returns the info cache map.
func GetInfoCache() map[string]*yaml.Node {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if infoCache == nil {
initializeInfoCache()
}
return infoCache
}
// ClearFileCache clears the file cache.
func ClearFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCache = make(map[string][]byte, 0)
}
// ClearInfoCache clears the info cache.
func ClearInfoCache() {
infoCache = make(map[string]interface{})
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCache = make(map[string]*yaml.Node)
}
// ClearCaches clears all caches.
func ClearCaches() {
ClearFileCache()
ClearInfoCache()
@ -101,6 +140,12 @@ func ClearCaches() {
// FetchFile gets a specified file from the local filesystem or a remote location.
func FetchFile(fileurl string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return fetchFile(fileurl)
}
func fetchFile(fileurl string) ([]byte, error) {
var bytes []byte
initializeFileCache()
if fileCacheEnable {
@ -121,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) {
}
defer response.Body.Close()
if response.StatusCode != 200 {
return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
}
bytes, err = ioutil.ReadAll(response.Body)
if fileCacheEnable && err == nil {
@ -132,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) {
// ReadBytesForFile reads the bytes of a file.
func ReadBytesForFile(filename string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return readBytesForFile(filename)
}
func readBytesForFile(filename string) ([]byte, error) {
// is the filename a url?
fileurl, _ := url.Parse(filename)
if fileurl.Scheme != "" {
// yes, fetch it
bytes, err := FetchFile(filename)
bytes, err := fetchFile(filename)
if err != nil {
return nil, err
}
@ -150,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) {
return bytes, nil
}
// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
// ReadInfoFromBytes unmarshals a file as a *yaml.Node.
func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
return readInfoFromBytes(filename, bytes)
}
func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
initializeInfoCache()
if infoCacheEnable {
cachedInfo, ok := infoCache[filename]
@ -165,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
log.Printf("Reading info for file %s", filename)
}
}
var info yaml.MapSlice
var info yaml.Node
err := yaml.Unmarshal(bytes, &info)
if err != nil {
return nil, err
}
if infoCacheEnable && len(filename) > 0 {
infoCache[filename] = info
infoCache[filename] = &info
}
return info, nil
return &info, nil
}
// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
initializeInfoCache()
if infoCacheEnable {
info, ok := infoCache[ref]
@ -191,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
log.Printf("Reading info for ref %s#%s", basefile, ref)
}
}
count = count + 1
basedir, _ := filepath.Split(basefile)
parts := strings.Split(ref, "#")
var filename string
@ -204,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
} else {
filename = basefile
}
bytes, err := ReadBytesForFile(filename)
bytes, err := readBytesForFile(filename)
if err != nil {
return nil, err
}
info, err := ReadInfoFromBytes(filename, bytes)
info, err := readInfoFromBytes(filename, bytes)
if info != nil && info.Kind == yaml.DocumentNode {
info = info.Content[0]
}
if err != nil {
log.Printf("File error: %v\n", err)
} else {
if info == nil {
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
}
if len(parts) > 1 {
path := strings.Split(parts[1], "/")
for i, key := range path {
if i > 0 {
m, ok := info.(yaml.MapSlice)
if ok {
m := info
if true {
found := false
for _, section := range m {
if section.Key == key {
info = section.Value
for i := 0; i < len(m.Content); i += 2 {
if m.Content[i].Value == key {
info = m.Content[i+1]
found = true
}
}
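
A short usage sketch for the reader API above; the file name petstore.yaml is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Optional: force a fresh read instead of serving from the package-level cache.
	compiler.DisableFileCache()

	// Hypothetical local file; the "#/info" fragment selects the "info" section
	// of the parsed document.
	info, err := compiler.ReadInfoForRef("petstore.yaml", "petstore.yaml#/info")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(compiler.Description(info))
}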

View File

@ -1,5 +1,13 @@
# Extensions
This directory contains support code for building Gnostic extensions and associated examples.
**Extension Support is experimental.**
Extensions are used to compile vendor or specification extensions into protocol buffer structures.
This directory contains support code for building Gnostic extension handlers and
associated examples.
Extension handlers can be used to compile vendor or specification extensions
into protocol buffer structures.
Like plugins, extension handlers are built as separate executables. Extension
bodies are written to extension handlers as serialized
ExtensionHandlerRequests.
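
As a minimal sketch, a handler that declines every extension (so gnostic falls back to its default handling) can be written with the Main helper shown later in this diff:

package main

import (
	"github.com/golang/protobuf/proto"

	gnostic_extension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// Decline every extension: returning handled=false tells gnostic this
	// binary does not recognize the extension.
	gnostic_extension_v1.Main(func(extensionName string, yamlInput string) (bool, proto.Message, error) {
		return false, nil, nil
	})
}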

View File

@ -1,148 +1,181 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.15.5
// source: extensions/extension.proto
package openapiextension_v1
package gnostic_extension_v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The version number of OpenAPI compiler.
// The version number of Gnostic.
type Version struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
}
func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (x *Version) Reset() {
*x = Version{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Version) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Version.ProtoReflect.Descriptor instead.
func (*Version) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{0}
return file_extensions_extension_proto_rawDescGZIP(), []int{0}
}
func (m *Version) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Version.Unmarshal(m, b)
}
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (m *Version) XXX_Merge(src proto.Message) {
xxx_messageInfo_Version.Merge(m, src)
}
func (m *Version) XXX_Size() int {
return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
xxx_messageInfo_Version.DiscardUnknown(m)
}
var xxx_messageInfo_Version proto.InternalMessageInfo
func (m *Version) GetMajor() int32 {
if m != nil {
return m.Major
func (x *Version) GetMajor() int32 {
if x != nil {
return x.Major
}
return 0
}
func (m *Version) GetMinor() int32 {
if m != nil {
return m.Minor
func (x *Version) GetMinor() int32 {
if x != nil {
return x.Minor
}
return 0
}
func (m *Version) GetPatch() int32 {
if m != nil {
return m.Patch
func (x *Version) GetPatch() int32 {
if x != nil {
return x.Patch
}
return 0
}
func (m *Version) GetSuffix() string {
if m != nil {
return m.Suffix
func (x *Version) GetSuffix() string {
if x != nil {
return x.Suffix
}
return ""
}
// An encoded Request is written to the ExtensionHandler's stdin.
type ExtensionHandlerRequest struct {
// The OpenAPI descriptions that were explicitly listed on the command line.
// The specifications will appear in the order they are specified to gnostic.
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The extension to process.
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
// The version number of openapi compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// The version number of Gnostic.
CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
}
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) Reset() {
*x = ExtensionHandlerRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{1}
return file_extensions_extension_proto_rawDescGZIP(), []int{1}
}
func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
}
func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
}
func (m *ExtensionHandlerRequest) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
}
func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if m != nil {
return m.Wrapper
func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if x != nil {
return x.Wrapper
}
return nil
}
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if m != nil {
return m.CompilerVersion
func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if x != nil {
return x.CompilerVersion
}
return nil
}
// The extension writes an encoded ExtensionHandlerResponse to stdout.
type ExtensionHandlerResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// true if the extension is handled by the extension handler; false otherwise
Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
// Error message. If non-empty, the extension handling failed.
// Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@ -151,150 +184,278 @@ type ExtensionHandlerResponse struct {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"`
Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
// text output
Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) Reset() {
*x = ExtensionHandlerResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{2}
return file_extensions_extension_proto_rawDescGZIP(), []int{2}
}
func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
}
func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
}
func (m *ExtensionHandlerResponse) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
}
func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
func (m *ExtensionHandlerResponse) GetHandled() bool {
if m != nil {
return m.Handled
func (x *ExtensionHandlerResponse) GetHandled() bool {
if x != nil {
return x.Handled
}
return false
}
func (m *ExtensionHandlerResponse) GetError() []string {
if m != nil {
return m.Error
func (x *ExtensionHandlerResponse) GetErrors() []string {
if x != nil {
return x.Errors
}
return nil
}
func (m *ExtensionHandlerResponse) GetValue() *any.Any {
if m != nil {
return m.Value
func (x *ExtensionHandlerResponse) GetValue() *anypb.Any {
if x != nil {
return x.Value
}
return nil
}
type Wrapper struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// version of the OpenAPI specification in which this extension was written.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Name of the extension
// Name of the extension.
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
// Must be a valid yaml for the proto
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// YAML-formatted extension value.
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
}
func (m *Wrapper) Reset() { *m = Wrapper{} }
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) Reset() {
*x = Wrapper{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Wrapper) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead.
func (*Wrapper) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{3}
return file_extensions_extension_proto_rawDescGZIP(), []int{3}
}
func (m *Wrapper) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Wrapper.Unmarshal(m, b)
}
func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
}
func (m *Wrapper) XXX_Merge(src proto.Message) {
xxx_messageInfo_Wrapper.Merge(m, src)
}
func (m *Wrapper) XXX_Size() int {
return xxx_messageInfo_Wrapper.Size(m)
}
func (m *Wrapper) XXX_DiscardUnknown() {
xxx_messageInfo_Wrapper.DiscardUnknown(m)
}
var xxx_messageInfo_Wrapper proto.InternalMessageInfo
func (m *Wrapper) GetVersion() string {
if m != nil {
return m.Version
func (x *Wrapper) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
func (m *Wrapper) GetExtensionName() string {
if m != nil {
return m.ExtensionName
func (x *Wrapper) GetExtensionName() string {
if x != nil {
return x.ExtensionName
}
return ""
}
func (m *Wrapper) GetYaml() string {
if m != nil {
return m.Yaml
func (x *Wrapper) GetYaml() string {
if x != nil {
return x.Yaml
}
return ""
}
func init() {
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
var File_extensions_extension_proto protoreflect.FileDescriptor
var file_extensions_extension_proto_rawDesc = []byte{
0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74,
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e,
0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14,
0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37,
0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07,
0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65,
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61,
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12,
0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57,
0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b,
0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }
var (
file_extensions_extension_proto_rawDescOnce sync.Once
file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc
)
var fileDescriptor_661e47e790f76671 = []byte{
// 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
func file_extensions_extension_proto_rawDescGZIP() []byte {
file_extensions_extension_proto_rawDescOnce.Do(func() {
file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData)
})
return file_extensions_extension_proto_rawDescData
}
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_extensions_extension_proto_goTypes = []interface{}{
(*Version)(nil), // 0: gnostic.extension.v1.Version
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
(*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
(*anypb.Any)(nil), // 4: google.protobuf.Any
}
var file_extensions_extension_proto_depIdxs = []int32{
3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version
4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_extensions_extension_proto_init() }
func file_extensions_extension_proto_init() {
if File_extensions_extension_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Version); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Wrapper); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_extensions_extension_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_extensions_extension_proto_goTypes,
DependencyIndexes: file_extensions_extension_proto_depIdxs,
MessageInfos: file_extensions_extension_proto_msgTypes,
}.Build()
File_extensions_extension_proto = out.File
file_extensions_extension_proto_rawDesc = nil
file_extensions_extension_proto_goTypes = nil
file_extensions_extension_proto_depIdxs = nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,9 @@
syntax = "proto3";
package gnostic.extension.v1;
import "google/protobuf/any.proto";
package openapiextension.v1;
// This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler
@ -26,7 +27,7 @@ option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to
// work with it directly.
option java_outer_classname = "OpenAPIExtensionV1";
option java_outer_classname = "GnosticExtension";
// The Java package name must be proto package name with proper prefix.
option java_package = "org.gnostic.v1";
@ -37,9 +38,13 @@ option java_package = "org.gnostic.v1";
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
//
option objc_class_prefix = "OAE"; // "OpenAPI Extension"
// "Gnostic Extension"
option objc_class_prefix = "GNX";
// The version number of OpenAPI compiler.
// The Go package name.
option go_package = "./extensions;gnostic_extension_v1";
// The version number of Gnostic.
message Version {
int32 major = 1;
int32 minor = 2;
@ -52,12 +57,11 @@ message Version {
// An encoded Request is written to the ExtensionHandler's stdin.
message ExtensionHandlerRequest {
// The OpenAPI descriptions that were explicitly listed on the command line.
// The specifications will appear in the order they are specified to gnostic.
// The extension to process.
Wrapper wrapper = 1;
// The version number of openapi compiler.
Version compiler_version = 3;
// The version number of Gnostic.
Version compiler_version = 2;
}
// The extension writes an encoded ExtensionHandlerResponse to stdout.
@ -66,7 +70,7 @@ message ExtensionHandlerResponse {
// true if the extension is handled by the extension handler; false otherwise
bool handled = 1;
// Error message. If non-empty, the extension handling failed.
// Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@ -75,7 +79,7 @@ message ExtensionHandlerResponse {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
repeated string error = 2;
repeated string errors = 2;
// text output
google.protobuf.Any value = 3;
@ -85,9 +89,9 @@ message Wrapper {
// version of the OpenAPI specification in which this extension was written.
string version = 1;
// Name of the extension
// Name of the extension.
string extension_name = 2;
// Must be a valid yaml for the proto
// YAML-formatted extension value.
string yaml = 3;
}
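
To make the response contract concrete, here is a sketch of the bytes a successful handler writes to stdout, field-for-field per the messages above; wrapperspb.String stands in for a real extension message type:

package main

import (
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/protobuf/types/known/wrapperspb"

	gnostic_extension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// Wrap the handler's output message in a google.protobuf.Any, as the
	// Value field requires. wrapperspb.String is a placeholder message type.
	value, err := ptypes.MarshalAny(wrapperspb.String("example output"))
	if err != nil {
		log.Fatal(err)
	}
	response := &gnostic_extension_v1.ExtensionHandlerResponse{
		Handled: true,
		Errors:  []string{},
		Value:   value,
	}
	bytes, _ := proto.Marshal(response)
	os.Stdout.Write(bytes)
}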

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,71 +12,53 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package openapiextension_v1
package gnostic_extension_v1
import (
"fmt"
"io/ioutil"
"log"
"os"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
)
type documentHandler func(version string, extensionName string, document string)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
func forInputYamlFromOpenapic(handler documentHandler) {
// Main implements the main program of an extension handler.
func Main(handler extensionHandler) {
// unpack the request
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Println("File error:", err.Error())
log.Println("File error:", err.Error())
os.Exit(1)
}
if len(data) == 0 {
fmt.Println("No input data.")
log.Println("No input data.")
os.Exit(1)
}
request := &ExtensionHandlerRequest{}
err = proto.Unmarshal(data, request)
if err != nil {
fmt.Println("Input error:", err.Error())
log.Println("Input error:", err.Error())
os.Exit(1)
}
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
}
// ProcessExtension calls the handler for a specified extension.
func ProcessExtension(handleExtension extensionHandler) {
response := &ExtensionHandlerResponse{}
forInputYamlFromOpenapic(
func(version string, extensionName string, yamlInput string) {
var newObject proto.Message
var err error
handled, newObject, err := handleExtension(extensionName, yamlInput)
if !handled {
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
// If we reach here, then the extension is handled
response.Handled = true
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
response.Value, err = ptypes.MarshalAny(newObject)
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
})
// call the handler
handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml)
// respond with the output of the handler
response := &ExtensionHandlerResponse{
Handled: false, // default assumption
Errors: make([]string, 0),
}
if err != nil {
response.Errors = append(response.Errors, err.Error())
} else if handled {
response.Handled = true
response.Value, err = ptypes.MarshalAny(output)
if err != nil {
response.Errors = append(response.Errors, err.Error())
}
}
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
}

View File

@ -0,0 +1,4 @@
# jsonschema
This directory contains code for reading, writing, and manipulating JSON
schemas.

View File

@ -0,0 +1,84 @@
// THIS FILE IS AUTOMATICALLY GENERATED.
package jsonschema
import (
"encoding/base64"
)
func baseSchemaBytes() ([]byte, error){
return base64.StdEncoding.DecodeString(
`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}

View File

@ -0,0 +1,229 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"strings"
)
//
// DISPLAY
// The following methods display Schemas.
//
// Description returns a string representation of a string or string array.
func (s *StringOrStringArray) Description() string {
if s.String != nil {
return *s.String
}
if s.StringArray != nil {
return strings.Join(*s.StringArray, ", ")
}
return ""
}
// Returns a string representation of a Schema.
func (schema *Schema) String() string {
return schema.describeSchema("")
}
// Helper: Returns a string representation of a Schema indented by a specified string.
func (schema *Schema) describeSchema(indent string) string {
result := ""
if schema.Schema != nil {
result += indent + "$schema: " + *(schema.Schema) + "\n"
}
if schema.ID != nil {
result += indent + "id: " + *(schema.ID) + "\n"
}
if schema.MultipleOf != nil {
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
}
if schema.Maximum != nil {
result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum))
}
if schema.ExclusiveMaximum != nil {
result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum))
}
if schema.ExclusiveMinimum != nil {
result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength))
}
if schema.MinLength != nil {
result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength))
}
if schema.Pattern != nil {
result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern))
}
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
result += indent + "additionalItems:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalItems.Boolean)
result += indent + fmt.Sprintf("additionalItems: %+v\n", b)
}
}
if schema.Items != nil {
result += indent + "items:\n"
items := schema.Items
if items.SchemaArray != nil {
for i, s := range *(items.SchemaArray) {
result += indent + " " + fmt.Sprintf("%d", i) + ":\n"
result += s.describeSchema(indent + " " + " ")
}
} else if items.Schema != nil {
result += items.Schema.describeSchema(indent + " " + " ")
}
}
if schema.MaxItems != nil {
result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems))
}
if schema.MinItems != nil {
result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems))
}
if schema.UniqueItems != nil {
result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems))
}
if schema.MaxProperties != nil {
result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties))
}
if schema.MinProperties != nil {
result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties))
}
if schema.Required != nil {
result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required))
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
result += indent + "additionalProperties:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalProperties.Boolean)
result += indent + fmt.Sprintf("additionalProperties: %+v\n", b)
}
}
if schema.Properties != nil {
result += indent + "properties:\n"
for _, pair := range *(schema.Properties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.PatternProperties != nil {
result += indent + "patternProperties:\n"
for _, pair := range *(schema.PatternProperties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Dependencies != nil {
result += indent + "dependencies:\n"
for _, pair := range *(schema.Dependencies) {
name := pair.Name
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
} else {
a := schemaOrStringArray.StringArray
if a != nil {
result += indent + " " + name + ":\n"
for _, s2 := range *a {
result += indent + " " + " " + s2 + "\n"
}
}
}
}
}
if schema.Enumeration != nil {
result += indent + "enumeration:\n"
for _, value := range *(schema.Enumeration) {
if value.String != nil {
result += indent + " " + fmt.Sprintf("%+v\n", *value.String)
} else {
result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool)
}
}
}
if schema.Type != nil {
result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description())
}
if schema.AllOf != nil {
result += indent + "allOf:\n"
for _, s := range *(schema.AllOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.AnyOf != nil {
result += indent + "anyOf:\n"
for _, s := range *(schema.AnyOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.OneOf != nil {
result += indent + "oneOf:\n"
for _, s := range *(schema.OneOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.Not != nil {
result += indent + "not:\n"
result += schema.Not.describeSchema(indent + " ")
}
if schema.Definitions != nil {
result += indent + "definitions:\n"
for _, pair := range *(schema.Definitions) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Title != nil {
result += indent + "title: " + *(schema.Title) + "\n"
}
if schema.Description != nil {
result += indent + "description: " + *(schema.Description) + "\n"
}
if schema.Default != nil {
result += indent + "default:\n"
result += indent + fmt.Sprintf(" %+v\n", *(schema.Default))
}
if schema.Format != nil {
result += indent + "format: " + *(schema.Format) + "\n"
}
if schema.Ref != nil {
result += indent + "$ref: " + *(schema.Ref) + "\n"
}
return result
}
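For orientation, a minimal sketch of how these display methods might be used (not part of the vendored file; it assumes the vendored import path github.com/googleapis/gnostic/jsonschema, and NewBaseSchema is defined in base.go further below):
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// NewBaseSchema loads the embedded JSON Schema draft-04 meta-schema.
	schema, err := jsonschema.NewBaseSchema()
	if err != nil {
		panic(err)
	}
	// String walks the schema recursively via describeSchema.
	fmt.Println(schema.String())
}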

View File

@ -0,0 +1,228 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package jsonschema supports the reading, writing, and manipulation
// of JSON Schemas.
package jsonschema
import "gopkg.in/yaml.v3"
// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.
// All fields are pointers and are nil if the associated values
// are not specified.
type Schema struct {
Schema *string // $schema
ID *string // id keyword used for $ref resolution scope
Ref *string // $ref, i.e. JSON Pointers
// http://json-schema.org/latest/json-schema-validation.html
// 5.1. Validation keywords for numeric instances (number and integer)
MultipleOf *SchemaNumber
Maximum *SchemaNumber
ExclusiveMaximum *bool
Minimum *SchemaNumber
ExclusiveMinimum *bool
// 5.2. Validation keywords for strings
MaxLength *int64
MinLength *int64
Pattern *string
// 5.3. Validation keywords for arrays
AdditionalItems *SchemaOrBoolean
Items *SchemaOrSchemaArray
MaxItems *int64
MinItems *int64
UniqueItems *bool
// 5.4. Validation keywords for objects
MaxProperties *int64
MinProperties *int64
Required *[]string
AdditionalProperties *SchemaOrBoolean
Properties *[]*NamedSchema
PatternProperties *[]*NamedSchema
Dependencies *[]*NamedSchemaOrStringArray
// 5.5. Validation keywords for any instance type
Enumeration *[]SchemaEnumValue
Type *StringOrStringArray
AllOf *[]*Schema
AnyOf *[]*Schema
OneOf *[]*Schema
Not *Schema
Definitions *[]*NamedSchema
// 6. Metadata keywords
Title *string
Description *string
Default *yaml.Node
// 7. Semantic validation with "format"
Format *string
}
// These helper structs represent "combination" types that generally can
// have values of one type or another. All are used to represent parts
// of Schemas.
// SchemaNumber represents a value that can be either an Integer or a Float.
type SchemaNumber struct {
Integer *int64
Float *float64
}
// NewSchemaNumberWithInteger creates and returns a new object
func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
result := &SchemaNumber{}
result.Integer = &i
return result
}
// NewSchemaNumberWithFloat creates and returns a new object
func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
result := &SchemaNumber{}
result.Float = &f
return result
}
// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
type SchemaOrBoolean struct {
Schema *Schema
Boolean *bool
}
// NewSchemaOrBooleanWithSchema creates and returns a new object
func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Schema = s
return result
}
// NewSchemaOrBooleanWithBoolean creates and returns a new object
func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Boolean = &b
return result
}
// StringOrStringArray represents a value that can be either
// a String or an Array of Strings.
type StringOrStringArray struct {
String *string
StringArray *[]string
}
// NewStringOrStringArrayWithString creates and returns a new object
func NewStringOrStringArrayWithString(s string) *StringOrStringArray {
result := &StringOrStringArray{}
result.String = &s
return result
}
// NewStringOrStringArrayWithStringArray creates and returns a new object
func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray {
result := &StringOrStringArray{}
result.StringArray = &a
return result
}
// SchemaOrStringArray represents a value that can be either
// a Schema or an Array of Strings.
type SchemaOrStringArray struct {
Schema *Schema
StringArray *[]string
}
// SchemaOrSchemaArray represents a value that can be either
// a Schema or an Array of Schemas.
type SchemaOrSchemaArray struct {
Schema *Schema
SchemaArray *[]*Schema
}
// NewSchemaOrSchemaArrayWithSchema creates and returns a new object
func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.Schema = s
return result
}
// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object
func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.SchemaArray = &a
return result
}
// SchemaEnumValue represents a value that can be part of an
// enumeration in a Schema.
type SchemaEnumValue struct {
String *string
Bool *bool
}
// NamedSchema is a name-value pair that is used to emulate maps
// with ordered keys.
type NamedSchema struct {
Name string
Value *Schema
}
// NewNamedSchema creates and returns a new object
func NewNamedSchema(name string, value *Schema) *NamedSchema {
return &NamedSchema{Name: name, Value: value}
}
// NamedSchemaOrStringArray is a name-value pair that is used
// to emulate maps with ordered keys.
type NamedSchemaOrStringArray struct {
Name string
Value *SchemaOrStringArray
}
// namedSchemaArrayElementWithName returns the element of the array with the specified name, or nil if none matches.
func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema {
if array == nil {
return nil
}
for _, pair := range *array {
if pair.Name == name {
return pair.Value
}
}
return nil
}
// PropertyWithName returns the selected element.
func (s *Schema) PropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Properties, name)
}
// PatternPropertyWithName returns the selected element.
func (s *Schema) PatternPropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.PatternProperties, name)
}
// DefinitionWithName returns the selected element.
func (s *Schema) DefinitionWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Definitions, name)
}
// AddProperty adds a named property.
func (s *Schema) AddProperty(name string, property *Schema) {
*s.Properties = append(*s.Properties, NewNamedSchema(name, property))
}
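As a hedged illustration of how the combination types and helpers above compose (a sketch, not vendored code), the following builds a small object schema by hand. Note that AddProperty dereferences s.Properties, so the slice pointer must be initialized first:
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// Sketch: build {"type": "object", "properties": {"name": {"type": "string", "maxLength": 64}}}.
	maxLen := int64(64)
	name := &jsonschema.Schema{
		Type:      jsonschema.NewStringOrStringArrayWithString("string"),
		MaxLength: &maxLen,
	}
	props := make([]*jsonschema.NamedSchema, 0)
	obj := &jsonschema.Schema{
		Type:       jsonschema.NewStringOrStringArrayWithString("object"),
		Properties: &props, // must be non-nil before AddProperty dereferences it
	}
	obj.AddProperty("name", name)
	fmt.Println(obj.String())
}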

View File

@ -0,0 +1,394 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"log"
"strings"
)
//
// OPERATIONS
// The following methods perform operations on Schemas.
//
// IsEmpty returns true if no members of the Schema are specified.
func (schema *Schema) IsEmpty() bool {
return (schema.Schema == nil) &&
(schema.ID == nil) &&
(schema.MultipleOf == nil) &&
(schema.Maximum == nil) &&
(schema.ExclusiveMaximum == nil) &&
(schema.Minimum == nil) &&
(schema.ExclusiveMinimum == nil) &&
(schema.MaxLength == nil) &&
(schema.MinLength == nil) &&
(schema.Pattern == nil) &&
(schema.AdditionalItems == nil) &&
(schema.Items == nil) &&
(schema.MaxItems == nil) &&
(schema.MinItems == nil) &&
(schema.UniqueItems == nil) &&
(schema.MaxProperties == nil) &&
(schema.MinProperties == nil) &&
(schema.Required == nil) &&
(schema.AdditionalProperties == nil) &&
(schema.Properties == nil) &&
(schema.PatternProperties == nil) &&
(schema.Dependencies == nil) &&
(schema.Enumeration == nil) &&
(schema.Type == nil) &&
(schema.AllOf == nil) &&
(schema.AnyOf == nil) &&
(schema.OneOf == nil) &&
(schema.Not == nil) &&
(schema.Definitions == nil) &&
(schema.Title == nil) &&
(schema.Description == nil) &&
(schema.Default == nil) &&
(schema.Format == nil) &&
(schema.Ref == nil)
}
// IsEqual returns true if two schemas are equal.
func (schema *Schema) IsEqual(schema2 *Schema) bool {
return schema.String() == schema2.String()
}
// SchemaOperation represents a function that can be applied to a Schema.
type SchemaOperation func(schema *Schema, context string)
// applyToSchemas applies a specified function to a Schema and all of the Schemas that it contains.
func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalItems")
}
}
if schema.Items != nil {
if schema.Items.SchemaArray != nil {
for _, s := range *(schema.Items.SchemaArray) {
s.applyToSchemas(operation, "Items.SchemaArray")
}
} else if schema.Items.Schema != nil {
schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
}
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalProperties")
}
}
if schema.Properties != nil {
for _, pair := range *(schema.Properties) {
s := pair.Value
s.applyToSchemas(operation, "Properties")
}
}
if schema.PatternProperties != nil {
for _, pair := range *(schema.PatternProperties) {
s := pair.Value
s.applyToSchemas(operation, "PatternProperties")
}
}
if schema.Dependencies != nil {
for _, pair := range *(schema.Dependencies) {
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
s.applyToSchemas(operation, "Dependencies")
}
}
}
if schema.AllOf != nil {
for _, s := range *(schema.AllOf) {
s.applyToSchemas(operation, "AllOf")
}
}
if schema.AnyOf != nil {
for _, s := range *(schema.AnyOf) {
s.applyToSchemas(operation, "AnyOf")
}
}
if schema.OneOf != nil {
for _, s := range *(schema.OneOf) {
s.applyToSchemas(operation, "OneOf")
}
}
if schema.Not != nil {
schema.Not.applyToSchemas(operation, "Not")
}
if schema.Definitions != nil {
for _, pair := range *(schema.Definitions) {
s := pair.Value
s.applyToSchemas(operation, "Definitions")
}
}
operation(schema, context)
}
// CopyProperties copies all non-nil properties from the source Schema to the receiving Schema.
func (schema *Schema) CopyProperties(source *Schema) {
if source.Schema != nil {
schema.Schema = source.Schema
}
if source.ID != nil {
schema.ID = source.ID
}
if source.MultipleOf != nil {
schema.MultipleOf = source.MultipleOf
}
if source.Maximum != nil {
schema.Maximum = source.Maximum
}
if source.ExclusiveMaximum != nil {
schema.ExclusiveMaximum = source.ExclusiveMaximum
}
if source.Minimum != nil {
schema.Minimum = source.Minimum
}
if source.ExclusiveMinimum != nil {
schema.ExclusiveMinimum = source.ExclusiveMinimum
}
if source.MaxLength != nil {
schema.MaxLength = source.MaxLength
}
if source.MinLength != nil {
schema.MinLength = source.MinLength
}
if source.Pattern != nil {
schema.Pattern = source.Pattern
}
if source.AdditionalItems != nil {
schema.AdditionalItems = source.AdditionalItems
}
if source.Items != nil {
schema.Items = source.Items
}
if source.MaxItems != nil {
schema.MaxItems = source.MaxItems
}
if source.MinItems != nil {
schema.MinItems = source.MinItems
}
if source.UniqueItems != nil {
schema.UniqueItems = source.UniqueItems
}
if source.MaxProperties != nil {
schema.MaxProperties = source.MaxProperties
}
if source.MinProperties != nil {
schema.MinProperties = source.MinProperties
}
if source.Required != nil {
schema.Required = source.Required
}
if source.AdditionalProperties != nil {
schema.AdditionalProperties = source.AdditionalProperties
}
if source.Properties != nil {
schema.Properties = source.Properties
}
if source.PatternProperties != nil {
schema.PatternProperties = source.PatternProperties
}
if source.Dependencies != nil {
schema.Dependencies = source.Dependencies
}
if source.Enumeration != nil {
schema.Enumeration = source.Enumeration
}
if source.Type != nil {
schema.Type = source.Type
}
if source.AllOf != nil {
schema.AllOf = source.AllOf
}
if source.AnyOf != nil {
schema.AnyOf = source.AnyOf
}
if source.OneOf != nil {
schema.OneOf = source.OneOf
}
if source.Not != nil {
schema.Not = source.Not
}
if source.Definitions != nil {
schema.Definitions = source.Definitions
}
if source.Title != nil {
schema.Title = source.Title
}
if source.Description != nil {
schema.Description = source.Description
}
if source.Default != nil {
schema.Default = source.Default
}
if source.Format != nil {
schema.Format = source.Format
}
if source.Ref != nil {
schema.Ref = source.Ref
}
}
// TypeIs returns true if the Type of a Schema includes the specified type
func (schema *Schema) TypeIs(typeName string) bool {
if schema.Type != nil {
// the schema Type is either a string or an array of strings
if schema.Type.String != nil {
return (*(schema.Type.String) == typeName)
} else if schema.Type.StringArray != nil {
for _, n := range *(schema.Type.StringArray) {
if n == typeName {
return true
}
}
}
}
return false
}
// ResolveRefs resolves "$ref" elements in a Schema and its children.
// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf,
// the reference is kept and we expect downstream tools to separately model these
// referenced schemas.
func (schema *Schema) ResolveRefs() {
rootSchema := schema
count := 1
for count > 0 {
count = 0
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.Ref != nil {
resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
if err != nil {
log.Printf("%+v", err)
} else if resolvedRef.TypeIs("object") {
// don't substitute for objects, we'll model the referenced schema with a class
} else if context == "OneOf" {
// don't substitute for references inside oneOf declarations
} else if resolvedRef.OneOf != nil {
// don't substitute for references that contain oneOf declarations
} else if resolvedRef.AdditionalProperties != nil {
// don't substitute for references that look like objects
} else {
schema.Ref = nil
schema.CopyProperties(resolvedRef)
count++
}
}
}, "")
}
}
// resolveJSONPointer resolves JSON pointers.
// The current implementation is crude and specific to OpenAPI 2.0 schemas.
// It returns an error for any pointer that it is unable to resolve.
func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
parts := strings.Split(ref, "#")
if len(parts) == 2 {
documentName := parts[0] + "#"
if documentName == "#" && schema.ID != nil {
documentName = *(schema.ID)
}
path := parts[1]
document := schemas[documentName]
pathParts := strings.Split(path, "/")
// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
if len(pathParts) == 1 {
return document, nil
} else if len(pathParts) == 3 {
switch pathParts[1] {
case "definitions":
dictionary := document.Definitions
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
case "properties":
dictionary := document.Properties
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
default:
break
}
}
}
if result == nil {
return nil, fmt.Errorf("unresolved pointer: %+v", ref)
}
return result, nil
}
// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
func (schema *Schema) ResolveAllOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AllOf != nil {
for _, allOf := range *(schema.AllOf) {
schema.CopyProperties(allOf)
}
schema.AllOf = nil
}
}, "resolveAllOfs")
}
// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
func (schema *Schema) ResolveAnyOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AnyOf != nil {
schema.OneOf = schema.AnyOf
schema.AnyOf = nil
}
}, "resolveAnyOfs")
}
// stringptr returns a pointer to a copy of the passed-in string.
func stringptr(input string) (output *string) {
return &input
}
// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperty(name string) {
*schema.Properties = append(*schema.Properties,
NewNamedSchema(name,
&Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)}))
}
// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperties(names []string) {
for _, name := range names {
schema.CopyOfficialSchemaProperty(name)
}
}
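A hedged sketch of the normalization pipeline these operations suggest; normalize and the file name are hypothetical, and NewSchemaFromFile comes from the reader further below:
// normalize is a hypothetical helper; it assumes
// "github.com/googleapis/gnostic/jsonschema" is imported.
func normalize(filename string) (*jsonschema.Schema, error) {
	schema, err := jsonschema.NewSchemaFromFile(filename)
	if err != nil {
		return nil, err
	}
	schema.ResolveRefs()   // inline $ref targets, except objects and oneOf-related cases
	schema.ResolveAllOfs() // merge allOf members into their parent schemas
	schema.ResolveAnyOfs() // rewrite anyOf as oneOf
	return schema, nil
}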

View File

@ -0,0 +1,442 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate go run generate-base.go
package jsonschema
import (
"fmt"
"io/ioutil"
"strconv"
"gopkg.in/yaml.v3"
)
// This is a global map of all known Schemas.
// It is initialized when the first Schema is created and inserted.
var schemas map[string]*Schema
// NewBaseSchema builds a schema object from an embedded JSON representation.
func NewBaseSchema() (schema *Schema, err error) {
b, err := baseSchemaBytes()
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(b, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromFile reads a schema from a file.
// Currently this assumes that schemas are stored in the source distribution of this project.
func NewSchemaFromFile(filename string) (schema *Schema, err error) {
file, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(file, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromObject constructs a schema from a parsed JSON object.
// Due to the complexity of the schema representation, this is a
// custom reader and not the standard Go JSON reader (encoding/json).
func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
switch jsonData.Kind {
case yaml.DocumentNode:
return NewSchemaFromObject(jsonData.Content[0])
case yaml.MappingNode:
schema := &Schema{}
for i := 0; i < len(jsonData.Content); i += 2 {
k := jsonData.Content[i].Value
v := jsonData.Content[i+1]
switch k {
case "$schema":
schema.Schema = schema.stringValue(v)
case "id":
schema.ID = schema.stringValue(v)
case "multipleOf":
schema.MultipleOf = schema.numberValue(v)
case "maximum":
schema.Maximum = schema.numberValue(v)
case "exclusiveMaximum":
schema.ExclusiveMaximum = schema.boolValue(v)
case "minimum":
schema.Minimum = schema.numberValue(v)
case "exclusiveMinimum":
schema.ExclusiveMinimum = schema.boolValue(v)
case "maxLength":
schema.MaxLength = schema.intValue(v)
case "minLength":
schema.MinLength = schema.intValue(v)
case "pattern":
schema.Pattern = schema.stringValue(v)
case "additionalItems":
schema.AdditionalItems = schema.schemaOrBooleanValue(v)
case "items":
schema.Items = schema.schemaOrSchemaArrayValue(v)
case "maxItems":
schema.MaxItems = schema.intValue(v)
case "minItems":
schema.MinItems = schema.intValue(v)
case "uniqueItems":
schema.UniqueItems = schema.boolValue(v)
case "maxProperties":
schema.MaxProperties = schema.intValue(v)
case "minProperties":
schema.MinProperties = schema.intValue(v)
case "required":
schema.Required = schema.arrayOfStringsValue(v)
case "additionalProperties":
schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
case "properties":
schema.Properties = schema.mapOfSchemasValue(v)
case "patternProperties":
schema.PatternProperties = schema.mapOfSchemasValue(v)
case "dependencies":
schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
case "enum":
schema.Enumeration = schema.arrayOfEnumValuesValue(v)
case "type":
schema.Type = schema.stringOrStringArrayValue(v)
case "allOf":
schema.AllOf = schema.arrayOfSchemasValue(v)
case "anyOf":
schema.AnyOf = schema.arrayOfSchemasValue(v)
case "oneOf":
schema.OneOf = schema.arrayOfSchemasValue(v)
case "not":
schema.Not = NewSchemaFromObject(v)
case "definitions":
schema.Definitions = schema.mapOfSchemasValue(v)
case "title":
schema.Title = schema.stringValue(v)
case "description":
schema.Description = schema.stringValue(v)
case "default":
schema.Default = v
case "format":
schema.Format = schema.stringValue(v)
case "$ref":
schema.Ref = schema.stringValue(v)
default:
fmt.Printf("UNSUPPORTED (%s)\n", k)
}
}
// insert schema in global map
if schema.ID != nil {
if schemas == nil {
schemas = make(map[string]*Schema)
}
schemas[*(schema.ID)] = schema
}
return schema
default:
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
return nil
}
}
//
// BUILDERS
// The following methods build elements of Schemas from yaml.Node values.
// Each returns nil if it is unable to build the desired element.
//
// stringValue gets the string value of a yaml.Node if possible.
func (schema *Schema) stringValue(v *yaml.Node) *string {
switch v.Kind {
case yaml.ScalarNode:
return &v.Value
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// numberValue gets the numeric value of a yaml.Node if possible.
func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
number := &SchemaNumber{}
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
number.Float = &v2
return number
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
number.Integer = &v2
return number
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// intValue gets the integer value of a yaml.Node if possible.
func (schema *Schema) intValue(v *yaml.Node) *int64 {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
v3 := int64(v2)
return &v3
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
return &v2
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
return nil
}
// boolValue gets the bool value of a yaml.Node if possible.
func (schema *Schema) boolValue(v *yaml.Node) *bool {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!bool":
v2, _ := strconv.ParseBool(v.Value)
return &v2
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
return nil
}
// mapOfSchemasValue gets a map of Schemas from a yaml.Node if possible.
func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
switch v.Kind {
case yaml.MappingNode:
m := make([]*NamedSchema, 0)
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
m = append(m, pair)
}
return &m
default:
fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// arrayOfSchemasValue gets an array of Schemas from a yaml.Node if possible.
func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
}
}
return &m
case yaml.MappingNode:
m := make([]*Schema, 0)
s := NewSchemaFromObject(v)
m = append(m, s)
return &m
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// schemaOrSchemaArrayValue gets a Schema or an array of Schemas from a yaml.Node if possible.
func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
}
}
return &SchemaOrSchemaArray{SchemaArray: &m}
case yaml.MappingNode:
s := NewSchemaFromObject(v)
return &SchemaOrSchemaArray{Schema: s}
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
}
return nil
}
// arrayOfStringsValue gets an array of strings from a yaml.Node if possible.
func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
switch v.Kind {
case yaml.ScalarNode:
a := []string{v.Value}
return &a
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
return &a
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// stringOrStringArrayValue gets a string or an array of strings from a yaml.Node if possible.
func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
switch v.Kind {
case yaml.ScalarNode:
s := &StringOrStringArray{}
s.String = &v.Value
return s
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
s := &StringOrStringArray{}
s.StringArray = &a
return s
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// arrayOfEnumValuesValue gets an array of enum values from a yaml.Node if possible.
func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
a := make([]SchemaEnumValue, 0)
switch v.Kind {
case yaml.SequenceNode:
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
switch v2.Tag {
case "!!str":
a = append(a, SchemaEnumValue{String: &v2.Value})
case "!!bool":
v3, _ := strconv.ParseBool(v2.Value)
a = append(a, SchemaEnumValue{Bool: &v3})
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
}
return &a
}
// mapOfSchemasOrStringArraysValue gets a map of schemas or string arrays from a yaml.Node if possible.
func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
m := make([]*NamedSchemaOrStringArray, 0)
switch v.Kind {
case yaml.MappingNode:
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
switch v2.Kind {
case yaml.SequenceNode:
a := make([]string, 0)
for _, v3 := range v2.Content {
switch v3.Kind {
case yaml.ScalarNode:
a = append(a, v3.Value)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
}
}
s := &SchemaOrStringArray{}
s.StringArray = &a
pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
m = append(m, pair)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
}
return &m
}
// schemaOrBooleanValue gets a schema or a boolean value from a yaml.Node if possible.
func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean {
schemaOrBoolean := &SchemaOrBoolean{}
switch v.Kind {
case yaml.ScalarNode:
v2, _ := strconv.ParseBool(v.Value)
schemaOrBoolean.Boolean = &v2
case yaml.MappingNode:
schemaOrBoolean.Schema = NewSchemaFromObject(v)
default:
fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v)
}
return schemaOrBoolean
}
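Because NewSchemaFromObject is a custom yaml.Node reader rather than encoding/json, parsing raw bytes goes through yaml.v3 first. A hedged sketch (parseSchema is hypothetical; JSON bytes parse here because JSON is a subset of YAML):
// parseSchema is a hypothetical helper; it assumes imports of
// "gopkg.in/yaml.v3" and "github.com/googleapis/gnostic/jsonschema".
func parseSchema(b []byte) (*jsonschema.Schema, error) {
	var node yaml.Node
	if err := yaml.Unmarshal(b, &node); err != nil {
		return nil, err
	}
	// NewSchemaFromObject unwraps the yaml.DocumentNode itself.
	return jsonschema.NewSchemaFromObject(&node), nil
}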

View File

@ -0,0 +1,150 @@
{
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "#/definitions/positiveInteger" },
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/positiveInteger" },
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}

View File

@ -0,0 +1,369 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"gopkg.in/yaml.v3"
)
const indentation = " "
func renderMappingNode(node *yaml.Node, indent string) (result string) {
result = "{\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i += 2 {
// first print the key
key := node.Content[i].Value
result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
// then the value
value := node.Content[i+1]
switch value.Kind {
case yaml.ScalarNode:
result += "\"" + value.Value + "\""
case yaml.MappingNode:
result += renderMappingNode(value, innerIndent)
case yaml.SequenceNode:
result += renderSequenceNode(value, innerIndent)
default:
result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
}
if i < len(node.Content)-2 {
result += ","
}
result += "\n"
}
result += indent + "}"
return result
}
func renderSequenceNode(node *yaml.Node, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i++ {
item := node.Content[i]
switch item.Kind {
case yaml.ScalarNode:
result += innerIndent + "\"" + item.Value + "\""
case yaml.MappingNode:
result += innerIndent + renderMappingNode(item, innerIndent)
default:
result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
}
if i < len(node.Content)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
func renderStringArray(array []string, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i, item := range array {
result += innerIndent + "\"" + item + "\""
if i < len(array)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
// Render renders a yaml.Node as JSON
func Render(node *yaml.Node) string {
if node.Kind == yaml.DocumentNode {
if len(node.Content) == 1 {
return Render(node.Content[0])
}
} else if node.Kind == yaml.MappingNode {
return renderMappingNode(node, "") + "\n"
} else if node.Kind == yaml.SequenceNode {
return renderSequenceNode(node, "") + "\n"
}
return ""
}
func (object *SchemaNumber) nodeValue() *yaml.Node {
if object.Integer != nil {
return nodeForInt64(*object.Integer)
} else if object.Float != nil {
return nodeForFloat64(*object.Float)
} else {
return nil
}
}
func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.Boolean != nil {
return nodeForBoolean(*object.Boolean)
} else {
return nil
}
}
func nodeForStringArray(array []string) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, nodeForString(item))
}
return nodeForSequence(content)
}
func nodeForSchemaArray(array []*Schema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func (object *StringOrStringArray) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.SchemaArray != nil {
return nodeForSchemaArray(*(object.SchemaArray))
} else {
return nil
}
}
func (object *SchemaEnumValue) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.Bool != nil {
return nodeForBoolean(*object.Bool)
} else {
return nil
}
}
func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range *array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func nodeForMapping(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: content,
}
}
func nodeForSequence(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Content: content,
}
}
func nodeForString(value string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: value,
}
}
func nodeForBoolean(value bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", value),
}
}
func nodeForInt64(value int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", value),
}
}
func nodeForFloat64(value float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%f", value),
}
}
func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
nodes = append(nodes, nodeForString(name))
nodes = append(nodes, value)
return nodes
}
func (schema *Schema) nodeValue() *yaml.Node {
n := &yaml.Node{Kind: yaml.MappingNode}
content := make([]*yaml.Node, 0)
if schema.Title != nil {
content = appendPair(content, "title", nodeForString(*schema.Title))
}
if schema.ID != nil {
content = appendPair(content, "id", nodeForString(*schema.ID))
}
if schema.Schema != nil {
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
}
if schema.Type != nil {
content = appendPair(content, "type", schema.Type.nodeValue())
}
if schema.Items != nil {
content = appendPair(content, "items", schema.Items.nodeValue())
}
if schema.Description != nil {
content = appendPair(content, "description", nodeForString(*schema.Description))
}
if schema.Required != nil {
content = appendPair(content, "required", nodeForStringArray(*schema.Required))
}
if schema.AdditionalProperties != nil {
content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
}
if schema.PatternProperties != nil {
content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
}
if schema.Properties != nil {
content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
}
if schema.Dependencies != nil {
content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
}
if schema.Ref != nil {
content = appendPair(content, "$ref", nodeForString(*schema.Ref))
}
if schema.MultipleOf != nil {
content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
}
if schema.Maximum != nil {
content = appendPair(content, "maximum", schema.Maximum.nodeValue())
}
if schema.ExclusiveMaximum != nil {
content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
content = appendPair(content, "minimum", schema.Minimum.nodeValue())
}
if schema.ExclusiveMinimum != nil {
content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
}
if schema.MinLength != nil {
content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
}
if schema.Pattern != nil {
content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
}
if schema.AdditionalItems != nil {
content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
}
if schema.MaxItems != nil {
content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
}
if schema.MinItems != nil {
content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
}
if schema.UniqueItems != nil {
content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
}
if schema.MaxProperties != nil {
content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
}
if schema.MinProperties != nil {
content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
}
if schema.Enumeration != nil {
content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
}
if schema.AllOf != nil {
content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
}
if schema.AnyOf != nil {
content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
}
if schema.OneOf != nil {
content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
}
if schema.Not != nil {
content = appendPair(content, "not", schema.Not.nodeValue())
}
if schema.Definitions != nil {
content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
}
if schema.Default != nil {
// serialization of "default" values is not yet implemented
}
if schema.Format != nil {
content = appendPair(content, "format", nodeForString(*schema.Format))
}
n.Content = content
return n
}
// JSONString returns a json representation of a schema.
func (schema *Schema) JSONString() string {
node := schema.nodeValue()
return Render(node)
}
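A hedged round-trip sketch tying the reader and writer together (roundTrip and the file name are hypothetical):
// roundTrip is a hypothetical helper; it assumes the
// "github.com/googleapis/gnostic/jsonschema" import.
func roundTrip(filename string) (string, error) {
	schema, err := jsonschema.NewSchemaFromFile(filename)
	if err != nil {
		return "", err
	}
	// Note: per nodeValue above, "default" members are currently not serialized.
	return schema.JSONString(), nil
}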

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -41,6 +41,9 @@ option java_package = "org.openapi_v2";
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS";
// The Go package name.
option go_package = "./openapiv2;openapi_v2";
message AdditionalPropertiesItem {
oneof oneof {
Schema schema = 1;
@ -553,7 +556,7 @@ message Response {
repeated NamedAny vendor_extension = 5;
}
// One or more JSON representations for parameters
// One or more JSON representations for responses
message ResponseDefinitions {
repeated NamedResponse additional_properties = 1;
}

View File

@ -1,16 +1,14 @@
# OpenAPI v2 Protocol Buffer Models
This directory contains a Protocol Buffer-language model
and related code for supporting OpenAPI v2.
This directory contains a Protocol Buffer-language model and related code for
supporting OpenAPI v2.
Gnostic applications and plugins can use OpenAPIv2.proto
to generate Protocol Buffer support code for their preferred languages.
Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
Buffer support code for their preferred languages.
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
descriptions into the Protocol Buffer-based datastructures
generated from OpenAPIv2.proto.
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
compiler generator, and OpenAPIv2.pb.go is generated by
protoc, the Protocol Buffer compiler, and protoc-gen-go, the
Protocol Buffer Go code generation plugin.
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.

View File

@ -0,0 +1,41 @@
// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openapi_v2
import (
"github.com/googleapis/gnostic/compiler"
"gopkg.in/yaml.v3"
)
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
func ParseDocument(b []byte) (*Document, error) {
info, err := compiler.ReadInfoFromBytes("", b)
if err != nil {
return nil, err
}
root := info.Content[0]
return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
}
// YAMLValue produces a serialized YAML representation of the document.
func (d *Document) YAMLValue(comment string) ([]byte, error) {
rawInfo := d.ToRawInfo()
rawInfo = &yaml.Node{
Kind: yaml.DocumentNode,
Content: []*yaml.Node{rawInfo},
HeadComment: comment,
}
return yaml.Marshal(rawInfo)
}
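A hedged sketch combining the two entry points above (reserialize and the file name are hypothetical; ioutil matches the vintage of this vendored code):
// reserialize is a hypothetical helper; it assumes imports of "io/ioutil"
// and the vendored "github.com/googleapis/gnostic/openapiv2" package.
func reserialize(filename string) ([]byte, error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	doc, err := openapi_v2.ParseDocument(b)
	if err != nil {
		return nil, err
	}
	// Re-emit the document as YAML with a leading head comment.
	return doc.YAMLValue("generated by reserialize")
}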

View File

@ -203,7 +203,7 @@
"additionalProperties": {
"$ref": "#/definitions/response"
},
"description": "One or more JSON representations for parameters"
"description": "One or more JSON representations for responses"
},
"externalDocs": {
"type": "object",
@ -1607,4 +1607,4 @@
}
}
}
}
}

2
vendor/github.com/klauspost/compress/.gitattributes generated vendored Normal file
View File

@ -0,0 +1,2 @@
* -text
*.bin -text -diff

25
vendor/github.com/klauspost/compress/.gitignore generated vendored Normal file
View File

@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe

137
vendor/github.com/klauspost/compress/.goreleaser.yml generated vendored Normal file
View File

@ -0,0 +1,137 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
before:
hooks:
- ./gen.sh
builds:
-
id: "s2c"
binary: s2c
main: ./s2/cmd/s2c/main.go
flags:
- -trimpath
env:
- CGO_ENABLED=0
goos:
- aix
- linux
- freebsd
- netbsd
- windows
- darwin
goarch:
- 386
- amd64
- arm
- arm64
- ppc64
- ppc64le
- mips64
- mips64le
goarm:
- 7
-
id: "s2d"
binary: s2d
main: ./s2/cmd/s2d/main.go
flags:
- -trimpath
env:
- CGO_ENABLED=0
goos:
- aix
- linux
- freebsd
- netbsd
- windows
- darwin
goarch:
- 386
- amd64
- arm
- arm64
- ppc64
- ppc64le
- mips64
- mips64le
goarm:
- 7
-
id: "s2sx"
binary: s2sx
main: ./s2/cmd/_s2sx/main.go
flags:
- -modfile=s2sx.mod
- -trimpath
env:
- CGO_ENABLED=0
goos:
- aix
- linux
- freebsd
- netbsd
- windows
- darwin
goarch:
- 386
- amd64
- arm
- arm64
- ppc64
- ppc64le
- mips64
- mips64le
goarm:
- 7
archives:
-
id: s2-binaries
name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
replacements:
aix: AIX
darwin: OSX
linux: Linux
windows: Windows
386: i386
amd64: x86_64
freebsd: FreeBSD
netbsd: NetBSD
format_overrides:
- goos: windows
format: zip
files:
- unpack/*
- s2/LICENSE
- s2/README.md
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^doc:'
- '^docs:'
- '^test:'
- '^tests:'
- '^Update\sREADME.md'
nfpms:
-
file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
vendor: Klaus Post
homepage: https://github.com/klauspost/compress
maintainer: Klaus Post <klauspost@gmail.com>
description: S2 Compression Tool
license: BSD 3-Clause
formats:
- deb
- rpm
replacements:
darwin: Darwin
linux: Linux
freebsd: FreeBSD
amd64: x86_64

View File

@ -26,3 +26,267 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------
Files: gzhttp/*
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------
Files: s2/cmd/internal/readahead/*
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------------------
Files: snappy/*
Files: internal/snapref/*
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/klauspost/compress/README.md generated vendored Normal file
@ -0,0 +1,430 @@
# compress
This package provides various compression algorithms.
* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped requests efficiently (see the sketch after this list).
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
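For the gzhttp wrapper mentioned above, a minimal server sketch; this is an illustration based on the changelog entries below (`gzhttp.GzipHandler` wraps a standard handler), not a definitive reference:
```
package main

import (
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello, compressed world"))
	})
	// GzipHandler transparently gzips responses for clients that accept it.
	http.ListenAndServe(":8080", gzhttp.GzipHandler(hello))
}
```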
[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# changelog
* Aug 12, 2021 (v1.13.4)
* Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy).
* zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415)
* Aug 3, 2021 (v1.13.3)
* zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404)
* zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411)
* gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406)
* s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399)
* zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401)
* zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410)
* Jun 14, 2021 (v1.13.1)
* s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396)
* zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394)
* gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389)
* s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395)
* Jun 3, 2021 (v1.13.0)
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
* Apr 27, 2021 (v1.12.2)
* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
* Apr 14, 2021 (v1.12.1)
* snappy package removed. Upstream added as dependency.
* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
* Mar 5, 2021 (v1.11.12)
* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
* Mar 1, 2021 (v1.11.9)
* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
* Jan 14, 2021 (v1.11.7)
* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
* Jan 7, 2021 (v1.11.6)
* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
* Dec 20, 2020 (v1.11.4)
* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
* Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300)
* zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303)
* Nov 15, 2020 (v1.11.3)
* inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293)
* zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295)
* Oct 11, 2020 (v1.11.2)
* s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291)
* Oct 1, 2020 (v1.11.1)
* zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286)
* Sept 8, 2020 (v1.11.0)
* zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281)
* zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282)
* inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274)
<details>
<summary>See changes prior to v1.11.0</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
* huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275)
* June 23, 2020 (v1.10.10)
* zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270)
* June 16, 2020 (v1.10.9):
* zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268)
* zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266)
* Fuzzit tests removed. The service has been purchased and is no longer available.
* June 5, 2020 (v1.10.8):
* 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265)
* June 1, 2020 (v1.10.7):
* Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries)
* Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259)
* Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263)
* May 21, 2020: (v1.10.6)
* zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252)
* zstd: Stricter decompression checks.
* April 12, 2020: (v1.10.5)
* s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239)
* Apr 8, 2020: (v1.10.4)
* zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247)
* Mar 11, 2020: (v1.10.3)
* s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245)
* s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244)
* zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240)
* zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241)
* zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238)
* Feb 27, 2020: (v1.10.2)
* Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232)
* Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227)
* Feb 18, 2020: (v1.10.1)
* Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226)
* deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224)
* Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224)
* Feb 4, 2020: (v1.10.0)
* Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216)
* Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218)
* Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214)
* Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186)
</details>
<details>
<summary>See changes prior to v1.10.0</summary>
* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
* Nov 11, 2019: Reduce inflate memory use by 1KB.
* Nov 10, 2019: Less allocations in deflate bit writer.
* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
</details>
<details>
<summary>See changes prior to v1.9.0</summary>
* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169)
* Oct 3, 2019: Fix inconsistent results on broken zstd streams.
* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools)
* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools).
* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip).
* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes).
* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option.
* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables.
* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode.
* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding.
* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy.
* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing.
* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing.
* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147)
* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146)
* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144)
* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142)
* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder.
* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder.
* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content.
* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix.
* June 17, 2019: zstd decompression bugfix.
* June 17, 2019: fix 32 bit builds.
* June 17, 2019: Easier use in modules (less dependencies).
* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio.
* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression.
* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels.
* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression!
* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels.
* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added.
* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
* May 28, 2017: Reduce allocations when resetting decoder.
* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
* Mar 24, 2016: Small speedup for level 1-3.
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
</details>
# deflate usage
* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import         | new import                               | Documentation |
|--------------------|------------------------------------------|---------------|
| `compress/gzip`    | `github.com/klauspost/compress/gzip`     | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
| `compress/zlib`    | `github.com/klauspost/compress/zlib`     | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
| `archive/zip`      | `github.com/klauspost/compress/zip`      | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
| `compress/flate`   | `github.com/klauspost/compress/flate`    | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
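For illustration, swapping the import path is the only change needed; a minimal sketch:
```
import (
	// Previously: "compress/gzip"
	"github.com/klauspost/compress/gzip"
)
```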
You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
The packages contain the same functionality as the standard library, so you can use their godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
Currently there is only minor speedup on decompression (mostly CRC32 calculation).
Memory usage is typically 1MB for a Writer; the stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers, consider using the stateless compression described below.
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
It will do compression but without maintaining any state between Write calls.
This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
This is only relevant in cases where you expect to run many thousands of compressors concurrently,
but with very little activity. This is *not* intended for regular web servers serving individual requests.
Because of this, the size of actual Write calls will affect output size.
In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
For direct deflate use, `NewStatelessWriter` and `StatelessDeflate` are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
```
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
return err
}
defer gzw.Close()
w := bufio.NewWriterSize(gzw, 4096)
defer w.Flush()
// Write to 'w'
```
This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
# Performance Update 2018
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
The `gzstd` (standard library gzip) and `gzkp` (this package gzip) each use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
## Overall differences.
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
This package attempts to provide that smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
## Web Content
This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
Since levels 1 and 9 are close to the same code in both packages, the results there are quite close. But looking at the levels in between, the differences are quite big.
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
## Object files
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
## Huffman only compression
This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
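A minimal sketch of enabling this mode through this package's gzip writer, assuming the exported `gzip.HuffmanOnly` level constant:
```
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	// HuffmanOnly skips match searching entirely; only entropy coding runs.
	w, err := gzip.NewWriterLevel(&buf, gzip.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(bytes.Repeat([]byte("example text "), 100)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed to %d bytes", buf.Len())
}
```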
# license
This code is licensed under the same conditions as the original Go code. See LICENSE file.

vendor/github.com/klauspost/compress/compressible.go generated vendored Normal file
@ -0,0 +1,85 @@
package compress
import "math"
// Estimate returns a normalized compressibility estimate of block b.
// Values close to zero are likely uncompressible.
// Values above 0.1 are likely to be compressible.
// Values above 0.5 are very compressible.
// Very small lengths will return 0.
func Estimate(b []byte) float64 {
if len(b) < 16 {
return 0
}
// Correctly predicted order 1
hits := 0
lastMatch := false
var o1 [256]byte
var hist [256]int
c1 := byte(0)
for _, c := range b {
if c == o1[c1] {
// We only count a hit if there were two correct predictions in a row.
if lastMatch {
hits++
}
lastMatch = true
} else {
lastMatch = false
}
o1[c1] = c
c1 = c
hist[c]++
}
// Use x^0.6 to give better spread
prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
// Calculate histogram distribution
variance := float64(0)
avg := float64(len(b)) / 256
for _, v := range hist {
Δ := float64(v) - avg
variance += Δ * Δ
}
stddev := math.Sqrt(float64(variance)) / float64(len(b))
exp := math.Sqrt(1 / float64(len(b)))
// Subtract expected stddev
stddev -= exp
if stddev < 0 {
stddev = 0
}
stddev *= 1 + exp
// Use x^0.4 to give better spread
entropy := math.Pow(stddev, 0.4)
// 50/50 weight between prediction and histogram distribution
return math.Pow((prediction+entropy)/2, 0.9)
}
// ShannonEntropyBits returns the number of bits minimum required to represent
// an entropy encoding of the input bytes.
// https://en.wiktionary.org/wiki/Shannon_entropy
func ShannonEntropyBits(b []byte) int {
if len(b) == 0 {
return 0
}
var hist [256]int
for _, c := range b {
hist[c]++
}
shannon := float64(0)
invTotal := 1.0 / float64(len(b))
for _, v := range hist[:] {
if v > 0 {
n := float64(v)
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
}
}
return int(math.Ceil(shannon))
}
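As a usage sketch for the two helpers above (the `main` package wrapper is hypothetical; the functions are the ones defined in this file):
```
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

func main() {
	// Repetitive input: Estimate should return a value well above 0.5.
	data := bytes.Repeat([]byte("abcdefgh"), 1024)
	fmt.Printf("compressibility: %.3f\n", compress.Estimate(data))
	fmt.Printf("entropy bits:    %d\n", compress.ShannonEntropyBits(data))
}
```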

vendor/github.com/klauspost/compress/gen.sh generated vendored Normal file
@ -0,0 +1,4 @@
#!/bin/sh
cd s2/cmd/_s2sx/ || exit 1
go generate .

vendor/github.com/klauspost/compress/go.mod generated vendored Normal file
@ -0,0 +1,3 @@
module github.com/klauspost/compress
go 1.15

vendor/github.com/klauspost/compress/go.sum generated vendored Normal file
@ -161,6 +161,70 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
return s.Out, false, nil
}
// EstimateSizes will estimate the data sizes
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
s, err = s.prepare(in)
if err != nil {
return 0, 0, 0, err
}
// Create histogram, if none was provided.
tableSz, dataSz, reuseSz = -1, -1, -1
maxCount := s.maxCount
var canReuse = false
if maxCount == 0 {
maxCount, canReuse = s.countSimple(in)
} else {
canReuse = s.canUseTable(s.prevTable)
}
// We want the output size to be less than this:
wantSize := len(in)
if s.WantLogLess > 0 {
wantSize -= wantSize >> s.WantLogLess
}
// Reset for next run.
s.clearCount = true
s.maxCount = 0
if maxCount >= len(in) {
if maxCount > len(in) {
return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
}
if len(in) == 1 {
return 0, 0, 0, ErrIncompressible
}
// One symbol, use RLE
return 0, 0, 0, ErrUseRLE
}
if maxCount == 1 || maxCount < (len(in)>>7) {
// Each symbol present maximum once or too well distributed.
return 0, 0, 0, ErrIncompressible
}
// Calculate new table.
err = s.buildCTable()
if err != nil {
return 0, 0, 0, err
}
if false && !s.canUseTable(s.cTable) {
panic("invalid table generated")
}
tableSz, err = s.cTable.estTableSize(s)
if err != nil {
return 0, 0, 0, err
}
if canReuse {
reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
}
dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
// Restore
return tableSz, dataSz, reuseSz, nil
}
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
return s.compress1xDo(s.Out, src)
}
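A usage sketch for the new `EstimateSizes` helper, assuming this hunk belongs to the huff0 package (the input string is arbitrary; the error values are the ones referenced in the hunk):
```
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := []byte("aaaaaabbbbbccccdddeee aaaaaabbbbbccccdddeee aaaaaabbbbbccccdddeee")
	var s huff0.Scratch
	// Estimate table, data, and table-reuse sizes without producing output.
	tableSz, dataSz, reuseSz, err := huff0.EstimateSizes(in, &s)
	if err != nil {
		// huff0.ErrIncompressible / huff0.ErrUseRLE signal that Huffman
		// coding is not worthwhile for this input.
		log.Fatal(err)
	}
	fmt.Println("table:", tableSz, "data:", dataSz, "reuse:", reuseSz)
}
```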

@ -245,6 +245,68 @@ func (c cTable) write(s *Scratch) error {
return nil
}
func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
var (
// precomputed conversion table
bitsToWeight [tableLogMax + 1]byte
huffLog = s.actualTableLog
// last weight is not saved.
maxSymbolValue = uint8(s.symbolLen - 1)
huffWeight = s.huffWeight[:256]
)
const (
maxFSETableLog = 6
)
// convert to weight
bitsToWeight[0] = 0
for n := uint8(1); n < huffLog+1; n++ {
bitsToWeight[n] = huffLog + 1 - n
}
// Acquire histogram for FSE.
hist := s.fse.Histogram()
hist = hist[:256]
for i := range hist[:16] {
hist[i] = 0
}
for n := uint8(0); n < maxSymbolValue; n++ {
v := bitsToWeight[c[n].nBits] & 15
huffWeight[n] = v
hist[v]++
}
// FSE compress if feasible.
if maxSymbolValue >= 2 {
huffMaxCnt := uint32(0)
huffMax := uint8(0)
for i, v := range hist[:16] {
if v == 0 {
continue
}
huffMax = byte(i)
if v > huffMaxCnt {
huffMaxCnt = v
}
}
s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
s.fse.TableLog = maxFSETableLog
b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
if err == nil && len(b) < int(s.symbolLen>>1) {
sz += 1 + len(b)
return sz, nil
}
// Unable to compress (RLE/uncompressible)
}
// write raw values as 4-bits (max : 15)
if maxSymbolValue > (256 - 128) {
// should not happen : likely means source cannot be compressed
return 0, ErrIncompressible
}
// special case, pack weights 4 bits/weight.
sz += 1 + int(maxSymbolValue/2)
return sz, nil
}
// estimateSize returns the estimated size in bytes of the input represented in the
// histogram supplied.
func (c cTable) estimateSize(hist []uint32) int {

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
package snapref
import (
"encoding/binary"

@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!arm64 appengine !gc noasm
package snappy
package snapref
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
package snapref
import (
"encoding/binary"

@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!arm64 appengine !gc noasm
package snappy
package snapref
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the Snappy compression format. It aims for very
// Package snapref implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
@ -17,7 +17,7 @@
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"
package snapref
import (
"hash/crc32"

vendor/github.com/klauspost/compress/s2sx.mod generated vendored Normal file
@ -0,0 +1,4 @@
module github.com/klauspost/compress
go 1.16

vendor/github.com/klauspost/compress/s2sx.sum generated vendored Normal file
@ -152,7 +152,7 @@ file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87
silesia.tar zskp 2 211947520 67504318 969 208.38
silesia.tar zskp 3 211947520 64595893 2007 100.68
silesia.tar zskp 4 211947520 60995370 7691 26.28
silesia.tar zskp 4 211947520 60995370 8825 22.90
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@ -162,7 +162,7 @@ silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21
silesia.tar gzkp 1 211947520 80369488 1168 173.06
silesia.tar gzkp 1 211947520 80136201 1152 175.45
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
@ -171,13 +171,15 @@ file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30
gob-stream zskp 2 1911399616 205669791 3786 481.34
gob-stream zskp 3 1911399616 175034659 9636 189.17
gob-stream zskp 4 1911399616 167273881 29337 62.13
gob-stream zskp 4 1911399616 165609838 50369 36.19
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
gob-stream gzstd 1 1911399616 357382641 10251 177.82
gob-stream gzkp 1 1911399616 362156523 5695 320.08
gob-stream gzkp 1 1911399616 359753026 5438 335.20
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
@ -187,11 +189,13 @@ file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18
enwik9 zskp 2 1000000000 317276632 5746 165.97
enwik9 zskp 3 1000000000 292243069 12162 78.41
enwik9 zskp 4 1000000000 275241169 36430 26.18
enwik9 zskp 4 1000000000 262183768 82837 11.51
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
enwik9 gzstd 1 1000000000 382578136 9604 99.30
enwik9 gzkp 1 1000000000 383825945 6544 145.73
@ -202,13 +206,15 @@ file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
github-june-2days-2019.json zskp 4 6273951764 503314661 93811 63.78
github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
@ -217,13 +223,15 @@ file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
rawstudio-mint14.tar zskp 4 8558382592 3020370044 404956 20.16
rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
@ -232,13 +240,15 @@ file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
nyc-taxi-data-10M.csv zskp 4 3325605752 490907191 65939 48.10
nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
```
## Decompressor

@ -144,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
cSize = 1
case blockTypeCompressed:
if debug {
if debugDecoder {
println("Data size on stream:", cSize)
}
b.RLESize = 0
@ -153,7 +153,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
maxSize = int(windowSize)
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debug {
if debugDecoder {
printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
}
return ErrCompressedSizeTooBig
@ -168,10 +168,10 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem {
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize)
} else {
b.dataStorage = make([]byte, 0, maxBlockSize)
b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
}
}
if cap(b.dst) <= maxSize {
@ -179,7 +179,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debug {
if debugDecoder {
println("Reading block:", err, "(", cSize, ")", len(b.data))
printf("%T", br)
}
@ -249,7 +249,7 @@ func (b *blockDec) startDecoder() {
b: b.dst,
err: err,
}
if debug {
if debugDecoder {
println("Decompressed to", len(b.dst), "bytes, error:", err)
}
b.result <- o
@ -264,7 +264,7 @@ func (b *blockDec) startDecoder() {
default:
panic("Invalid block type")
}
if debug {
if debugDecoder {
println("blockDec: Finished block")
}
}
@ -297,7 +297,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
b.dst = hist.b
hist.b = nil
err := b.decodeCompressed(hist)
if debug {
if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
hist.b = b.dst
@ -390,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
in = in[5:]
}
}
if debug {
if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
var literals []byte
@ -428,7 +428,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
literals[i] = v
}
in = in[1:]
if debug {
if debugDecoder {
printf("Found %d RLE compressed literals\n", litRegenSize)
}
case literalsBlockTreeless:
@ -439,7 +439,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize]
in = in[litCompSize:]
if debug {
if debugDecoder {
printf("Found %d compressed literals\n", litCompSize)
}
case literalsBlockCompressed:
@ -481,7 +481,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if len(literals) != litRegenSize {
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
if debug {
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
}
@ -532,12 +532,12 @@ func (b *blockDec) decodeCompressed(hist *history) error {
br := byteReader{b: in, off: 0}
compMode := br.Uint8()
br.advance(1)
if debug {
if debugDecoder {
printf("Compression modes: 0b%b", compMode)
}
for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debug {
if debugDecoder {
println("Table", tableIndex(i), "is", mode)
}
var seq *sequenceDec
@ -568,7 +568,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
dec.setRLE(symb)
seq.fse = dec
if debug {
if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v)
}
case compModeFSE:
@ -584,7 +584,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
println("Transform table error:", err)
return err
}
if debug {
if debugDecoder {
println("Read table ok", "symbolLen:", dec.symbolLen)
}
seq.fse = dec
@ -652,7 +652,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if huff != nil {
hist.huffTree = huff
}
if debug {
if debugDecoder {
println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
}
@ -669,7 +669,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if err != nil {
return err
}
if debug {
if debugDecoder {
println("History merged ok")
}
br := &bitReader{}
@ -728,7 +728,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
hist.append(b.dst)
hist.recentOffsets = seqs.prevOffset
if debug {
if debugDecoder {
println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
}

View File

@ -156,7 +156,7 @@ func (h *literalsHeader) setSize(regenLen int) {
switch {
case inBits < 5:
lh |= (uint64(regenLen) << 3) | (1 << 60)
if debug {
if debugEncoder {
got := int(lh>>3) & 0xff
if got != regenLen {
panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
@ -184,7 +184,7 @@ func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
lh |= 1 << 2
}
lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
if debug {
if debugEncoder {
const mmask = (1 << 24) - 1
n := (lh >> 4) & mmask
if int(n&1023) != inLen {
@ -312,7 +312,7 @@ func (b *blockEnc) encodeRaw(a []byte) {
bh.setType(blockTypeRaw)
b.output = bh.appendTo(b.output[:0])
b.output = append(b.output, a...)
if debug {
if debugEncoder {
println("Adding RAW block, length", len(a), "last:", b.last)
}
}
@ -325,7 +325,7 @@ func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
bh.setType(blockTypeRaw)
dst = bh.appendTo(dst)
dst = append(dst, src...)
if debug {
if debugEncoder {
println("Adding RAW block, length", len(src), "last:", b.last)
}
return dst
@ -339,7 +339,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
// Don't compress extremely small blocks
if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
if debug {
if debugEncoder {
println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
@ -371,7 +371,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
switch err {
case huff0.ErrIncompressible:
if debug {
if debugEncoder {
println("Adding RAW block, length", len(lits), "last:", b.last)
}
bh.setType(blockTypeRaw)
@ -379,7 +379,7 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
b.output = append(b.output, lits...)
return nil
case huff0.ErrUseRLE:
if debug {
if debugEncoder {
println("Adding RLE block, length", len(lits))
}
bh.setType(blockTypeRLE)
@ -396,12 +396,12 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
bh.setType(blockTypeCompressed)
var lh literalsHeader
if reUsed {
if debug {
if debugEncoder {
println("Reused tree, compressed to", len(out))
}
lh.setType(literalsBlockTreeless)
} else {
if debug {
if debugEncoder {
println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
}
lh.setType(literalsBlockCompressed)
@ -517,7 +517,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
lh.setSize(len(b.literals))
b.output = lh.appendTo(b.output)
b.output = append(b.output, b.literals...)
if debug {
if debugEncoder {
println("Adding literals RAW, length", len(b.literals))
}
case huff0.ErrUseRLE:
@ -525,22 +525,22 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
lh.setSize(len(b.literals))
b.output = lh.appendTo(b.output)
b.output = append(b.output, b.literals[0])
if debug {
if debugEncoder {
println("Adding literals RLE")
}
case nil:
// Compressed litLen...
if reUsed {
if debug {
if debugEncoder {
println("reused tree")
}
lh.setType(literalsBlockTreeless)
} else {
if debug {
if debugEncoder {
println("new tree, size:", len(b.litEnc.OutTable))
}
lh.setType(literalsBlockCompressed)
if debug {
if debugEncoder {
_, _, err := huff0.ReadTable(out, nil)
if err != nil {
panic(err)
@ -548,18 +548,18 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
}
}
lh.setSizes(len(out), len(b.literals), single)
if debug {
if debugEncoder {
printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
println("Adding literal header:", lh)
}
b.output = lh.appendTo(b.output)
b.output = append(b.output, out...)
b.litEnc.Reuse = huff0.ReusePolicyAllow
if debug {
if debugEncoder {
println("Adding literals compressed")
}
default:
if debug {
if debugEncoder {
println("Adding literals ERROR:", err)
}
return err
@ -577,7 +577,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
n := len(b.sequences) - 0x7f00
b.output = append(b.output, 255, uint8(n), uint8(n>>8))
}
if debug {
if debugEncoder {
println("Encoding", len(b.sequences), "sequences")
}
b.genCodes()
@ -611,17 +611,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
nSize = nSize + (nSize+2*8*16)>>4
switch {
case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
if debug {
if debugEncoder {
println("Using predefined", predefSize>>3, "<=", nSize>>3)
}
return preDef, compModePredefined
case prevSize <= nSize:
if debug {
if debugEncoder {
println("Using previous", prevSize>>3, "<=", nSize>>3)
}
return prev, compModeRepeat
default:
if debug {
if debugEncoder {
println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
}
@ -634,7 +634,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if llEnc.useRLE {
mode |= uint8(compModeRLE) << 6
llEnc.setRLE(b.sequences[0].llCode)
if debug {
if debugEncoder {
println("llEnc.useRLE")
}
} else {
@ -645,7 +645,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if ofEnc.useRLE {
mode |= uint8(compModeRLE) << 4
ofEnc.setRLE(b.sequences[0].ofCode)
if debug {
if debugEncoder {
println("ofEnc.useRLE")
}
} else {
@ -657,7 +657,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if mlEnc.useRLE {
mode |= uint8(compModeRLE) << 2
mlEnc.setRLE(b.sequences[0].mlCode)
if debug {
if debugEncoder {
println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
}
} else {
@ -666,7 +666,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
mode |= uint8(m) << 2
}
b.output = append(b.output, mode)
if debug {
if debugEncoder {
printf("Compression modes: 0b%b", mode)
}
b.output, err = llEnc.writeCount(b.output)
@ -786,7 +786,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
// Size is output minus block header.
bh.setSize(uint32(len(b.output)-bhOffset) - 3)
if debug {
if debugEncoder {
println("Rewriting block header", bh)
}
_ = bh.appendTo(b.output[bhOffset:bhOffset])

View File

@ -91,7 +91,7 @@ func (r *readerWrapper) readSmall(n int) ([]byte, error) {
if err == io.EOF {
return nil, io.ErrUnexpectedEOF
}
if debug {
if debugDecoder {
println("readSmall: got", n2, "want", n, "err", err)
}
return nil, err

View File

@ -113,9 +113,6 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
// Returns the number of bytes written and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
if d.stream == nil {
return 0, ErrDecoderNilInput
}
var n int
for {
if len(d.current.b) > 0 {
@ -138,7 +135,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
}
}
if len(d.current.b) > 0 {
if debug {
if debugDecoder {
println("returning", n, "still bytes left:", len(d.current.b))
}
// Only return error at end of block
@ -147,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
if d.current.err != nil {
d.drainOutput()
}
if debug {
if debugDecoder {
println("returning", n, d.current.err, len(d.decoders))
}
return n, d.current.err
@ -167,20 +164,17 @@ func (d *Decoder) Reset(r io.Reader) error {
if r == nil {
d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 {
d.current.b = d.current.b[:0]
}
d.current.flushed = true
return nil
}
if d.stream == nil {
d.stream = make(chan decodeStream, 1)
d.streamWg.Add(1)
go d.startStreamDecoder(d.stream)
}
// If bytes buffer and < 1MB, do sync decoding anyway.
if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
// If bytes buffer and < 5MB, do sync decoding anyway.
if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
bb2 := bb
if debug {
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
@ -196,12 +190,18 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.b = dst
d.current.err = err
d.current.flushed = true
if debug {
if debugDecoder {
println("sync decode to", len(dst), "bytes, err:", err)
}
return nil
}
if d.stream == nil {
d.stream = make(chan decodeStream, 1)
d.streamWg.Add(1)
go d.startStreamDecoder(d.stream)
}
// Remove current block.
d.current.decodeOutput = decodeOutput{}
d.current.err = nil
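The `Reset` hunks above raise the synchronous-decode cutoff for in-memory inputs from 1 MiB to 5 MiB and defer starting the stream goroutine until the sync path has been ruled out. A rough usage sketch under those assumptions; a `*bytes.Buffer` below the cutoff is decoded fully inside `Reset`, after which `Read` and `io.Copy` (which goes through `WriteTo`) simply drain the buffered result:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	payload := enc.EncodeAll(bytes.Repeat([]byte("abc"), 1024), nil)
	_ = enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	// Under 5 MiB, this *bytes.Buffer takes the sync-decode path.
	if err := dec.Reset(bytes.NewBuffer(payload)); err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, dec); err != nil {
		panic(err)
	}
	fmt.Println(out.Len(), "bytes decoded")
}
```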
@ -225,7 +225,7 @@ func (d *Decoder) drainOutput() {
d.current.cancel = nil
}
if d.current.d != nil {
if debug {
if debugDecoder {
printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
}
d.decoders <- d.current.d
@ -238,7 +238,7 @@ func (d *Decoder) drainOutput() {
}
for v := range d.current.output {
if v.d != nil {
if debug {
if debugDecoder {
printf("re-adding decoder %p", v.d)
}
d.decoders <- v.d
@ -255,17 +255,15 @@ func (d *Decoder) drainOutput() {
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
if d.stream == nil {
return 0, ErrDecoderNilInput
}
var n int64
for {
if len(d.current.b) > 0 {
n2, err2 := w.Write(d.current.b)
n += int64(n2)
if err2 != nil && d.current.err == nil {
if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
d.current.err = err2
break
} else if n2 != len(d.current.b) {
d.current.err = io.ErrShortWrite
}
}
if d.current.err != nil {
@ -297,7 +295,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
block := <-d.decoders
frame := block.localFrame
defer func() {
if debug {
if debugDecoder {
printf("re-adding decoder: %p", block)
}
frame.rawInput = nil
@ -310,7 +308,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.reset()
err := frame.reset(&frame.bBuf)
if err == io.EOF {
if debug {
if debugDecoder {
println("frame reset return EOF")
}
return dst, nil
@ -355,7 +353,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
return dst, err
}
if len(frame.bBuf) == 0 {
if debug {
if debugDecoder {
println("frame dbuf empty")
}
break
@ -371,7 +369,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
if d.current.d != nil {
if debug {
if debugDecoder {
printf("re-adding current decoder %p", d.current.d)
}
d.decoders <- d.current.d
@ -391,7 +389,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
return false
}
}
if debug {
if debugDecoder {
println("got", len(d.current.b), "bytes, error:", d.current.err)
}
return true
@ -485,7 +483,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
defer d.streamWg.Done()
frame := newFrameDec(d.o)
for stream := range inStream {
if debug {
if debugDecoder {
println("got new stream")
}
br := readerWrapper{r: stream.r}
@ -493,7 +491,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
for {
frame.history.reset()
err := frame.reset(&br)
if debug && err != nil {
if debugDecoder && err != nil {
println("Frame decoder returned", err)
}
if err == nil && frame.DictionaryID != nil {
@ -510,7 +508,7 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
}
break
}
if debug {
if debugDecoder {
println("starting frame decoder")
}

View File

@ -17,14 +17,16 @@ type decoderOptions struct {
lowMem bool
concurrent int
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
}
func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
lowMem: true,
concurrent: runtime.GOMAXPROCS(0),
lowMem: true,
concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize,
}
o.maxDecodedSize = 1 << 63
}
@ -52,7 +54,6 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// For streaming operations, the maximum window size is capped at 1<<30 bytes.
// Maximum and default is 1 << 63 bytes.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
@ -81,3 +82,21 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
return nil
}
}
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
// If WithDecoderMaxMemory is set to a lower value, that will be used.
// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
func WithDecoderMaxWindow(size uint64) DOption {
return func(o *decoderOptions) error {
if size < MinWindowSize {
return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
}
if size > (1<<41)+7*(1<<38) {
return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
}
o.maxWindowSize = size
return nil
}
}
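A minimal sketch of the new option in use; all names here are the package's exported API as shown in this hunk. Frames declaring a window above the cap fail fast with `ErrWindowSizeExceeded` instead of forcing a large allocation:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	payload := enc.EncodeAll([]byte("hello window"), nil)
	_ = enc.Close()

	// Reject any frame that declares a window larger than 1 MiB.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxWindow(1<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(payload, nil)
	if errors.Is(err, zstd.ErrWindowSizeExceeded) {
		fmt.Println("frame window too large")
		return
	}
	fmt.Println(string(out), err)
}
```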

View File

@ -82,7 +82,7 @@ func loadDict(b []byte) (*dict, error) {
println("Transform table error:", err)
return err
}
if debug {
if debugDecoder || debugEncoder {
println("Read table ok", "symbolLen:", dec.symbolLen)
}
// Set decoders as predefined so they aren't reused.

View File

@ -38,8 +38,8 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
// WindowSize returns the window size of the encoder,
// or a window size small enough to contain the input size, if > 0.
func (e *fastBase) WindowSize(size int) int32 {
if size > 0 && size < int(e.maxMatchOff) {
func (e *fastBase) WindowSize(size int64) int32 {
if size > 0 && size < int64(e.maxMatchOff) {
b := int32(1) << uint(bits.Len(uint(size)))
// Keep minimum window.
if b < 1024 {

View File

@ -5,22 +5,61 @@
package zstd
import (
"bytes"
"fmt"
"math/bits"
"github.com/klauspost/compress"
)
const (
bestLongTableBits = 20 // Bits used in the long match table
bestLongTableBits = 22 // Bits used in the long match table
bestLongTableSize = 1 << bestLongTableBits // Size of the table
bestLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the
// long match table and match offsets are quite big.
// This greatly depends on the type of input.
bestShortTableBits = 16 // Bits used in the short match table
bestShortTableBits = 18 // Bits used in the short match table
bestShortTableSize = 1 << bestShortTableBits // Size of the table
bestShortLen = 4 // Bytes used for table hash
)
type match struct {
offset int32
s int32
length int32
rep int32
est int32
}
const highScore = 25000
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
mlc := mlCode(uint32(m.length - zstdMinMatch))
var ofc uint8
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
ofc = ofCode(uint32(m.rep))
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
// Add cost of match encoding...
m.est = int32(ofTT.outBits + mlTT.outBits)
m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
// Subtract savings compared to literal encoding...
m.est -= (m.length * bitsPerByte) >> 10
if m.est > 0 {
// Unlikely gain..
m.length = 0
m.est = highScore
}
}
// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
@ -109,6 +148,14 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
return
}
// Use this to estimate literal cost.
// Scaled by 10 bits.
bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
// Huffman can never go < 1 bit/byte
if bitsPerByte < 1024 {
bitsPerByte = 1024
}
// Override src
src = e.hist
sLimit := int32(len(src)) - inputMargin
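The estimate above is fixed-point: the Shannon entropy of the block in bits per byte, scaled by 2^10 and floored at 1024 since Huffman-coded literals cannot go below 1 bit/byte. A small standalone sketch of the same arithmetic (the helper name `literalBitsPerByte` is illustrative, not part of the package):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress"
)

// literalBitsPerByte mirrors the estimate above: entropy scaled by 2^10,
// never below 1024 (1 bit per byte).
func literalBitsPerByte(src []byte) int32 {
	bpb := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
	if bpb < 1024 {
		bpb = 1024
	}
	return bpb
}

func main() {
	src := bytes.Repeat([]byte("abcd"), 64)
	bpb := literalBitsPerByte(src)
	// estBits subtracts (length*bpb)>>10 from a match's coding cost, so a
	// 16-byte match on this input saves roughly this many literal bits:
	fmt.Println("bits/byte (x1024):", bpb, "16-byte match saves ~", (16*bpb)>>10, "bits")
}
```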
@ -132,7 +179,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
}
_ = addLiterals
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -145,51 +192,49 @@ encodeLoop:
panic("offset0 was 0")
}
type match struct {
offset int32
s int32
length int32
rep int32
}
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{offset: offset, s: s}
}
return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
}
bestOf := func(a, b match) match {
aScore := b.s - a.s + a.length
bScore := a.s - b.s + b.length
if a.rep < 0 {
aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8
}
if b.rep < 0 {
bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8
}
if aScore >= bScore {
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
return a
}
return b
}
const goodEnough = 100
nextHashL := hash8(cv, bestLongTableBits)
nextHashS := hash4x64(cv, bestShortTableBits)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
matchAt := func(offset int32, s int32, first uint32, rep int32) match {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
return match{s: s, est: highScore}
}
if debugAsserts {
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
m.estBits(bitsPerByte)
return m
}
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
if canRepeat && best.length < goodEnough {
best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1))
best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2))
best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3))
cv32 := uint32(cv >> 8)
spp := s + 1
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
if best.length > 0 {
best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1))
best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2))
best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3))
cv32 = uint32(cv >> 24)
spp += 2
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
}
}
// Load next and check...
@ -209,22 +254,28 @@ encodeLoop:
}
s++
candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)]
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s)
cv2 := load6432(src, s+1)
candidateL = e.longTable[hash8(cv, bestLongTableBits)]
candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)]
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
// Long at s+1, s+2
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
if false {
// Short at s+3.
// Too often worse...
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
}
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hash8(load6432(src, sAt), bestLongTableBits)
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
@ -236,6 +287,12 @@ encodeLoop:
}
}
if debugAsserts {
if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
}
}
// We have a match, we can store the forward value
if best.rep > 0 {
s = best.s
@ -274,7 +331,7 @@ encodeLoop:
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, best.length)
}
@ -284,8 +341,8 @@ encodeLoop:
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
h0 := hash8(cv0, bestLongTableBits)
h1 := hash4x64(cv0, bestShortTableBits)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
off++
@ -311,7 +368,7 @@ encodeLoop:
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && canRepeat && int(offset1) > len(src) {
if debugAsserts && int(offset1) > len(src) {
panic("invalid offset")
}
@ -352,8 +409,8 @@ encodeLoop:
// every entry
for index0 < s-1 {
cv0 := load6432(src, index0)
h0 := hash8(cv0, bestLongTableBits)
h1 := hash4x64(cv0, bestShortTableBits)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
@ -374,8 +431,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash4x64(cv, bestShortTableBits)
nextHashL := hash8(cv, bestLongTableBits)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -412,7 +469,7 @@ encodeLoop:
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
blk.recentOffsets[2] = uint32(offset3)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -425,7 +482,7 @@ func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
e.Encode(blk, src)
}
// ResetDict will reset and set a dictionary if not nil
// Reset will reset and set a dictionary if not nil
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
e.resetBase(d, singleBlock)
if d == nil {
@ -441,10 +498,10 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
const hashLog = bestShortTableBits
cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash4x64(cv, hashLog) // 0 -> 4
nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5
nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6
nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7
nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4
nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5
nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
e.dictTable[nextHash] = prevEntry{
prev: e.dictTable[nextHash].offset,
offset: i,
@ -472,7 +529,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
h := hash8(cv, bestLongTableBits)
h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset,
@ -482,7 +539,7 @@ func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56)
h := hash8(cv, bestLongTableBits)
h := hashLen(cv, bestLongTableBits, bestLongLen)
e.dictLongTable[h] = prevEntry{
offset: i,
prev: e.dictLongTable[h].offset,

View File

@ -9,6 +9,7 @@ import "fmt"
const (
betterLongTableBits = 19 // Bits used in the long match table
betterLongTableSize = 1 << betterLongTableBits // Size of the table
betterLongLen = 8 // Bytes used for table hash
// Note: Increasing the short table bits or making the hash shorter
// can actually lead to compression degradation since it will 'steal' more from the
@ -16,6 +17,7 @@ const (
// This greatly depends on the type of input.
betterShortTableBits = 13 // Bits used in the short match table
betterShortTableSize = 1 << betterShortTableBits // Size of the table
betterShortLen = 5 // Bytes used for table hash
betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table
betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard
@ -138,7 +140,7 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -154,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -204,7 +206,7 @@ encodeLoop:
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -214,10 +216,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
@ -264,7 +266,7 @@ encodeLoop:
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -275,10 +277,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
@ -353,7 +355,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, betterLongTableBits)
nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur
@ -413,8 +415,8 @@ encodeLoop:
}
// Try to find a better match by searching for a long match at the end of the current best match
if true && s+matched < sLimit {
nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
if s+matched < sLimit {
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched
@ -495,10 +497,10 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
@ -516,8 +518,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -553,7 +555,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -656,7 +658,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -672,8 +674,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -724,7 +726,7 @@ encodeLoop:
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -734,11 +736,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@ -787,7 +789,7 @@ encodeLoop:
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -798,11 +800,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@ -879,7 +881,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, betterLongTableBits)
nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur
@ -940,7 +942,7 @@ encodeLoop:
}
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
nextHashL := hash8(load6432(src, s+matched), betterLongTableBits)
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched
@ -1021,11 +1023,11 @@ encodeLoop:
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hash8(cv0, betterLongTableBits)
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hash5(cv1, betterShortTableBits)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
@ -1045,8 +1047,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash5(cv, betterShortTableBits)
nextHashL := hash8(cv, betterLongTableBits)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -1084,7 +1086,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -1113,10 +1115,10 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = betterShortTableBits
cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash5(cv, hashLog) // 0 -> 4
nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5
nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4
nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5
nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,
@ -1145,7 +1147,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
h := hash8(cv, betterLongTableBits)
h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{
offset: e.maxMatchOff,
prev: e.dictLongTable[h].offset,
@ -1155,7 +1157,7 @@ func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
off := 8 // First to read
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[off]) << 56)
h := hash8(cv, betterLongTableBits)
h := hashLen(cv, betterLongTableBits, betterLongLen)
e.dictLongTable[h] = prevEntry{
offset: i,
prev: e.dictLongTable[h].offset,

View File

@ -10,6 +10,7 @@ const (
dFastLongTableBits = 17 // Bits used in the long match table
dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
dFastLongLen = 8 // Bytes used for table hash
dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard
@ -17,6 +18,8 @@ const (
dFastShortTableBits = tableBits // Bits used in the short match table
dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
dFastShortLen = 5 // Bytes used for table hash
)
type doubleFastEncoder struct {
@ -109,7 +112,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -124,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -170,7 +173,7 @@ encodeLoop:
s += lenght + repOff
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -208,7 +211,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits)
nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -304,16 +307,16 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
e.longTable[hash8(cv0, dFastLongTableBits)] = te0
e.longTable[hash8(cv1, dFastLongTableBits)] = te1
e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
e.table[hash5(cv0, dFastShortTableBits)] = te0
e.table[hash5(cv1, dFastShortTableBits)] = te1
e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@ -330,8 +333,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -368,7 +371,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -427,7 +430,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -436,8 +439,8 @@ encodeLoop:
var t int32
for {
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -483,7 +486,7 @@ encodeLoop:
s += length + repOff
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, length)
}
@ -521,7 +524,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits)
nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -614,16 +617,16 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
e.longTable[hash8(cv0, dFastLongTableBits)] = te0
e.longTable[hash8(cv1, dFastLongTableBits)] = te1
e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
cv0 >>= 8
cv1 >>= 8
te0.offset++
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
e.table[hash5(cv0, dFastShortTableBits)] = te0
e.table[hash5(cv1, dFastShortTableBits)] = te1
e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1
cv = load6432(src, s)
@ -640,8 +643,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash5(cv1>>8, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -677,7 +680,7 @@ encodeLoop:
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
@ -767,7 +770,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -782,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@ -830,7 +833,7 @@ encodeLoop:
s += lenght + repOff
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, lenght)
}
@ -868,7 +871,7 @@ encodeLoop:
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hash8(cv, dFastLongTableBits)
nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = s - (candidateL.offset - e.cur) + checkAt
@ -965,8 +968,8 @@ encodeLoop:
cv1 := load6432(src, index1)
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hash8(cv0, dFastLongTableBits)
longHash2 := hash8(cv0, dFastLongTableBits)
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@ -977,8 +980,8 @@ encodeLoop:
te1.offset++
te0.val = uint32(cv0)
te1.val = uint32(cv1)
hashVal1 := hash5(cv0, dFastShortTableBits)
hashVal2 := hash5(cv1, dFastShortTableBits)
hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
e.table[hashVal1] = te0
e.markShardDirty(hashVal1)
e.table[hashVal2] = te1
@ -999,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
nextHashS := hash5(cv, dFastShortTableBits)
nextHashL := hash8(cv, dFastLongTableBits)
nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@ -1039,7 +1042,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// If we encoded more than 64K mark all dirty.
@ -1071,14 +1074,14 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if len(d.content) >= 8 {
cv := load6432(d.content, 0)
e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: e.maxMatchOff,
}
end := int32(len(d.content)) - 8 + e.maxMatchOff
for i := e.maxMatchOff + 1; i < end; i++ {
cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{
e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
val: uint32(cv),
offset: i,
}

View File

@ -11,12 +11,13 @@ import (
)
const (
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
tableShardSize = tableSize / tableShardCnt // Size of an individual shard
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
maxMatchLength = 131074
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
tableShardSize = tableSize / tableShardCnt // Size of an individual shard
tableFastHashLen = 6
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
maxMatchLength = 131074
)
type tableEntry struct {
@ -103,7 +104,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -122,8 +123,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHash := hash6(cv, hashLog)
nextHash2 := hash6(cv>>8, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@ -178,7 +179,7 @@ encodeLoop:
s += length + 2
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, length)
}
@ -301,7 +302,7 @@ encodeLoop:
}
// Store this, since we have it.
nextHash := hash6(cv, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
@ -330,7 +331,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -343,7 +344,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
inputMargin = 8
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
if debug {
if debugEncoder {
if len(src) > maxBlockSize {
panic("src too big")
}
@ -391,7 +392,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -405,8 +406,8 @@ encodeLoop:
// By not using them for the first 3 matches
for {
nextHash := hash6(cv, hashLog)
nextHash2 := hash6(cv>>8, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@ -462,7 +463,7 @@ encodeLoop:
s += length + 2
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, length)
}
@ -589,7 +590,7 @@ encodeLoop:
}
// Store this, since we have it.
nextHash := hash6(cv, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
@ -616,7 +617,7 @@ encodeLoop:
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
@ -696,7 +697,7 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debug {
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
@ -715,8 +716,8 @@ encodeLoop:
panic("offset0 was 0")
}
nextHash := hash6(cv, hashLog)
nextHash2 := hash6(cv>>8, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
candidate := e.table[nextHash]
candidate2 := e.table[nextHash2]
repIndex := s - offset1 + 2
@ -773,7 +774,7 @@ encodeLoop:
s += length + 2
nextEmit = s
if s >= sLimit {
if debug {
if debugEncoder {
println("repeat ended", s, length)
}
@ -896,7 +897,7 @@ encodeLoop:
}
// Store this, since we have it.
nextHash := hash6(cv, hashLog)
nextHash := hashLen(cv, hashLog, tableFastHashLen)
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
e.markShardDirty(nextHash)
seq.matchLen = uint32(l) - zstdMinMatch
@ -926,7 +927,7 @@ encodeLoop:
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debug {
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
@ -957,9 +958,9 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const hashLog = tableBits
cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hash6(cv, hashLog) // 0 -> 5
nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6
nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7
nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,

View File

@ -33,7 +33,7 @@ type encoder interface {
Block() *blockEnc
CRC() *xxhash.Digest
AppendCRC([]byte) []byte
WindowSize(size int) int32
WindowSize(size int64) int32
UseBlock(*blockEnc)
Reset(d *dict, singleBlock bool)
}
@ -48,6 +48,8 @@ type encoderState struct {
err error
writeErr error
nWritten int64
nInput int64
frameContentSize int64
headerWritten bool
eofWritten bool
fullFrameWritten bool
@ -120,7 +122,21 @@ func (e *Encoder) Reset(w io.Writer) {
s.w = w
s.err = nil
s.nWritten = 0
s.nInput = 0
s.writeErr = nil
s.frameContentSize = 0
}
// ResetContentSize will reset and set a content size for the next stream.
// If the bytes written does not match the size given an error will be returned
// when calling Close().
// This is removed when Reset is called.
// Sizes <= 0 results in no content size set.
func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
e.Reset(w)
if size >= 0 {
e.state.frameContentSize = size
}
}
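A short sketch of the new `ResetContentSize` hook; per the comment above, the declared size goes into the frame header and `Close` reports a mismatch if the bytes written differ:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}

	payload := []byte("exactly these bytes")
	// Declare the frame content size up front; it is written into the
	// frame header instead of the previous ContentSize: 0.
	enc.ResetContentSize(&buf, int64(len(payload)))
	if _, err := enc.Write(payload); err != nil {
		panic(err)
	}
	// Close would error if fewer or more bytes had been written.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println("frame bytes:", buf.Len())
}
```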
// Write data to the encoder.
@ -190,6 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err
}
s.nWritten += int64(n2)
s.nInput += int64(len(s.filling))
s.current = s.current[:0]
s.filling = s.filling[:0]
s.headerWritten = true
@ -200,8 +217,8 @@ func (e *Encoder) nextBlock(final bool) error {
var tmp [maxHeaderSize]byte
fh := frameHeader{
ContentSize: 0,
WindowSize: uint32(s.encoder.WindowSize(0)),
ContentSize: uint64(s.frameContentSize),
WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)),
SingleSegment: false,
Checksum: e.o.crc,
DictID: e.o.dict.ID(),
@ -243,9 +260,10 @@ func (e *Encoder) nextBlock(final bool) error {
// Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
go func(src []byte) {
if debug {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
}
defer func() {
@ -290,7 +308,7 @@ func (e *Encoder) nextBlock(final bool) error {
}
switch err {
case errIncompressible:
if debug {
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
@ -313,7 +331,7 @@ func (e *Encoder) nextBlock(final bool) error {
//
// The Copy function uses ReaderFrom if available.
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
if debug {
if debugEncoder {
println("Using ReadFrom")
}
@ -336,20 +354,20 @@ func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
switch err {
case io.EOF:
e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
if debug {
if debugEncoder {
println("ReadFrom: got EOF final block:", len(e.state.filling))
}
return n, nil
case nil:
default:
if debug {
if debugEncoder {
println("ReadFrom: got error:", err)
}
e.state.err = err
return n, err
}
if len(src) > 0 {
if debug {
if debugEncoder {
println("ReadFrom: got space left in source:", len(src))
}
continue
@ -394,6 +412,11 @@ func (e *Encoder) Close() error {
if err != nil {
return err
}
if s.frameContentSize > 0 {
if s.nInput != s.frameContentSize {
return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
}
}
if e.state.fullFrameWritten {
return s.err
}
@ -470,7 +493,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
fh := frameHeader{
ContentSize: uint64(len(src)),
WindowSize: uint32(enc.WindowSize(len(src))),
WindowSize: uint32(enc.WindowSize(int64(len(src)))),
SingleSegment: single,
Checksum: e.o.crc,
DictID: e.o.dict.ID(),
@ -512,7 +535,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
switch err {
case errIncompressible:
if debug {
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, src)
@ -548,7 +571,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
switch err {
case errIncompressible:
if debug {
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, todo)

View File

@ -189,7 +189,7 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
case level >= 6 && level < 10:
return SpeedBetterCompression
case level >= 10:
return SpeedBetterCompression
return SpeedBestCompression
}
return SpeedDefault
}
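With the fix above, zstd CLI levels 10 and up map to `SpeedBestCompression` rather than repeating `SpeedBetterCompression`. A quick sketch of the resulting mapping using the exported helper:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	for _, lvl := range []int{1, 3, 7, 19} {
		// After this change, 19 reports "best"; previously it collapsed
		// into "better" along with levels 6-9.
		fmt.Println("zstd level", lvl, "->", zstd.EncoderLevelFromZstd(lvl))
	}
}
```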

View File

@ -22,10 +22,6 @@ type frameDec struct {
WindowSize uint64
// maxWindowSize is the maximum windows size to support.
// should never be bigger than max-int.
maxWindowSize uint64
// In order queue of blocks being decoded.
decoding chan *blockDec
@ -50,8 +46,11 @@ type frameDec struct {
}
const (
// The minimum Window_Size is 1 KB.
// MinWindowSize is the minimum Window Size, which is 1 KB.
MinWindowSize = 1 << 10
// MaxWindowSize is the maximum encoder window size
// and the default decoder maximum window size.
MaxWindowSize = 1 << 29
)
@ -61,12 +60,11 @@ var (
)
func newFrameDec(o decoderOptions) *frameDec {
d := frameDec{
o: o,
maxWindowSize: MaxWindowSize,
if o.maxWindowSize > o.maxDecodedSize {
o.maxWindowSize = o.maxDecodedSize
}
if d.maxWindowSize > o.maxDecodedSize {
d.maxWindowSize = o.maxDecodedSize
d := frameDec{
o: o,
}
return &d
}
@ -78,20 +76,33 @@ func newFrameDec(o decoderOptions) *frameDec {
func (d *frameDec) reset(br byteBuffer) error {
d.HasCheckSum = false
d.WindowSize = 0
var b []byte
var signature [4]byte
for {
var err error
b, err = br.readSmall(4)
// Check if we can read more...
b, err := br.readSmall(1)
switch err {
case io.EOF, io.ErrUnexpectedEOF:
return io.EOF
default:
return err
case nil:
signature[0] = b[0]
}
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
if debug {
println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic))
// Read the rest, don't allow io.ErrUnexpectedEOF
b, err = br.readSmall(3)
switch err {
case io.EOF:
return io.EOF
default:
return err
case nil:
copy(signature[1:], b)
}
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
if debugDecoder {
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
}
// Break if not skippable frame.
break
@ -99,28 +110,34 @@ func (d *frameDec) reset(br byteBuffer) error {
// Read size to skip
b, err = br.readSmall(4)
if err != nil {
println("Reading Frame Size", err)
if debugDecoder {
println("Reading Frame Size", err)
}
return err
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
err = br.skipN(int(n))
if err != nil {
if debug {
if debugDecoder {
println("Reading discarded frame", err)
}
return err
}
}
if !bytes.Equal(b, frameMagic) {
println("Got magic numbers: ", b, "want:", frameMagic)
if !bytes.Equal(signature[:], frameMagic) {
if debugDecoder {
println("Got magic numbers: ", signature, "want:", frameMagic)
}
return ErrMagicMismatch
}
// Read Frame_Header_Descriptor
fhd, err := br.readByte()
if err != nil {
println("Reading Frame_Header_Descriptor", err)
if debugDecoder {
println("Reading Frame_Header_Descriptor", err)
}
return err
}
d.SingleSegment = fhd&(1<<5) != 0
@ -135,7 +152,9 @@ func (d *frameDec) reset(br byteBuffer) error {
if !d.SingleSegment {
wd, err := br.readByte()
if err != nil {
println("Reading Window_Descriptor", err)
if debugDecoder {
println("Reading Window_Descriptor", err)
}
return err
}
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
@ -153,7 +172,7 @@ func (d *frameDec) reset(br byteBuffer) error {
size = 4
}
b, err = br.readSmall(int(size))
b, err := br.readSmall(int(size))
if err != nil {
println("Reading Dictionary_ID", err)
return err
@ -167,7 +186,7 @@ func (d *frameDec) reset(br byteBuffer) error {
case 4:
id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
}
if debug {
if debugDecoder {
println("Dict size", size, "ID:", id)
}
if id > 0 {
@ -191,7 +210,7 @@ func (d *frameDec) reset(br byteBuffer) error {
}
d.FrameContentSize = 0
if fcsSize > 0 {
b, err = br.readSmall(fcsSize)
b, err := br.readSmall(fcsSize)
if err != nil {
println("Reading Frame content", err)
return err
@ -209,7 +228,7 @@ func (d *frameDec) reset(br byteBuffer) error {
d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debug {
if debugDecoder {
println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
}
}
@ -230,13 +249,17 @@ func (d *frameDec) reset(br byteBuffer) error {
}
}
if d.WindowSize > d.maxWindowSize {
printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize)
if d.WindowSize > uint64(d.o.maxWindowSize) {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
}
return ErrWindowSizeExceeded
}
// The minimum Window_Size is 1 KB.
if d.WindowSize < MinWindowSize {
println("got window size: ", d.WindowSize)
if debugDecoder {
println("got window size: ", d.WindowSize)
}
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
@ -252,7 +275,7 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
if debug {
if debugDecoder {
printf("decoding new block %p:%p", block, block.data)
}
err := block.reset(d.rawInput, d.WindowSize)
@ -263,7 +286,7 @@ func (d *frameDec) next(block *blockDec) error {
return err
}
block.input <- struct{}{}
if debug {
if debugDecoder {
println("next block:", block)
}
d.asyncRunningMu.Lock()
@ -318,12 +341,12 @@ func (d *frameDec) checkCRC() error {
}
if !bytes.Equal(tmp[:], want) {
if debug {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}
return ErrCRCMismatch
}
if debug {
if debugDecoder {
println("CRC ok", tmp[:])
}
return nil
@ -331,8 +354,8 @@ func (d *frameDec) checkCRC() error {
func (d *frameDec) initAsync() {
if !d.o.lowMem && !d.SingleSegment {
// set max extra size history to 10MB.
d.history.maxSize = d.history.windowSize + maxBlockSize*5
// set max extra size history to 2MB.
d.history.maxSize = d.history.windowSize + maxBlockSize
}
// re-alloc if more than one extra block size.
if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
@ -344,7 +367,7 @@ func (d *frameDec) initAsync() {
if cap(d.decoding) < d.o.concurrent {
d.decoding = make(chan *blockDec, d.o.concurrent)
}
if debug {
if debugDecoder {
h := d.history
printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
}
@ -392,7 +415,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
output <- r
return
}
if debug {
if debugDecoder {
println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
d.offset += int64(len(r.b))
}
@ -400,7 +423,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
// Send history to next block
select {
case next = <-d.decoding:
if debug {
if debugDecoder {
println("Sending ", len(d.history.b), "bytes as history")
}
next.history <- &d.history
@ -438,7 +461,7 @@ func (d *frameDec) startDecoder(output chan decodeOutput) {
output <- r
if next == nil {
// There was no decoder available, we wait for one now that we have sent to the writer.
if debug {
if debugDecoder {
println("Sending ", len(d.history.b), " bytes as history")
}
next = <-d.decoding
@ -462,7 +485,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil {
break
}
if debug {
if debugDecoder {
println("next block:", dec)
}
err = dec.decodeBuf(&d.history)
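
The Window_Descriptor handling above (the mantissa/exponent printf and the MinWindowSize/maxWindowSize checks) follows the zstd frame format: the descriptor byte splits into a 3-bit mantissa and 5-bit exponent. A minimal sketch of the derivation per RFC 8878; the function name is illustrative, not from the vendored file:

// Sketch: Window_Size from a Window_Descriptor byte (RFC 8878),
// matching the mantissa (wd&7) / exponent (wd>>3) split logged above.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + uint64(wd>>3)              // exponent, biased by 10
	windowBase := uint64(1) << windowLog         // 1 KB shifted by the exponent
	windowAdd := (windowBase / 8) * uint64(wd&7) // mantissa counts eighths
	return windowBase + windowAdd                // always >= MinWindowSize (1 KB)
}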


@ -229,7 +229,7 @@ func (s *fseEncoder) setRLE(val byte) {
deltaFindState: 0,
deltaNbBits: 0,
}
if debug {
if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
}
s.rleVal = val


@ -13,24 +13,24 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463
)
// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes.
// l must be >=4 and <=8. Any other value will return hash for 4 bytes.
// h should always be <32.
// Preferably h and l should be a constant.
// FIXME: This does NOT get resolved, if 'mls' is constant,
// so this cannot be used.
func hashLen(u uint64, hashLog, mls uint8) uint32 {
// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
return hash5(u, hashLog)
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
return hash6(u, hashLog)
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
return hash7(u, hashLog)
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
return hash8(u, hashLog)
return uint32((u * prime8bytes) >> (64 - length))
default:
return hash4x64(u, hashLog)
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
@ -39,39 +39,3 @@ func hashLen(u uint64, hashLog, mls uint8) uint32 {
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}
// hash4 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint32, h uint8) uint32 {
return (u * prime4bytes) >> ((32 - h) & 31)
}
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
return uint32((u * prime8bytes) >> ((64 - h) & 63))
}
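
All of the inlined cases are the same multiplicative hash the deleted hash4…hash8 helpers computed: shift the unused high bytes away, multiply by a large odd prime, and keep the top length bits. A standalone sketch using prime8bytes from the constant block above:

// hash8 maps the full 8 bytes of u to a 'length'-bit table index via
// multiplicative hashing. With length constant the shift amount folds at
// compile time, which is why the updated comment asks for constants.
const prime8bytes = 0xcf1bbcdcb7a56463 // same constant as above

func hash8(u uint64, length uint8) uint32 {
	return uint32((u * prime8bytes) >> (64 - length))
}

For shorter matches the input is first shifted left so only the low mls bytes influence the product, as in the case 5–7 arms above.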


@ -1,6 +1,5 @@
// +build !appengine
// +build gc
// +build !purego
//go:build !appengine && gc && !purego
// +build !appengine,gc,!purego
package xxhash


@ -1,3 +1,4 @@
//go:build !amd64 || appengine || !gc || purego
// +build !amd64 appengine !gc purego
package xxhash


@ -10,8 +10,8 @@ import (
"hash/crc32"
"io"
"github.com/golang/snappy"
"github.com/klauspost/compress/huff0"
snappy "github.com/klauspost/compress/internal/snapref"
)
const (
@ -203,7 +203,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
written += int64(n)
continue
case chunkTypeUncompressedData:
if debug {
if debugEncoder {
println("Uncompressed, chunklen", chunkLen)
}
// Section 4.3. Uncompressed data (chunk type 0x01).
@ -246,7 +246,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
continue
case chunkTypeStreamIdentifier:
if debug {
if debugEncoder {
println("stream id", chunkLen, len(snappyMagicBody))
}
// Section 4.1. Stream identifier (chunk type 0xff).


@ -64,8 +64,9 @@ func (r *pooledZipReader) Close() error {
}
type pooledZipWriter struct {
mu sync.Mutex // guards Close and Read
enc *Encoder
mu sync.Mutex // guards Close and Read
enc *Encoder
pool *sync.Pool
}
func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
@ -83,7 +84,7 @@ func (w *pooledZipWriter) Close() error {
var err error
if w.enc != nil {
err = w.enc.Close()
zipReaderPool.Put(w.enc)
w.pool.Put(w.enc)
w.enc = nil
}
return err
@ -104,7 +105,7 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
return nil, err
}
}
return &pooledZipWriter{enc: enc}, nil
return &pooledZipWriter{enc: enc, pool: &pool}, nil
}
}
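
The fix threads the owning pool through pooledZipWriter: previously Close returned encoders to zipReaderPool, the reader-side pool. A minimal usage sketch, assuming the ZipMethodWinZip constant defined elsewhere in the same vendored file:

package main

import (
	"archive/zip"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	// Each ZipCompressor call now owns the pool its writers are returned to.
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("hello zstd"))
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}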


@ -15,6 +15,12 @@ import (
// enable debug printing
const debug = false
// enable encoding debug printing
const debugEncoder = debug
// enable decoding debug printing
const debugDecoder = debug
// Enable extra assertions.
const debugAsserts = debug || false
@ -82,13 +88,13 @@ var (
)
func println(a ...interface{}) {
if debug {
if debug || debugDecoder || debugEncoder {
log.Println(a...)
}
}
func printf(format string, a ...interface{}) {
if debug {
if debug || debugDecoder || debugEncoder {
log.Printf(format, a...)
}
}
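
The split flags are compile-time constants, so enabling one side of the tracing means flipping a constant in this file and rebuilding; println/printf fire when any flag is set. A self-contained mirror of the gating, illustrative rather than the vendored code:

package main

import "log"

const (
	debug        = false
	debugDecoder = true // flip to trace only the decode path
	debugEncoder = false
)

func println(a ...interface{}) {
	if debug || debugDecoder || debugEncoder {
		log.Println(a...)
	}
}

func main() {
	println("frame header parsed") // printed, since debugDecoder is set
}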


@ -519,11 +519,11 @@ func (disp *display) print(d displayInfo, width, height int, all bool) {
out = align(out, timer, width)
if j.completedTime != nil {
color := aec.BlueF
color := colorRun
if j.isCanceled {
color = aec.YellowF
color = colorCancel
} else if j.hasError {
color = aec.RedF
color = colorError
}
out = aec.Apply(out, color)
}


@ -0,0 +1,12 @@
//go:build !windows
// +build !windows
package progressui
import "github.com/morikuni/aec"
var (
colorRun = aec.BlueF
colorCancel = aec.YellowF
colorError = aec.RedF
)


@ -0,0 +1,12 @@
//go:build windows
// +build windows
package progressui
import "github.com/morikuni/aec"
var (
colorRun = aec.CyanF
colorCancel = aec.YellowF
colorError = aec.RedF
)
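
display.go consumes these variables at the aec.Apply call shown earlier; which file defines them is decided by the build tags. A small sketch of that call, with colorRun hard-wired to the non-Windows value:

package main

import (
	"fmt"

	"github.com/morikuni/aec"
)

// colorRun mirrors the non-Windows value above; Windows builds get
// aec.CyanF from the build-tagged file instead.
var colorRun = aec.BlueF

func main() {
	// aec.Apply wraps the text in the color's ANSI escape sequence, the
	// same call display.go makes when a job completes.
	fmt.Println(aec.Apply("=> [2/5] RUN make", colorRun))
}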


@ -53,4 +53,10 @@ const (
// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
AnnotationDescription = "org.opencontainers.image.description"
// AnnotationBaseImageDigest is the annotation key for the digest of the image's base image.
AnnotationBaseImageDigest = "org.opencontainers.image.base.digest"
// AnnotationBaseImageName is the annotation key for the image reference of the image's base image.
AnnotationBaseImageName = "org.opencontainers.image.base.name"
)
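
A hypothetical sketch of stamping the new base-image keys on a manifest; the reference and digest strings are placeholders, only the constants come from the file above:

package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := v1.Manifest{
		Annotations: map[string]string{
			v1.AnnotationBaseImageName:   "docker.io/library/alpine:3.14", // placeholder
			v1.AnnotationBaseImageDigest: "sha256:<base image digest>",    // placeholder
		},
	}
	fmt.Println(m.Annotations[v1.AnnotationBaseImageName])
}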


@ -89,9 +89,20 @@ type Image struct {
// Architecture is the CPU architecture which the binaries in this image are built to run on.
Architecture string `json:"architecture"`
// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
Variant string `json:"variant,omitempty"`
// OS is the name of the operating system which the image is built to run on.
OS string `json:"os"`
// OSVersion is an optional field specifying the operating system
// version, for example on Windows `10.0.14393.1066`.
OSVersion string `json:"os.version,omitempty"`
// OSFeatures is an optional field specifying an array of strings,
// each listing a required OS feature (for example on Windows `win32k`).
OSFeatures []string `json:"os.features,omitempty"`
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`


@ -34,6 +34,10 @@ const (
// referenced by the manifest.
MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
// MediaTypeImageLayerZstd is the media type used for zstd compressed
// layers referenced by the manifest.
MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd"
// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
// the manifest but with distribution restrictions.
MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
@ -43,6 +47,11 @@ const (
// restrictions.
MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
// compressed layers referenced by the manifest but with distribution
// restrictions.
MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
// MediaTypeImageConfig specifies the media type for the image configuration.
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
)
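
A descriptor for a zstd-compressed layer simply names the new media type; a minimal sketch with placeholder digest and size:

package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	layer := v1.Descriptor{
		MediaType: v1.MediaTypeImageLayerZstd,
		Digest:    "sha256:<layer digest>", // placeholder
		Size:      1234,                    // placeholder
	}
	fmt.Println(layer.MediaType)
}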


@ -25,7 +25,7 @@ const (
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)
// Version is the specification version that the package types support.


@ -137,11 +137,13 @@ func trimOWS(x string) string {
// contains token amongst its comma-separated tokens, ASCII
// case-insensitively.
func headerValueContainsToken(v string, token string) bool {
v = trimOWS(v)
if comma := strings.IndexByte(v, ','); comma != -1 {
return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
if tokenEqual(trimOWS(v[:comma]), token) {
return true
}
v = v[comma+1:]
}
return tokenEqual(v, token)
return tokenEqual(trimOWS(v), token)
}
// lowerASCII returns the ASCII lowercase version of b.
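
The rewrite replaces tail recursion with a loop, so a header value with many commas no longer grows the stack. A self-contained approximation, where strings.EqualFold and strings.TrimSpace stand in for the vendored tokenEqual and trimOWS (the real trimOWS strips only SP and HTAB):

package main

import (
	"fmt"
	"strings"
)

func headerValueContainsToken(v, token string) bool {
	for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
		if strings.EqualFold(strings.TrimSpace(v[:comma]), token) {
			return true
		}
		v = v[comma+1:]
	}
	return strings.EqualFold(strings.TrimSpace(v), token)
}

func main() {
	fmt.Println(headerValueContainsToken("gzip, br ,\tzstd", "br")) // true
}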


@ -38,7 +38,7 @@ RUN make
RUN make install
WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local

vendor/golang.org/x/net/http2/ascii.go (generated, vendored; new file)

@ -0,0 +1,49 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "strings"
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}
// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}
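
The ASCII-only helpers exist because Unicode simple case folding, which strings.EqualFold applies, can equate non-ASCII code points with ASCII letters; that is unsafe when matching header names. A quick demonstration, with asciiEqualFold and lower copied from the file above:

package main

import (
	"fmt"
	"strings"
)

func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(strings.EqualFold("K", "\u212A")) // true: Kelvin sign folds to 'k'
	fmt.Println(asciiEqualFold("K", "\u212A"))    // false: ASCII-only comparison
	fmt.Println(asciiEqualFold("Host", "hOsT"))   // true
}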


@ -7,7 +7,9 @@
package http2
import (
"context"
"crypto/tls"
"errors"
"net/http"
"sync"
)
@ -78,61 +80,69 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
// It gets its own connection.
traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
if err != nil {
return nil, err
}
return cc, nil
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
if st := cc.idleState(); st.canTakeNewRequest {
if p.shouldTraceGetConn(st) {
traceGetConn(req, addr)
for {
p.mu.Lock()
for _, cc := range p.conns[addr] {
if st := cc.idleState(); st.canTakeNewRequest {
if p.shouldTraceGetConn(st) {
traceGetConn(req, addr)
}
p.mu.Unlock()
return cc, nil
}
p.mu.Unlock()
return cc, nil
}
}
if !dialOnMiss {
if !dialOnMiss {
p.mu.Unlock()
return nil, ErrNoCachedConn
}
traceGetConn(req, addr)
call := p.getStartDialLocked(req.Context(), addr)
p.mu.Unlock()
return nil, ErrNoCachedConn
<-call.done
if shouldRetryDial(call, req) {
continue
}
return call.res, call.err
}
traceGetConn(req, addr)
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done
return call.res, call.err
}
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
_ incomparable
p *clientConnPool
_ incomparable
p *clientConnPool
// the context associated with the request
// that created this dialCall
ctx context.Context
done chan struct{} // closed when done
res *ClientConn // valid after done is closed
err error // valid after done is closed
}
// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall {
if call, ok := p.dialing[addr]; ok {
// A dial is already in-flight. Don't start another.
return call
}
call := &dialCall{p: p, done: make(chan struct{})}
call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx}
if p.dialing == nil {
p.dialing = make(map[string]*dialCall)
}
p.dialing[addr] = call
go call.dial(addr)
go call.dial(call.ctx, addr)
return call
}
// run in its own goroutine.
func (c *dialCall) dial(addr string) {
func (c *dialCall) dial(ctx context.Context, addr string) {
const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
close(c.done)
c.p.mu.Lock()
@ -276,3 +286,28 @@ type noDialClientConnPool struct{ *clientConnPool }
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
return p.getClientConn(req, addr, noDialOnMiss)
}
// shouldRetryDial reports whether the current request should
// retry dialing after the call finished unsuccessfully, for example
// if the dial was canceled because of a context cancellation or
// deadline expiry.
func shouldRetryDial(call *dialCall, req *http.Request) bool {
if call.err == nil {
// No error, no need to retry
return false
}
if call.ctx == req.Context() {
// If the call has the same context as the request, the dial
// should not be retried, since any cancellation will have come
// from this request.
return false
}
if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
// If the call error is not because of a context cancellation or a deadline expiry,
// the dial should not be retried.
return false
}
// Only retry if the error is a context cancellation error or deadline expiry
// and the context associated with the call was canceled or expired.
return call.ctx.Err() != nil
}
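
The retry exists because dials are shared: if the request that started a dial is canceled, the dial fails with that context's error even though other requests waiting on the same dialCall are still live; those callers loop back in getClientConn and redial. A compact mirror of the decision, with a hypothetical shouldRetry helper:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Mirrors shouldRetryDial's test: retry only when the dial died of a
// cancellation or deadline belonging to a *different* request's context.
func shouldRetry(dialErr error, dialCtx, reqCtx context.Context) bool {
	if dialErr == nil || dialCtx == reqCtx {
		return false
	}
	if !errors.Is(dialErr, context.Canceled) && !errors.Is(dialErr, context.DeadlineExceeded) {
		return false
	}
	return dialCtx.Err() != nil
}

func main() {
	first, cancel := context.WithCancel(context.Background())
	second := context.Background()
	cancel() // the request that started the dial goes away

	// The shared dial failed with first's cancellation; second should redial.
	fmt.Println(shouldRetry(context.Canceled, first, second)) // true
}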

vendor/golang.org/x/net/http2/go115.go (generated, vendored; new file)

@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.15
// +build go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}


@ -6,7 +6,6 @@ package http2
import (
"net/http"
"strings"
"sync"
)
@ -79,10 +78,10 @@ func buildCommonHeaderMaps() {
}
}
func lowerHeader(v string) string {
func lowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s
return s, true
}
return strings.ToLower(v)
return asciiToLower(v)
}

vendor/golang.org/x/net/http2/not_go115.go (generated, vendored; new file)

@ -0,0 +1,31 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.15
// +build !go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext opens a TLS connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
cn, err := tls.Dial(network, addr, cfg)
if err != nil {
return nil, err
}
if err := cn.Handshake(); err != nil {
return nil, err
}
if cfg.InsecureSkipVerify {
return cn, nil
}
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
return nil, err
}
return cn, nil
}

Some files were not shown because too many files have changed in this diff.