diff --git a/go.mod b/go.mod
index b3413da217..21afc46a1d 100644
--- a/go.mod
+++ b/go.mod
@@ -29,7 +29,7 @@ require (
github.com/docker/docker v23.0.3+incompatible
github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8
github.com/drone/envsubst v1.0.3
- github.com/dustin/go-humanize v1.0.0
+ github.com/dustin/go-humanize v1.0.1
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fatih/color v1.14.1
github.com/felixge/fgprof v0.9.3
@@ -64,10 +64,10 @@ require (
github.com/jmespath/go-jmespath v0.4.0
github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.15
+ github.com/klauspost/compress v1.16.0
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-ieproxy v0.0.1
- github.com/minio/minio-go/v7 v7.0.45
+ github.com/minio/minio-go/v7 v7.0.52
github.com/mitchellh/go-wordwrap v1.0.1
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
@@ -242,7 +242,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
- github.com/klauspost/cpuid/v2 v2.1.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
diff --git a/go.sum b/go.sum
index 5f3c1541fd..dbe8c28d44 100644
--- a/go.sum
+++ b/go.sum
@@ -705,8 +705,9 @@ github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g=
github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0=
@@ -1261,12 +1262,12 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
-github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
-github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
@@ -1344,8 +1345,8 @@ github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs=
-github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps=
+github.com/minio/minio-go/v7 v7.0.52/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
index ba95cdd15c..ac12e485a1 100644
--- a/vendor/github.com/dustin/go-humanize/.travis.yml
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -1,12 +1,12 @@
sudo: false
language: go
+go_import_path: github.com/dustin/go-humanize
go:
- - 1.3.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - stable
- master
matrix:
allow_failures:
@@ -15,7 +15,7 @@ matrix:
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- - go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
+ - go vet .
+ - go install -v -race ./...
- go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
index 91b4ae5646..7d0b16b34f 100644
--- a/vendor/github.com/dustin/go-humanize/README.markdown
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -5,7 +5,7 @@ Just a few functions for helping humanize times and sizes.
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
-See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
complete documentation.
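+
+A minimal usage sketch (outputs shown in the comments; both helpers are part of
+this package):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/dustin/go-humanize"
+)
+
+func main() {
+	fmt.Println(humanize.Bytes(82854982))                       // 83 MB
+	fmt.Println(humanize.Time(time.Now().Add(-2 * time.Hour))) // 2 hours ago
+}
+```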
## Sizes
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
index 1a2bf61723..3b015fd59e 100644
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -28,6 +28,10 @@ var (
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
// BigYiByte is 1,024 z bytes in bit.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+	// BigRiByte is 1,024 y bytes in big.Ints
+	BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+	// BigQiByte is 1,024 r bytes in big.Ints
+	BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
)
var (
@@ -51,6 +55,10 @@ var (
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+ // BigRByte is 1,000 SI y bytes in big.Ints
+ BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+ // BigQByte is 1,000 SI r bytes in big.Ints
+ BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
@@ -71,6 +79,10 @@ var bigBytesSizeTable = map[string]*big.Int{
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
+ "rib": BigRiByte,
+ "rb": BigRByte,
+ "qib": BigQiByte,
+ "qb": BigQByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
@@ -89,6 +101,10 @@ var bigBytesSizeTable = map[string]*big.Int{
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
+ "r": BigRByte,
+ "ri": BigRiByte,
+ "q": BigQByte,
+ "qi": BigQiByte,
}
var ten = big.NewInt(10)
@@ -115,7 +131,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string {
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
@@ -125,7 +141,7 @@ func BigBytes(s *big.Int) string {
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
index 620690dec7..2bc83a03cf 100644
--- a/vendor/github.com/dustin/go-humanize/commaf.go
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -1,3 +1,4 @@
+//go:build go1.6
// +build go1.6
package humanize
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
index 1c62b640d4..bce923f371 100644
--- a/vendor/github.com/dustin/go-humanize/ftoa.go
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -6,6 +6,9 @@ import (
)
func stripTrailingZeros(s string) string {
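+	// Fast path: no decimal separator means there is nothing to strip.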
+ if !strings.ContainsRune(s, '.') {
+ return s
+ }
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
index dec6186599..6470d0d47a 100644
--- a/vendor/github.com/dustin/go-humanize/number.go
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -73,7 +73,7 @@ func FormatFloat(format string, n float64) string {
if n > math.MaxFloat64 {
return "Infinity"
}
- if n < -math.MaxFloat64 {
+ if n < (0.0 - math.MaxFloat64) {
return "-Infinity"
}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
index ae659e0e49..8b85019849 100644
--- a/vendor/github.com/dustin/go-humanize/si.go
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -8,6 +8,8 @@ import (
)
var siPrefixTable = map[float64]string{
+ -30: "q", // quecto
+ -27: "r", // ronto
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
@@ -25,6 +27,8 @@ var siPrefixTable = map[float64]string{
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
+ 27: "R", // ronna
+ 30: "Q", // quetta
}
var revSIPrefixTable = revfmap(siPrefixTable)
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index a2bf06e94f..7a008a4d23 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- - go install mvdan.cc/garble@v0.7.2
+ - go install mvdan.cc/garble@v0.9.3
builds:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 63f2cd5b25..958666ed89 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,12 @@ This package provides various compression algorithms.
# changelog
+* Jan 21st, 2023 (v1.15.15)
+ * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+ * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+ * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
* Jan 3rd, 2023 (v1.15.14)
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 42a237eac4..3c0b398c72 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
- return s, nil, err
+ return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 298c4f8e97..05db94d39a 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+ if MaxEncodedLen(len(src)) > len(dst) {
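+		// dst cannot hold the worst-case output; signal failure by returning 0.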
+ return 0
+ }
+
+ // encodeBlock breaks on too big blocks, so split.
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return d
+}
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
index 1d80c42a53..8284bb0810 100644
--- a/vendor/github.com/klauspost/compress/s2/README.md
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -20,11 +20,12 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
* Concurrent stream compression
* Faster decompression, even for Snappy compatible content
* Concurrent Snappy/S2 stream decompression
-* Ability to quickly skip forward in compressed stream
+* Skip forward in compressed stream
* Random seeking with indexes
* Compatible with reading Snappy compressed content
* Smaller block size overhead on incompressible blocks
* Block concatenation
+* Block Dictionary support
* Uncompressed stream mode
* Automatic stream size padding
* Snappy compatible block compression
@@ -594,6 +595,123 @@ Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
Decompression speed should be around the same as using the 'better' compression mode.
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup table at the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+The usage scenarios for S2 dictionaries are similar to those for
+[zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression):
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits dictionary use to the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
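+A minimal sketch of a round trip using the functions described below (here `sample`
+is representative training data and `payload` is a block to compress):
+
+```Go
+	dict := s2.MakeDict(sample, nil)
+	encoded := dict.Encode(nil, payload)
+	decoded, err := dict.Decode(nil, encoded) // must use the same dict
+```
+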
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard, the following sizes can be achieved.
+
+| | Default | Better | Best |
+|--------------------|------------------|------------------|-----------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing the first 64KB of all `.go` files in `go/src` (Go 1.19.5, 8912 files, 51253563 bytes input):
+
+| | Default | Better | Best |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file | 362 bytes | 428 bytes | 472 bytes |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example, in the `github_users_sample_set` above, the average compression ratio only goes up from
+10.49% to 11.48% when using the first file as a dictionary instead of a dedicated dictionary.
+
+```Go
+ // Read a sample
+ sample, err := os.ReadFile("sample.json")
+
+ // Create a dictionary.
+ dict := s2.MakeDict(sample, nil)
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // To encode:
+ encoded := dict.Encode(nil, file)
+
+ // To decode:
+ decoded, err := dict.Decode(nil, file)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful for generating dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a dictionary of 64KB, that can be converted to a dictionary like this:
+
+```Go
+ // Decode the Zstandard dictionary.
+ insp, err := zstd.InspectDictionary(zdict)
+ if err != nil {
+ panic(err)
+ }
+
+ // We are only interested in the contents.
+ // Assume that files start with "// Copyright (c) 2023".
+ // Search for the longest match for that.
+ // This may save a few bytes.
+ dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+ // b := dict.Bytes() will provide a dictionary that can be saved
+ // and reloaded with s2.NewDict(b).
+
+ // We can now encode using this dictionary
+ encodedWithDict := dict.Encode(nil, payload)
+
+ // To decode content:
+ decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires the `zstd` package.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes.
+
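+A short sketch of that save/load cycle:
+
+```Go
+	b := dict.Bytes()         // serialized S2 dictionary, safe to persist
+	reloaded := s2.NewDict(b) // returns nil if the data is not a valid dictionary
+```
+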
# Snappy Compatibility
S2 now offers full compatibility with Snappy.
@@ -929,6 +1047,72 @@ The first copy of a block cannot be a repeat offset and the offset is reset on e
Default streaming block size is 1MB.
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup table at the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that the dictionary contains values that can be used as back-references.
+
+Often-used data should be placed at the *end* of the dictionary, since offsets below 2048 bytes are encoded more compactly.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and at most 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifies the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
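+This matches what `Dict.Bytes()` in this package produces; a sketch of the
+serialization (`repeat` is the initial repeat offset, `content` the dictionary bytes):
+
+```Go
+	dst := make([]byte, binary.MaxVarintLen16+len(content))
+	n := binary.PutUvarint(dst, uint64(repeat))
+	serialized := append(dst[:n], content...)
+```
+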
+## Encoding
+
+From the decoder's point of view, the dictionary content is seen as preceding the encoded content.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary, the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
+either by copy or by repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks on whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead from using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+The initial repeat offset is set to 10, which points at the character `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
# LICENSE
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
index 00c5cc72c2..b7c9adfdd8 100644
--- a/vendor/github.com/klauspost/compress/s2/decode.go
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -13,6 +13,7 @@ import (
"io/ioutil"
"math"
"runtime"
+ "strconv"
"sync"
)
@@ -880,15 +881,20 @@ func (r *Reader) Skip(n int64) error {
// See Reader.ReadSeeker
type ReadSeeker struct {
*Reader
+ readAtMu sync.Mutex
}
-// ReadSeeker will return an io.ReadSeeker compatible version of the reader.
+// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
+// compatible version of the reader.
// If 'random' is specified the returned io.Seeker can be used for
// random seeking, otherwise only forward seeking is supported.
// Enabling random seeking requires the original input to support
// the io.Seeker interface.
// A custom index can be specified which will be used if supplied.
// When using a custom index, it will not be read from the input stream.
+// The ReadAt position will affect regular reads and the current position of Seek.
+// So using Read after ReadAt will continue from where the ReadAt stopped.
+// No functions should be used concurrently.
// The returned ReadSeeker contains a shallow reference to the existing Reader,
// meaning changes performed to one is reflected in the other.
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
@@ -958,42 +964,55 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
// Reset on EOF
r.err = nil
}
- if offset == 0 && whence == io.SeekCurrent {
- return r.blockStart + int64(r.i), nil
+
+ // Calculate absolute offset.
+ absOffset := offset
+
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ absOffset = r.blockStart + int64(r.i) + offset
+ case io.SeekEnd:
+ if r.index == nil {
+ return 0, ErrUnsupported
+ }
+ absOffset = r.index.TotalUncompressed + offset
+ default:
+ r.err = ErrUnsupported
+ return 0, r.err
}
+
+ if absOffset < 0 {
+ return 0, errors.New("seek before start of file")
+ }
+
if !r.readHeader {
// Make sure we read the header.
_, r.err = r.Read([]byte{})
+ if r.err != nil {
+ return 0, r.err
+ }
}
+
+ // If we are inside current block no need to seek.
+ // This includes no offset changes.
+ if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
+ r.i = int(absOffset - r.blockStart)
+ return r.blockStart + int64(r.i), nil
+ }
+
rs, ok := r.r.(io.ReadSeeker)
if r.index == nil || !ok {
- if whence == io.SeekCurrent && offset >= 0 {
- err := r.Skip(offset)
- return r.blockStart + int64(r.i), err
- }
- if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) {
- err := r.Skip(offset - r.blockStart - int64(r.i))
+ currOffset := r.blockStart + int64(r.i)
+ if absOffset >= currOffset {
+ err := r.Skip(absOffset - currOffset)
return r.blockStart + int64(r.i), err
}
return 0, ErrUnsupported
-
- }
-
- switch whence {
- case io.SeekCurrent:
- offset += r.blockStart + int64(r.i)
- case io.SeekEnd:
- if offset > 0 {
- return 0, errors.New("seek after end of file")
- }
- offset = r.index.TotalUncompressed + offset
}
- if offset < 0 {
- return 0, errors.New("seek before start of file")
- }
-
- c, u, err := r.index.Find(offset)
+ // We can seek and we have an index.
+ c, u, err := r.index.Find(absOffset)
if err != nil {
return r.blockStart + int64(r.i), err
}
@@ -1004,12 +1023,57 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
return 0, err
}
- r.i = r.j // Remove rest of current block.
- if u < offset {
+ r.i = r.j // Remove rest of current block.
+ r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
+ if u < absOffset {
// Forward inside block
- return offset, r.Skip(offset - u)
+ return absOffset, r.Skip(absOffset - u)
+ }
+ if u > absOffset {
+ return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
}
- return offset, nil
+ return absOffset, nil
+}
+
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source. This is however not recommended.
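+//
+// A short sketch (rs is a *ReadSeeker obtained from Reader.ReadSeeker):
+//
+//	buf := make([]byte, 128)
+//	n, err := rs.ReadAt(buf, 1024) // 128 bytes at uncompressed offset 1024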
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
+ r.readAtMu.Lock()
+ defer r.readAtMu.Unlock()
+ _, err := r.Seek(offset, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+ n := 0
+ for n < len(p) {
+ n2, err := r.Read(p[n:])
+ if err != nil {
+ // This will include io.EOF
+ return n + n2, err
+ }
+ n += n2
+ }
+ return n, nil
}
// ReadByte satisfies the io.ByteReader interface.
@@ -1048,3 +1112,370 @@ func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
r.skippableCB[id] = fn
return nil
}
+
+// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2DecodeDict(dst, src []byte, dict *Dict) int {
+ if dict == nil {
+ return s2Decode(dst, src)
+ }
+ const debug = false
+ const debugErrs = debug
+
+ if debug {
+ fmt.Println("Starting decode, dst len:", len(dst))
+ }
+ var d, s, length int
+ offset := len(dict.dict) - dict.repeat
+
+ // As long as we can read at least 5 bytes...
+ for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, when if doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ x = uint32(src[s-1])
+ case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
+ s += 3
+ case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
+ s += 4
+ case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+ s += 5
+ }
+ length = int(x) + 1
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ length = int(src[s]) + 4
+ s += 1
+ case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+ s += 2
+ case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+ s += 3
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
+ s += 3
+
+ case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
+ s += 5
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ startOff := len(dict.dict) - offset + d
+ if startOff < 0 || startOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
+ }
+ copy(dst[d:d+length], dict.dict[startOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ // Remaining with extra checks...
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debugErrs {
+ fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if debug {
+ fmt.Println("literals, length:", length, "d-after:", d+length)
+ }
+
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(src[s-2]) >> 2 & 0x7
+ toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ if toffset == 0 {
+ if debug {
+ fmt.Print("(repeat) ")
+ }
+ // keep last offset
+ switch length {
+ case 5:
+ s += 1
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-1])) + 4
+ case 6:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
+ case 7:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
+ default: // 0-> 4
+ }
+ } else {
+ offset = toffset
+ }
+ length += 4
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ if debugErrs {
+ fmt.Println("src went oob")
+ }
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || length > len(dst)-d {
+ if debugErrs {
+ fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
+ }
+ return decodeErrCodeCorrupt
+ }
+
+ // copy from dict
+ if d < offset {
+ if d > MaxDictSrcOffset {
+ if debugErrs {
+ fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ rOff := len(dict.dict) - (offset - d)
+ if debug {
+ fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
+ }
+ if rOff+length > len(dict.dict) {
+ if debugErrs {
+ fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ if rOff < 0 {
+ if debugErrs {
+ fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
+ }
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:d+length], dict.dict[rOff:])
+ d += length
+ continue
+ }
+
+ if debug {
+ fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+ }
+
+ // Copy from an earlier sub-slice of dst to a later sub-slice.
+ // If no overlap, use the built-in copy:
+ if offset > length {
+ copy(dst[d:d+length], dst[d-offset:])
+ d += length
+ continue
+ }
+
+ // Unlike the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ //
+ // We align the slices into a and b and show the compiler they are the same size.
+ // This allows the loop to run without bounds checks.
+ a := dst[d : d+length]
+ b := dst[d-offset:]
+ b = b[:len(a)]
+ for i := range a {
+ a[i] = b[i]
+ }
+ d += length
+ }
+
+ if d != len(dst) {
+ if debugErrs {
+ fmt.Println("wanted length", len(dst), "got", d)
+ }
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
index 11300c3a81..2cb55c2c77 100644
--- a/vendor/github.com/klauspost/compress/s2/decode_other.go
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -57,6 +57,9 @@ func s2Decode(dst, src []byte) int {
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
return decodeErrCodeCorrupt
}
if debug {
@@ -109,6 +112,10 @@ func s2Decode(dst, src []byte) int {
}
if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
+
return decodeErrCodeCorrupt
}
@@ -175,6 +182,9 @@ func s2Decode(dst, src []byte) int {
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+ if debug {
+ fmt.Println("corrupt: lit size", length)
+ }
return decodeErrCodeCorrupt
}
if debug {
@@ -241,6 +251,9 @@ func s2Decode(dst, src []byte) int {
}
if offset <= 0 || d < offset || length > len(dst)-d {
+ if debug {
+ fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+ }
return decodeErrCodeCorrupt
}
diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go
new file mode 100644
index 0000000000..24f7ce80bc
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/dict.go
@@ -0,0 +1,331 @@
+// Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "sync"
+)
+
+const (
+ // MinDictSize is the minimum dictionary size when repeat has been read.
+ MinDictSize = 16
+
+ // MaxDictSize is the maximum dictionary size when repeat has been read.
+ MaxDictSize = 65536
+
+ // MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
+ MaxDictSrcOffset = 65535
+)
+
+// Dict contains a dictionary that can be used for encoding and decoding s2
+type Dict struct {
+ dict []byte
+ repeat int // Repeat as index of dict
+
+ fast, better, best sync.Once
+ fastTable *[1 << 14]uint16
+
+ betterTableShort *[1 << 14]uint16
+ betterTableLong *[1 << 17]uint16
+
+ bestTableShort *[1 << 16]uint32
+ bestTableLong *[1 << 19]uint32
+}
+
+// NewDict will read a dictionary.
+// It will return nil if the dictionary is invalid.
+func NewDict(dict []byte) *Dict {
+ if len(dict) == 0 {
+ return nil
+ }
+ var d Dict
+ // Repeat is the first value of the dict
+ r, n := binary.Uvarint(dict)
+ if n <= 0 {
+ return nil
+ }
+ dict = dict[n:]
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize || len(dict) > MaxDictSize {
+ return nil
+ }
+ d.repeat = int(r)
+ if d.repeat > len(dict) {
+ return nil
+ }
+ return &d
+}
+
+// Bytes will return a serialized version of the dictionary.
+// The output can be sent to NewDict.
+func (d *Dict) Bytes() []byte {
+ dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
+ return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
+}
+
+// MakeDict will create a dictionary.
+// 'data' must be at least MinDictSize.
+// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
+// If searchStart is set the start repeat value will be set to the last
+// match of this content.
+// If no matches are found, it will attempt to find shorter matches.
+// This content should match the typical start of a block.
+// If at least 4 bytes cannot be matched, repeat is set to start of block.
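+//
+// A sketch (the header bytes are a hypothetical example):
+//
+//	dict := MakeDict(sampleData, []byte("HDR1"))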
+func MakeDict(data []byte, searchStart []byte) *Dict {
+ if len(data) == 0 {
+ return nil
+ }
+ if len(data) > MaxDictSize {
+ data = data[len(data)-MaxDictSize:]
+ }
+ var d Dict
+ dict := data
+ d.dict = dict
+ if cap(d.dict) < len(d.dict)+16 {
+ d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+ }
+ if len(dict) < MinDictSize {
+ return nil
+ }
+
+ // Find the longest match possible, last entry if multiple.
+ for s := len(searchStart); s > 4; s-- {
+ if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
+ d.repeat = idx
+ break
+ }
+ }
+
+ return &d
+}
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if cap(dst) < n {
+ dst = make([]byte, n)
+ } else {
+ dst = dst[:n]
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockDictGo(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBetter(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBetterDict(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// EncodeBest returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// EncodeBest compresses as well as reasonably possible but with a
+// big speed decrease.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+//
+// The blocks will require the same amount of memory to decode as to encode,
+// and do not allow concurrent decoding.
+// Also note that blocks do not contain CRC information, so corruption may be undetected.
+//
+// If you need to encode larger amounts of data, consider using
+// the streaming interface which gives all of these features.
+func (d *Dict) EncodeBest(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ dstP := binary.PutUvarint(dst, uint64(len(src)))
+
+ if len(src) == 0 {
+ return dst[:dstP]
+ }
+ if len(src) < minNonLiteralBlockSize {
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+ }
+ n := encodeBlockBest(dst[dstP:], src, d)
+ if n > 0 {
+ dstP += n
+ return dst[:dstP]
+ }
+ // Not compressible
+ dstP += emitLiteral(dst[dstP:], src)
+ return dst[:dstP]
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= cap(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ if s2DecodeDict(dst, src[s:], d) != 0 {
+ return nil, ErrCorrupt
+ }
+ return dst, nil
+}
+
+func (d *Dict) initFast() {
+ d.fast.Do(func() {
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint16
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8-2; i += 3 {
+ x0 := load64(d.dict, i)
+ h0 := hash6(x0, tableBits)
+ h1 := hash6(x0>>8, tableBits)
+ h2 := hash6(x0>>16, tableBits)
+ table[h0] = uint16(i)
+ table[h1] = uint16(i + 1)
+ table[h2] = uint16(i + 2)
+ }
+ d.fastTable = &table
+ })
+}
+
+func (d *Dict) initBetter() {
+ d.better.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint16
+ var sTable [maxSTableSize]uint16
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ lTable[hash7(cv, lTableBits)] = uint16(i)
+ sTable[hash4(cv, sTableBits)] = uint16(i)
+ }
+ d.betterTableShort = &sTable
+ d.betterTableLong = &lTable
+ })
+}
+
+func (d *Dict) initBest() {
+ d.best.Do(func() {
+ const (
+ // Long hash matches.
+ lTableBits = 19
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 16
+ maxSTableSize = 1 << sTableBits
+ )
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // We stop so any entry of length 8 can always be read.
+ for i := 0; i < len(d.dict)-8; i++ {
+ cv := load64(d.dict, i)
+ hashL := hash8(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL := lTable[hashL]
+ candidateS := sTable[hashS]
+ lTable[hashL] = uint32(i) | candidateL<<16
+ sTable[hashS] = uint32(i) | candidateS<<16
+ }
+ d.bestTableShort = &sTable
+ d.bestTableLong = &lTable
+ })
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
index 1aefabf313..c2ca7236a1 100644
--- a/vendor/github.com/klauspost/compress/s2/encode.go
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -58,6 +58,32 @@ func Encode(dst, src []byte) []byte {
return dst[:d]
}
+// EstimateBlockSize will perform a very fast compression
+// without outputting the result and return the compressed output size.
+// The function returns -1 if no improvement could be achieved.
+// Using actual compression will most often produce better compression than the estimate.
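+//
+// A typical use is skipping compression when no gain is estimated:
+//
+//	if EstimateBlockSize(src) < 0 {
+//		// Store src uncompressed.
+//	}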
+func EstimateBlockSize(src []byte) (d int) {
+ if len(src) < 6 || int64(len(src)) > 0xffffffff {
+ return -1
+ }
+ if len(src) <= 1024 {
+ d = calcBlockSizeSmall(src)
+ } else {
+ d = calcBlockSize(src)
+ }
+
+ if d == 0 {
+ return -1
+ }
+ // Size of the varint encoded block size.
+ d += (bits.Len64(uint64(len(src))) + 7) / 7
+
+ if d >= len(src) {
+ return -1
+ }
+ return d
+}
+
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
@@ -132,7 +158,7 @@ func EncodeBest(dst, src []byte) []byte {
d += emitLiteral(dst[d:], src)
return dst[:d]
}
- n := encodeBlockBest(dst[d:], src)
+ n := encodeBlockBest(dst[d:], src, nil)
if n > 0 {
d += n
return dst[:d]
@@ -404,10 +430,11 @@ type Writer struct {
buffers sync.Pool
pad int
- writer io.Writer
- randSrc io.Reader
- writerWg sync.WaitGroup
- index Index
+ writer io.Writer
+ randSrc io.Reader
+ writerWg sync.WaitGroup
+ index Index
+ customEnc func(dst, src []byte) int
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
@@ -773,6 +800,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
}
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
+ if w.customEnc != nil {
+ return w.customEnc(obuf, uncompressed)
+ }
if w.snappy {
switch w.level {
case levelFast:
@@ -790,7 +820,7 @@ func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
case levelBetter:
return encodeBlockBetter(obuf, uncompressed)
case levelBest:
- return encodeBlockBest(obuf, uncompressed)
+ return encodeBlockBest(obuf, uncompressed, nil)
}
return 0
}
@@ -1339,3 +1369,15 @@ func WriterFlushOnWrite() WriterOption {
return nil
}
}
+
+// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
+// Block size (initial varint) should not be added by the encoder.
+// Returning value 0 indicates the block could not be compressed.
+// The function should expect to be called concurrently.
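+//
+// A sketch that stores every block uncompressed, since returning 0 marks
+// the block as not compressible:
+//
+//	w := NewWriter(out, WriterCustomEncoder(func(dst, src []byte) int {
+//		return 0
+//	}))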
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+ return func(w *Writer) error {
+ w.customEnc = fn
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
index 54c71d3b5d..11657f0949 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_all.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -8,6 +8,7 @@ package s2
import (
"bytes"
"encoding/binary"
+ "fmt"
"math/bits"
)
@@ -455,3 +456,594 @@ emitRemainder:
}
return d
}
+
+// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+ dict.initFast()
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form can start with a dict entry (copy or repeat).
+ s := 0
+
+ // Convert dict repeat to offset
+ repeat := len(dict.dict) - dict.repeat
+ cv := load64(src, 0)
+
+ // While in dict
+searchDict:
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ if nextS > sLimit {
+ if debug {
+ fmt.Println("slimit reached", s, nextS)
+ }
+ break searchDict
+ }
+ candidateDict := int(dict.fastTable[hash0])
+ candidateDict2 := int(dict.fastTable[hash1])
+ candidate2 := int(table[hash1])
+ candidate := int(table[hash0])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+
+ if repeat > s {
+ candidate := len(dict.dict) - repeat + s
+ if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ cv = load64(src, s)
+ continue
+ }
+ } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ if debug {
+ fmt.Println("emitted reg repeat", s-base, "s:", s)
+ }
+ cv = load64(src, s)
+ continue searchDict
+ }
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+ }
+ // Start with table. These matches will always be closer.
+ if uint32(cv) == load32(src, candidate) {
+ goto emitMatch
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ goto emitMatch
+ }
+
+ // Check dict. Dicts have longer offsets, so we want longer matches.
+ if cv == load64(dict.dict, candidateDict) {
+ table[hash2] = uint32(s + 2)
+ goto emitDict
+ }
+
+ candidateDict = int(dict.fastTable[hash2])
+ // Check if upper 7 bytes match
+ if candidateDict2 >= 1 {
+ if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
+ table[hash2] = uint32(s + 2)
+ candidateDict = candidateDict2
+ s++
+ goto emitDict
+ }
+ }
+
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ goto emitMatch
+ }
+ if candidateDict >= 2 {
+ // Check if upper 6 bytes match
+ if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
+ s += 2
+ goto emitDict
+ }
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ continue searchDict
+
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateDict) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
+ candidateDict--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = s + (len(dict.dict)) - candidateDict
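+ // The offset treats the dict as if it were laid out immediately before src.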
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateDict += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateDict += 8
+ }
+
+ // Matches extending past sLimit are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], repeat, s-base)
+ } else {
+ // Split to ensure we don't start a copy within next block
+ d += emitCopy(dst[d:], repeat, 4)
+ d += emitRepeat(dst[d:], repeat, s-base-4)
+ }
+ if false {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index and continue loop to try new candidate.
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>8, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s - 1)
+ cv = load64(src, s)
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+ // Search without dict:
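+ // A repeat still pointing into the dict cannot be used from here on.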
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ if debug {
+ fmt.Println("non-dict matching at", s, "repeat:", repeat)
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if nextEmit > 0 {
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ } else {
+ // First match, cannot be repeat.
+ d += emitCopy(dst[d:], repeat, s-base)
+ }
+ if debug {
+ fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopy(dst[d:], repeat, s-base)
+ if debug {
+ // Validate match.
+ if s <= candidate {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+ if debug {
+ fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if debug && s == candidate {
+ panic("s == candidate")
+ }
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ if debug {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
index 6b93daa5ae..ebc332ad5f 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -3,6 +3,8 @@
package s2
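+// hasAmd64Asm indicates that assembler implementations are present.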
+const hasAmd64Asm = true
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
index 1b7ea394fa..1d13e869a1 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_best.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -7,6 +7,7 @@ package s2
import (
"fmt"
+ "math"
"math/bits"
)
@@ -18,7 +19,7 @@ import (
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
-func encodeBlockBest(dst, src []byte) (d int) {
+func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
@@ -30,6 +31,8 @@ func encodeBlockBest(dst, src []byte) (d int) {
maxSTableSize = 1 << sTableBits
inputMargin = 8 + 2
+
+ debug = false
)
// sLimit is when to stop looking for offset/length copies. The inputMargin
@@ -39,6 +42,10 @@ func encodeBlockBest(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
+ sLimitDict := len(src) - inputMargin
+ if sLimitDict > MaxDictSrcOffset-inputMargin {
+ sLimitDict = MaxDictSrcOffset - inputMargin
+ }
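+ // Non-repeat dict matches stop extending at sLimitDict.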
var lTable [maxLTableSize]uint64
var sTable [maxSTableSize]uint64
@@ -52,10 +59,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
+ repeat := 1
+ if dict != nil {
+ dict.initBest()
+ s = 0
+ repeat = len(dict.dict) - dict.repeat
+ }
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
- repeat := 1
const lowbitMask = 0xffffffff
getCur := func(x uint64) int {
return int(x & lowbitMask)
@@ -67,11 +79,11 @@ func encodeBlockBest(dst, src []byte) (d int) {
for {
type match struct {
- offset int
- s int
- length int
- score int
- rep bool
+ offset int
+ s int
+ length int
+ score int
+ rep, dict bool
}
var best match
for {
@@ -85,6 +97,12 @@ func encodeBlockBest(dst, src []byte) (d int) {
if nextS > sLimit {
goto emitRemainder
}
+ if dict != nil && s >= MaxDictSrcOffset {
+ dict = nil
+ if repeat > s {
+ repeat = math.MinInt32
+ }
+ }
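+ // Past MaxDictSrcOffset, dict matches can no longer be referenced;
+ // drop the dict and invalidate any dict-relative repeat.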
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
@@ -114,7 +132,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
}
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
s += 4
- for s <= sLimit {
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
@@ -130,6 +156,62 @@ func encodeBlockBest(dst, src []byte) (d int) {
}
return m
}
+ matchDict := func(candidate, s int, first uint32, rep bool) match {
+ // Calculate offset as if in continuous array with s
+ offset := -len(dict.dict) + candidate
+ if best.length != 0 && best.s-best.offset == s-offset && !rep {
+ // Don't retest if we have the same offset.
+ return match{offset: offset, s: s}
+ }
+
+ if load32(dict.dict, candidate) != first {
+ return match{offset: offset, s: s}
+ }
+ m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
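+ // m.length temporarily holds the end position in dict; it is converted to a length below.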
+ s += 4
+ if !rep {
+ for s < sLimitDict && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ } else {
+ for s < len(src) && m.length < len(dict.dict) {
+ if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
+ if src[s] == dict.dict[m.length] {
+ m.length++
+ s++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
+ m.length += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ m.length += 8
+ }
+ }
+ m.length -= candidate
+ m.score = score(m)
+ if m.score <= -m.s {
+ // Eliminate if no savings, we might find a better one.
+ m.length = 0
+ }
+ return m
+ }
bestOf := func(a, b match) match {
if b.length == 0 {
@@ -146,35 +228,82 @@ func encodeBlockBest(dst, src []byte) (d int) {
return b
}
- best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
- best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
- best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
-
+ if s > 0 {
+ best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
+ best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
+ }
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
+ }
{
- best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
+ } else if s-repeat < -4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ candidate++
+ best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
+ }
+
if best.length > 0 {
+ hashS := hash4(cv>>8, sTableBits)
// s+1
- nextShort := sTable[hash4(cv>>8, sTableBits)]
+ nextShort := sTable[hashS]
s := s + 1
cv := load64(src, s)
- nextLong := lTable[hash8(cv, lTableBits)]
+ hashL := hash8(cv, lTableBits)
+ nextLong := lTable[hashL]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
- // Repeat at + 2
- best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
+
+ // Dict at + 1
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
// s+2
if true {
- nextShort = sTable[hash4(cv>>8, sTableBits)]
+ hashS := hash4(cv>>8, sTableBits)
+
+ nextShort = sTable[hashS]
s++
cv = load64(src, s)
- nextLong = lTable[hash8(cv, lTableBits)]
+ hashL := hash8(cv, lTableBits)
+ nextLong = lTable[hashL]
+
+ if (dict == nil || repeat <= s) && repeat > 0 {
+ // Repeat at + 2
+ best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
+ } else if repeat-s > 4 && dict != nil {
+ candidate := len(dict.dict) - (repeat - s)
+ best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
+ }
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
+
+ // Dict at +2
+ // Very small gain
+ if dict != nil {
+ candidateL := dict.bestTableLong[hashL]
+ candidateS := dict.bestTableShort[hashS]
+
+ best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
+ best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
+ }
}
// Search for a match at best match end, see if that is better.
// Allow some bytes at the beginning to mismatch.
@@ -227,7 +356,7 @@ func encodeBlockBest(dst, src []byte) (d int) {
// Extend backwards, not needed for repeats...
s = best.s
- if !best.rep {
+ if !best.rep && !best.dict {
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
best.offset--
best.length++
@@ -244,7 +373,6 @@ func encodeBlockBest(dst, src []byte) (d int) {
base := s
offset := s - best.offset
-
s += best.length
if offset > 65535 && s-base <= 5 && !best.rep {
@@ -256,16 +384,28 @@ func encodeBlockBest(dst, src []byte) (d int) {
cv = load64(src, s)
continue
}
+ if debug && nextEmit != base {
+ fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
+ }
d += emitLiteral(dst[d:], src[nextEmit:base])
if best.rep {
- if nextEmit > 0 {
+ if nextEmit > 0 || best.dict {
+ if debug {
+ fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], offset, best.length)
} else {
- // First match, cannot be repeat.
+ // First match without dict cannot be a repeat.
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
d += emitCopy(dst[d:], offset, best.length)
}
} else {
+ if debug {
+ fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
+ }
d += emitCopy(dst[d:], offset, best.length)
}
repeat = offset
@@ -296,6 +436,9 @@ emitRemainder:
if d+len(src)-nextEmit > dstLimit {
return 0
}
+ if debug {
+ fmt.Println("emitted ", len(src)-nextEmit, "literals")
+ }
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
@@ -642,7 +785,6 @@ func emitRepeatSize(offset, length int) int {
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
- length = maxRepeat - 4
}
if left > 0 {
return 5 + emitRepeatSize(offset, left)
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
index 3b66ba42bf..f46adb4117 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_better.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -6,6 +6,8 @@
package s2
import (
+ "bytes"
+ "fmt"
"math/bits"
)
@@ -476,3 +478,623 @@ emitRemainder:
}
return d
}
+
+// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ // Initialize the hash tables.
+ const (
+ // Long hash matches.
+ lTableBits = 17
+ maxLTableSize = 1 << lTableBits
+
+ // Short hash matches.
+ sTableBits = 14
+ maxSTableSize = 1 << sTableBits
+
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
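+ // Dict matches must start before MaxDictSrcOffset, so cap the dict search loop.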
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+
+ dict.initBetter()
+
+ var lTable [maxLTableSize]uint32
+ var sTable [maxSTableSize]uint32
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 6
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form can start with a dict entry (copy or repeat),
+ // so we start looking for hash matches at s == 0.
+ s := 0
+ cv := load64(src, s)
+
+ // Convert dict repeat to offset
+ repeat := len(dict.dict) - dict.repeat
+
+ // While in dict
+searchDict:
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
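+ // Skip ahead more conservatively than the fast encoder (>>7 + 1 vs. >>6 + 4).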
+ if nextS > sLimit {
+ break searchDict
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ dictL := int(dict.betterTableLong[hashL])
+ dictS := int(dict.betterTableShort[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If the long candidate matches at least 8 bytes, use that.
+ if s != 0 {
+ if cv == valLong {
+ goto emitMatch
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ goto emitMatch
+ }
+ }
+
+ // Check dict repeat.
+ if repeat >= s+4 {
+ candidate := len(dict.dict) - repeat + s
+ if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ s += 4
+ candidate += 4
+ for candidate < len(dict.dict)-8 && s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ d += emitRepeat(dst[d:], repeat, s-base)
+ if debug {
+ fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
+ }
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ cv = load64(src, s)
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+ continue
+ }
+ }
+ // Don't try to find a match at s == 0.
+ if s == 0 {
+ cv = load64(src, nextS)
+ s = nextS
+ continue
+ }
+
+ // The long (7-byte hash) candidate matches at least 4 bytes, likely 7; take it.
+ if uint32(cv) == uint32(valLong) {
+ goto emitMatch
+ }
+
+ // Check the long dict candidate.
+ if uint32(cv) == load32(dict.dict, dictL) {
+ candidateL = dictL
+ goto emitDict
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ goto emitMatch
+ }
+ if uint32(cv) == load32(dict.dict, dictS) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ goto emitMatch
+ }
+ candidateL = dictS
+ goto emitDict
+ }
+ cv = load64(src, nextS)
+ s = nextS
+ }
+ emitDict:
+ {
+ if debug {
+ if load32(dict.dict, candidateL) != load32(src, s) {
+ panic("dict emit mismatch")
+ }
+ }
+ // Extend backwards.
+ // The top bytes will be rechecked to get the full match.
+ for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+ if debug && nextEmit != s {
+ fmt.Println("emitted ", s-nextEmit, "literals")
+ }
+ {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ offset := s + (len(dict.dict)) - candidateL
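+ // The offset treats the dict as if it were laid out immediately before src.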
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
+ if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
+ }
+ // Matches extending past sLimit are split.
+ if s <= sLimit || s-base < 8 {
+ d += emitCopy(dst[d:], offset, s-base)
+ } else {
+ // Split to ensure we don't start a copy within next block.
+ d += emitCopy(dst[d:], offset, 4)
+ d += emitRepeat(dst[d:], offset, s-base-4)
+ }
+ repeat = offset
+ }
+ if false {
+ // Validate match.
+ if s <= candidateL {
+ panic("s <= candidate")
+ }
+ a := src[base:s]
+ b := dict.dict[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ break searchDict
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
+ }
+ continue
+ }
+ emitMatch:
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if debug && nextEmit != base {
+ fmt.Println("emitted ", base-nextEmit, "literals")
+ }
+ if repeat == offset {
+ if debug {
+ fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ if debug {
+ fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
+ }
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
+ }
+
+ // Search without dict:
+ if repeat > s {
+ repeat = 0
+ }
+
+ // No more dict
+ sLimit = len(src) - inputMargin
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ if debug {
+ fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
+ }
+ for {
+ candidateL := 0
+ nextS := 0
+ for {
+ // Next src position to check
+ nextS = s + (s-nextEmit)>>7 + 1
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hashL := hash7(cv, lTableBits)
+ hashS := hash4(cv, sTableBits)
+ candidateL = int(lTable[hashL])
+ candidateS := int(sTable[hashS])
+ lTable[hashL] = uint32(s)
+ sTable[hashS] = uint32(s)
+
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If the long candidate matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offer improvements in some cases, 6 significantly
+ // reduces regressions.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
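+ // The mask covers wantRepeatBytes bytes starting checkRep bytes in.
+ // Note: this branch is currently disabled via the false below.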
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidate] {
+ s++
+ candidate++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ cv = load64(src, s)
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ // The long (7-byte hash) candidate matches at least 4 bytes, likely 7; take it.
+ if uint32(cv) == uint32(valLong) {
+ break
+ }
+
+ // Check our short candidate
+ if uint32(cv) == uint32(valShort) {
+ // Try a long candidate at s+1
+ hashL = hash7(cv>>8, lTableBits)
+ candidateL = int(lTable[hashL])
+ lTable[hashL] = uint32(s + 1)
+ if uint32(cv>>8) == load32(src, candidateL) {
+ s++
+ break
+ }
+ // Use our short candidate.
+ candidateL = candidateS
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
+ candidateL--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ base := s
+ offset := base - candidateL
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidateL += 4
+ for s < len(src) {
+ if len(src)-s < 8 {
+ if src[s] == src[candidateL] {
+ s++
+ candidateL++
+ continue
+ }
+ break
+ }
+ if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidateL += 8
+ }
+
+ if offset > 65535 && s-base <= 5 && repeat != offset {
+ // Bail if the match is equal to or worse than the encoding.
+ s = nextS + 1
+ if s >= sLimit {
+ goto emitRemainder
+ }
+ cv = load64(src, s)
+ continue
+ }
+
+ d += emitLiteral(dst[d:], src[nextEmit:base])
+ if repeat == offset {
+ d += emitRepeat(dst[d:], offset, s-base)
+ } else {
+ d += emitCopy(dst[d:], offset, s-base)
+ repeat = offset
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+
+ // Index short & long
+ index0 := base + 1
+ index1 := s - 2
+
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
index db08fc355e..d7749d75c2 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_go.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -4,9 +4,12 @@
package s2
import (
+ "bytes"
"math/bits"
)
+const hasAmd64Asm = false
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@@ -312,3 +315,405 @@ func matchLen(a []byte, b []byte) int {
}
return len(a) + checked
}
+
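+// calcBlockSize returns the number of bytes src would occupy when encoded
+// as a block. It mirrors the block encoder but only tallies output sizes,
+// writing nothing.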
+func calcBlockSize(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 13
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
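+// calcBlockSizeSmall is calcBlockSize with a smaller hash table, for inputs
+// of at most 1024 bytes.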
+func calcBlockSizeSmall(src []byte) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 9
+ maxTableSize = 1 << tableBits
+ )
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ cv := load64(src, s)
+
+ // We search for a repeat at -1, but don't output repeats when nextEmit == 0
+ repeat := 1
+
+ for {
+ candidate := 0
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ candidate = int(table[hash0])
+ candidate2 := int(table[hash1])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+ if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ base := s + checkRep
+ // Extend back
+ for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ d += emitLiteralSize(src[nextEmit:base])
+
+ // Extend forward
+ candidate := s - repeat + 4 + checkRep
+ s += 4 + checkRep
+ for s <= sLimit {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ cv = load64(src, s)
+ continue
+ }
+
+ if uint32(cv) == load32(src, candidate) {
+ break
+ }
+ candidate = int(table[hash2])
+ if uint32(cv>>8) == load32(src, candidate2) {
+ table[hash2] = uint32(s + 2)
+ candidate = candidate2
+ s++
+ break
+ }
+ table[hash2] = uint32(s + 2)
+ if uint32(cv>>16) == load32(src, candidate) {
+ s += 2
+ break
+ }
+
+ cv = load64(src, nextS)
+ s = nextS
+ }
+
+ // Extend backwards
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
+ candidate--
+ s--
+ }
+
+ // Bail if we exceed the maximum size.
+ if d+(s-nextEmit) > dstLimit {
+ return 0
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ d += emitLiteralSize(src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+ repeat = base - candidate
+
+ // Extend the 4-byte match as long as possible.
+ s += 4
+ candidate += 4
+ for s <= len(src)-8 {
+ if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
+ s += bits.TrailingZeros64(diff) >> 3
+ break
+ }
+ s += 8
+ candidate += 8
+ }
+
+ d += emitCopyNoRepeatSize(repeat, s-base)
+ if false {
+ // Validate match.
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more? If not, bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteralSize(src[nextEmit:])
+ }
+ return d
+}
+
+// emitLiteralSize returns the number of bytes the literal chunk would occupy
+// when emitted: one tag byte, 0-4 extra length bytes, plus the literal itself.
+//
+// It assumes that:
+//
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralSize(lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ switch {
+ case len(lit) <= 60:
+ return len(lit) + 1
+ case len(lit) <= 1<<8:
+ return len(lit) + 2
+ case len(lit) <= 1<<16:
+ return len(lit) + 3
+ case len(lit) <= 1<<24:
+ return len(lit) + 4
+ default:
+ return len(lit) + 5
+ }
+}
+
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockAsm should be unreachable")
+}
+
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+ panic("cvtLZ4BlockSnappyAsm should be unreachable")
+}
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index 7e00bac3ea..9f3dc8c29f 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -146,6 +146,20 @@ func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
//go:noescape
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+// calcBlockSize returns the size of a non-empty src when encoded as a block.
+// Maximum input 4294967295 bytes.
+//
+//go:noescape
+func calcBlockSize(src []byte) int
+
+// calcBlockSizeSmall returns the size of a non-empty src when encoded as a block.
+// Maximum input 1024 bytes.
+//
+//go:noescape
+func calcBlockSizeSmall(src []byte) int
+
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
@@ -192,3 +206,13 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int
//
//go:noescape
func matchLen(a []byte, b []byte) int
+
+// cvtLZ4BlockAsm converts an LZ4 block to S2.
+//
+//go:noescape
+func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4BlockSnappyAsm converts an LZ4 block to Snappy.
+//
+//go:noescape
+func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 81a487d6de..19bd5237bc 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -36,8 +36,8 @@ zero_loop_encodeBlockAsm:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -47,609 +47,601 @@ zero_loop_encodeBlockAsm:
MOVQ src_base+24(FP), DX
search_loop_encodeBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBlockAsm
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x10, R11
- IMULQ R9, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
SHLQ $0x10, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x32, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
JNE no_repeat_found_encodeBlockAsm
- LEAL 1(CX), DI
- MOVL 12(SP), R8
- MOVL DI, SI
- SUBL 16(SP), SI
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
JZ repeat_extend_back_end_encodeBlockAsm
repeat_extend_back_loop_encodeBlockAsm:
- CMPL DI, R8
+ CMPL SI, DI
JLE repeat_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(SI*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeBlockAsm
- LEAL -1(DI), DI
- DECL SI
+ LEAL -1(SI), SI
+ DECL BX
JNZ repeat_extend_back_loop_encodeBlockAsm
repeat_extend_back_end_encodeBlockAsm:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeBlockAsm
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeBlockAsm
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_repeat_emit_encodeBlockAsm
- CMPL SI, $0x01000000
+ CMPL BX, $0x01000000
JLT four_bytes_repeat_emit_encodeBlockAsm
MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_repeat_emit_encodeBlockAsm
four_bytes_repeat_emit_encodeBlockAsm:
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_repeat_emit_encodeBlockAsm
three_bytes_repeat_emit_encodeBlockAsm:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeBlockAsm
two_bytes_repeat_emit_encodeBlockAsm:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeBlockAsm
JMP memmove_long_repeat_emit_encodeBlockAsm
one_byte_repeat_emit_encodeBlockAsm:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_repeat_emit_encodeBlockAsm:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeBlockAsm
memmove_long_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R12
- SHRQ $0x05, R12
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R13
- SUBQ R11, R13
- DECQ R12
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R10)(R13*1), R11
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R12
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R13*1), X4
- MOVOU -16(R10)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R9, R13
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeBlockAsm:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R9
- SUBL CX, R9
- LEAQ (DX)(CX*1), R10
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R12, R12
- CMPL R9, $0x08
+ XORL R11, R11
+ CMPL R8, $0x08
JL matchlen_match4_repeat_extend_encodeBlockAsm
matchlen_loopback_repeat_extend_encodeBlockAsm:
- MOVQ (R10)(R12*1), R11
- XORQ (SI)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_loop_repeat_extend_encodeBlockAsm:
- LEAL -8(R9), R9
- LEAL 8(R12), R12
- CMPL R9, $0x08
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm
JZ repeat_extend_forward_end_encodeBlockAsm
matchlen_match4_repeat_extend_encodeBlockAsm:
- CMPL R9, $0x04
+ CMPL R8, $0x04
JL matchlen_match2_repeat_extend_encodeBlockAsm
- MOVL (R10)(R12*1), R11
- CMPL (SI)(R12*1), R11
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm
- SUBL $0x04, R9
- LEAL 4(R12), R12
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm:
- CMPL R9, $0x02
+ CMPL R8, $0x02
JL matchlen_match1_repeat_extend_encodeBlockAsm
- MOVW (R10)(R12*1), R11
- CMPW (SI)(R12*1), R11
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm
- SUBL $0x02, R9
- LEAL 2(R12), R12
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
matchlen_match1_repeat_extend_encodeBlockAsm:
- CMPL R9, $0x01
+ CMPL R8, $0x01
JL repeat_extend_forward_end_encodeBlockAsm
- MOVB (R10)(R12*1), R11
- CMPB (SI)(R12*1), R11
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeBlockAsm:
- ADDL R12, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
- TESTL R8, R8
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
JZ repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_repeat_encodeBlockAsm:
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_match_repeat_encodeBlockAsm
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_match_repeat_encodeBlockAsm
cant_repeat_two_offset_match_repeat_encodeBlockAsm:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_match_repeat_encodeBlockAsm
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_match_repeat_encodeBlockAsm
- CMPL SI, $0x0100ffff
+ CMPL BX, $0x0100ffff
JLT repeat_five_match_repeat_encodeBlockAsm
- LEAL -16842747(SI), SI
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_repeat_encodeBlockAsm
repeat_five_match_repeat_encodeBlockAsm:
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_match_repeat_encodeBlockAsm:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_match_repeat_encodeBlockAsm:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_match_repeat_encodeBlockAsm:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_match_repeat_encodeBlockAsm:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_as_copy_encodeBlockAsm:
// emitCopy
- CMPL DI, $0x00010000
+ CMPL SI, $0x00010000
JL two_byte_offset_repeat_as_copy_encodeBlockAsm
-
-four_bytes_loop_back_repeat_as_copy_encodeBlockAsm:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm
MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(SI), SI
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
ADDQ $0x05, AX
- CMPL SI, $0x04
+ CMPL BX, $0x04
JL four_bytes_remain_repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL SI, $0x0100ffff
+ CMPL BX, $0x0100ffff
JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
- LEAL -16842747(SI), SI
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
- JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm
four_bytes_remain_repeat_as_copy_encodeBlockAsm:
- TESTL SI, SI
+ TESTL BX, BX
JZ repeat_end_emit_encodeBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVL DI, 1(AX)
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm
two_byte_offset_repeat_as_copy_encodeBlockAsm:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm
- MOVL $0x00000001, R8
- LEAL 16(R8), R8
- MOVB DI, 1(AX)
- MOVL DI, R9
- SHRL $0x08, R9
- SHLL $0x05, R9
- ORL R9, R8
- MOVB R8, (AX)
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, SI
+ SUBL $0x08, BX
// emitRepeat
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x0100ffff
+ CMPL BX, $0x0100ffff
JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(SI), SI
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
long_offset_short_repeat_as_copy_encodeBlockAsm:
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x0100ffff
+ CMPL BX, $0x0100ffff
JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
- LEAL -16842747(SI), SI
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(BX), BX
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
+ ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
- JMP two_byte_offset_repeat_as_copy_encodeBlockAsm
two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm
emit_copy_three_repeat_as_copy_encodeBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeBlockAsm:
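
Note between hunks: most of the churn in this regenerated file is mechanical — every scratch register is shifted down one slot (SI→BX, DI→SI, R8→DI, …, R14→R13), and byte stores that previously targeted SIL/DIL now use BL, which encodes without a REX prefix (presumably the point of freeing BX from its old role as a constant holder). A second recurring change merges two adjacent 16-bit stores, MOVW $0x001d, (AX) followed by MOVW $0xfffb, 2(AX), into the single 32-bit store MOVL $0xfffb001d, (AX). On little-endian x86 the two forms write identical bytes; a minimal Go check of that equivalence (illustrative only, not part of the vendored code):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		a := make([]byte, 4)
		binary.LittleEndian.PutUint16(a[0:], 0x001d) // MOVW $0x001d, (AX)
		binary.LittleEndian.PutUint16(a[2:], 0xfffb) // MOVW $0xfffb, 2(AX)

		b := make([]byte, 4)
		binary.LittleEndian.PutUint32(b, 0xfffb001d) // MOVL $0xfffb001d, (AX)

		fmt.Println(bytes.Equal(a, b)) // true: one store replaces two
	}
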
@@ -657,16 +649,16 @@ repeat_end_emit_encodeBlockAsm:
JMP search_loop_encodeBlockAsm
no_repeat_found_encodeBlockAsm:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBlockAsm
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeBlockAsm
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeBlockAsm
MOVL 20(SP), CX
JMP search_loop_encodeBlockAsm
@@ -676,549 +668,542 @@ candidate3_match_encodeBlockAsm:
JMP candidate_match_encodeBlockAsm
candidate2_match_encodeBlockAsm:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBlockAsm:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBlockAsm
match_extend_back_loop_encodeBlockAsm:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBlockAsm
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBlockAsm
JMP match_extend_back_loop_encodeBlockAsm
match_extend_back_end_encodeBlockAsm:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 5(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBlockAsm:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeBlockAsm
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeBlockAsm
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JLT three_bytes_match_emit_encodeBlockAsm
- CMPL R8, $0x01000000
+ CMPL DI, $0x01000000
JLT four_bytes_match_emit_encodeBlockAsm
MOVB $0xfc, (AX)
- MOVL R8, 1(AX)
+ MOVL DI, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_match_emit_encodeBlockAsm
four_bytes_match_emit_encodeBlockAsm:
- MOVL R8, R10
- SHRL $0x10, R10
+ MOVL DI, R9
+ SHRL $0x10, R9
MOVB $0xf8, (AX)
- MOVW R8, 1(AX)
- MOVB R10, 3(AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeBlockAsm
three_bytes_match_emit_encodeBlockAsm:
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBlockAsm
two_bytes_match_emit_encodeBlockAsm:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeBlockAsm
JMP memmove_long_match_emit_encodeBlockAsm
one_byte_match_emit_encodeBlockAsm:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBlockAsm:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBlockAsm:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeBlockAsm
memmove_long_match_emit_encodeBlockAsm:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeBlockAsm:
match_nolit_loop_encodeBlockAsm:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeBlockAsm
matchlen_loopback_match_nolit_encodeBlockAsm:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeBlockAsm
matchlen_loop_match_nolit_encodeBlockAsm:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm
JZ match_nolit_end_encodeBlockAsm
matchlen_match4_match_nolit_encodeBlockAsm:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeBlockAsm
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeBlockAsm
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeBlockAsm:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeBlockAsm
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeBlockAsm:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JL two_byte_offset_match_nolit_encodeBlockAsm
-
-four_bytes_loop_back_match_nolit_encodeBlockAsm:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE four_bytes_remain_match_nolit_encodeBlockAsm
MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(R10), R10
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
ADDQ $0x05, AX
- CMPL R10, $0x04
+ CMPL R9, $0x04
JL four_bytes_remain_match_nolit_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy
- CMPL R10, $0x0100ffff
+ CMPL R9, $0x0100ffff
JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy
- LEAL -16842747(R10), R10
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
repeat_five_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
- JMP four_bytes_loop_back_match_nolit_encodeBlockAsm
four_bytes_remain_match_nolit_encodeBlockAsm:
- TESTL R10, R10
+ TESTL R9, R9
JZ match_nolit_emitcopy_end_encodeBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVL SI, 1(AX)
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
two_byte_offset_match_nolit_encodeBlockAsm:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- MOVL SI, R8
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, DI
- MOVB DI, (AX)
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ MOVL BX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R10
+ SUBL $0x08, R9
// emitRepeat
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R10, $0x0100ffff
+ CMPL R9, $0x0100ffff
JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(R10), R10
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
long_offset_short_match_nolit_encodeBlockAsm:
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R10, $0x0100ffff
+ CMPL R9, $0x0100ffff
JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
- LEAL -16842747(R10), R10
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R9), R9
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
- JMP two_byte_offset_match_nolit_encodeBlockAsm
two_byte_offset_short_match_nolit_encodeBlockAsm:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm
emit_copy_three_match_nolit_encodeBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeBlockAsm:
CMPL CX, 8(SP)
JGE emit_remainder_encodeBlockAsm
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeBlockAsm:
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x10, R8
- IMULQ R9, R8
- SHRQ $0x32, R8
- SHLQ $0x10, SI
- IMULQ R9, SI
- SHRQ $0x32, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeBlockAsm
INCL CX
JMP search_loop_encodeBlockAsm
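
The "two_byte_offset_short" and "emit_copy_three" blocks in the hunk above compute the standard Snappy-style copy tags; the regenerated code now derives the tag byte from length*4 (MOVL + SHLL $0x02) instead of first loading a constant into BL (the old MOVB $0x01, BL / LEAL -16(BX)(SI*4) sequence computes the same 4*length-15). A hedged Go sketch of the two encodings, matching the arithmetic visible in the assembly (function names are illustrative, not the upstream API):

	// emitCopy2 writes a 2-byte copy: 4 <= length <= 11, offset < 2048.
	// Tag byte = (length-4)<<2 | (offset>>8)<<5 | 0b01, i.e. length*4 - 15
	// OR'd with the high offset bits — exactly what LEAL -15(DI) builds.
	func emitCopy2(dst []byte, offset, length int) int {
		dst[0] = byte(length-4)<<2 | byte(offset>>8)<<5 | 0x01
		dst[1] = byte(offset)
		return 2
	}

	// emitCopy3 writes a 3-byte copy with a 16-bit little-endian offset.
	// Tag byte = (length-1)<<2 | 0b10, i.e. length*4 - 2 (LEAL -2(DI)).
	func emitCopy3(dst []byte, offset, length int) int {
		dst[0] = byte(length-1)<<2 | 0x02
		dst[1] = byte(offset)
		dst[2] = byte(offset >> 8)
		return 3
	}
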
@@ -1422,8 +1407,8 @@ zero_loop_encodeBlockAsm4MB:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -1433,555 +1418,551 @@ zero_loop_encodeBlockAsm4MB:
MOVQ src_base+24(FP), DX
search_loop_encodeBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBlockAsm4MB
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x10, R11
- IMULQ R9, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
SHLQ $0x10, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x32, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
JNE no_repeat_found_encodeBlockAsm4MB
- LEAL 1(CX), DI
- MOVL 12(SP), R8
- MOVL DI, SI
- SUBL 16(SP), SI
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
JZ repeat_extend_back_end_encodeBlockAsm4MB
repeat_extend_back_loop_encodeBlockAsm4MB:
- CMPL DI, R8
+ CMPL SI, DI
JLE repeat_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(SI*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeBlockAsm4MB
- LEAL -1(DI), DI
- DECL SI
+ LEAL -1(SI), SI
+ DECL BX
JNZ repeat_extend_back_loop_encodeBlockAsm4MB
repeat_extend_back_end_encodeBlockAsm4MB:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeBlockAsm4MB
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeBlockAsm4MB
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_repeat_emit_encodeBlockAsm4MB
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
three_bytes_repeat_emit_encodeBlockAsm4MB:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
two_bytes_repeat_emit_encodeBlockAsm4MB:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeBlockAsm4MB
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
one_byte_repeat_emit_encodeBlockAsm4MB:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
memmove_long_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R12
- SHRQ $0x05, R12
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R13
- SUBQ R11, R13
- DECQ R12
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R10)(R13*1), R11
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R12
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R13*1), X4
- MOVOU -16(R10)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R9, R13
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeBlockAsm4MB:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R9
- SUBL CX, R9
- LEAQ (DX)(CX*1), R10
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R12, R12
- CMPL R9, $0x08
+ XORL R11, R11
+ CMPL R8, $0x08
JL matchlen_match4_repeat_extend_encodeBlockAsm4MB
matchlen_loopback_repeat_extend_encodeBlockAsm4MB:
- MOVQ (R10)(R12*1), R11
- XORQ (SI)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_loop_repeat_extend_encodeBlockAsm4MB:
- LEAL -8(R9), R9
- LEAL 8(R12), R12
- CMPL R9, $0x08
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB
JZ repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match4_repeat_extend_encodeBlockAsm4MB:
- CMPL R9, $0x04
+ CMPL R8, $0x04
JL matchlen_match2_repeat_extend_encodeBlockAsm4MB
- MOVL (R10)(R12*1), R11
- CMPL (SI)(R12*1), R11
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
- SUBL $0x04, R9
- LEAL 4(R12), R12
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm4MB:
- CMPL R9, $0x02
+ CMPL R8, $0x02
JL matchlen_match1_repeat_extend_encodeBlockAsm4MB
- MOVW (R10)(R12*1), R11
- CMPW (SI)(R12*1), R11
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
- SUBL $0x02, R9
- LEAL 2(R12), R12
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
matchlen_match1_repeat_extend_encodeBlockAsm4MB:
- CMPL R9, $0x01
+ CMPL R8, $0x01
JL repeat_extend_forward_end_encodeBlockAsm4MB
- MOVB (R10)(R12*1), R11
- CMPB (SI)(R12*1), R11
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm4MB
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeBlockAsm4MB:
- ADDL R12, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
- TESTL R8, R8
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
JZ repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_match_repeat_encodeBlockAsm4MB
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_match_repeat_encodeBlockAsm4MB
cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_match_repeat_encodeBlockAsm4MB
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_match_repeat_encodeBlockAsm4MB
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_match_repeat_encodeBlockAsm4MB:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_match_repeat_encodeBlockAsm4MB:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_match_repeat_encodeBlockAsm4MB:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_as_copy_encodeBlockAsm4MB:
// emitCopy
- CMPL DI, $0x00010000
+ CMPL SI, $0x00010000
JL two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
-
-four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(SI), SI
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
ADDQ $0x05, AX
- CMPL SI, $0x04
+ CMPL BX, $0x04
JL four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
- JMP four_bytes_loop_back_repeat_as_copy_encodeBlockAsm4MB
four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
- TESTL SI, SI
+ TESTL BX, BX
JZ repeat_end_emit_encodeBlockAsm4MB
- MOVB $0x03, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVL DI, 1(AX)
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm4MB
two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
- MOVL $0x00000001, R8
- LEAL 16(R8), R8
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R8
- MOVB R8, (AX)
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, SI
+ SUBL $0x08, BX
// emitRepeat
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x00010100
+ CMPL BX, $0x00010100
JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(SI), SI
- MOVL SI, DI
+ LEAL -65536(BX), BX
+ MOVL BX, SI
MOVW $0x001d, (AX)
- MOVW SI, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
+ MOVW BX, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
- JMP two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm4MB
emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeBlockAsm4MB:
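
Two smaller cleanups also recur in these hunks: the unreachable JMP back to the now-deleted four_bytes_loop_back label (it followed an unconditional JMP, so it could never execute) is dropped, and the 5-byte copy tag in the "four_bytes_remain" blocks is computed without a byte-register constant. Old and new forms agree — (length-1)<<2 | 0b11 equals length*4 - 1 — as this illustrative helper shows:

	// old: MOVB $0x03, BL; LEAL -4(BX)(len*4)  => len*4 + 3 - 4
	// new: XORL reg, reg;  LEAL -1(reg)(len*4) => len*4 - 1
	func tagCopy4(length uint32) byte {
		return byte(length*4 - 1) // == byte((length-1)<<2 | 0x03)
	}
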
@@ -1989,16 +1970,16 @@ repeat_end_emit_encodeBlockAsm4MB:
JMP search_loop_encodeBlockAsm4MB
no_repeat_found_encodeBlockAsm4MB:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBlockAsm4MB
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeBlockAsm4MB
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeBlockAsm4MB
MOVL 20(SP), CX
JMP search_loop_encodeBlockAsm4MB
@@ -2008,506 +1989,502 @@ candidate3_match_encodeBlockAsm4MB:
JMP candidate_match_encodeBlockAsm4MB
candidate2_match_encodeBlockAsm4MB:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBlockAsm4MB:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBlockAsm4MB
match_extend_back_loop_encodeBlockAsm4MB:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBlockAsm4MB
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBlockAsm4MB
JMP match_extend_back_loop_encodeBlockAsm4MB
match_extend_back_end_encodeBlockAsm4MB:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 4(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBlockAsm4MB
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBlockAsm4MB:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
- JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeBlockAsm4MB
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeBlockAsm4MB
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JLT three_bytes_match_emit_encodeBlockAsm4MB
- MOVL R8, R10
- SHRL $0x10, R10
+ MOVL DI, R9
+ SHRL $0x10, R9
MOVB $0xf8, (AX)
- MOVW R8, 1(AX)
- MOVB R10, 3(AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeBlockAsm4MB
three_bytes_match_emit_encodeBlockAsm4MB:
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBlockAsm4MB
two_bytes_match_emit_encodeBlockAsm4MB:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeBlockAsm4MB
JMP memmove_long_match_emit_encodeBlockAsm4MB
one_byte_match_emit_encodeBlockAsm4MB:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBlockAsm4MB:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeBlockAsm4MB
memmove_long_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeBlockAsm4MB:
match_nolit_loop_encodeBlockAsm4MB:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
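// Computes the match length by XORing 8 source bytes at a time against the
// candidate; a non-zero result pinpoints the first differing byte, and the
// 4/2/1-byte tails are compared once fewer than 8 bytes remain.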
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeBlockAsm4MB
matchlen_loopback_match_nolit_encodeBlockAsm4MB:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeBlockAsm4MB
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
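// Either instruction yields the bit index of the first difference; SARQ by 3
// converts it to whole matching bytes. TZCNT needs BMI1 (GOAMD64 v3 and up);
// BSFQ is the baseline fallback.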
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeBlockAsm4MB
matchlen_loop_match_nolit_encodeBlockAsm4MB:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB
JZ match_nolit_end_encodeBlockAsm4MB
matchlen_match4_match_nolit_encodeBlockAsm4MB:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeBlockAsm4MB
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm4MB:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeBlockAsm4MB
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeBlockAsm4MB:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeBlockAsm4MB
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm4MB
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeBlockAsm4MB:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
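// Offsets of 64KB and above use the copy-4 tag: 0xff encodes one 64-byte
// copy with a 4-byte offset, and any remaining length is emitted as a
// repeat code at the same offset; smaller offsets take the 2/3-byte
// encodings below.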
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JL two_byte_offset_match_nolit_encodeBlockAsm4MB
-
-four_bytes_loop_back_match_nolit_encodeBlockAsm4MB:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE four_bytes_remain_match_nolit_encodeBlockAsm4MB
MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(R10), R10
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
ADDQ $0x05, AX
- CMPL R10, $0x04
+ CMPL R9, $0x04
JL four_bytes_remain_match_nolit_encodeBlockAsm4MB
// emitRepeat
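// S2 extension: a copy-1 tag whose offset field is zero means "repeat the
// last offset". Tags 0x15/0x19/0x1d carry 1-, 2- and 3-byte length fields;
// the shortest repeats pack the length into the tag byte itself.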
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
- JMP four_bytes_loop_back_match_nolit_encodeBlockAsm4MB
four_bytes_remain_match_nolit_encodeBlockAsm4MB:
- TESTL R10, R10
+ TESTL R9, R9
JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
- MOVB $0x03, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVL SI, 1(AX)
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
two_byte_offset_match_nolit_encodeBlockAsm4MB:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm4MB
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R10
+ SUBL $0x08, R9
// emitRepeat
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
long_offset_short_match_nolit_encodeBlockAsm4MB:
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
// emitRepeat
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL R10, $0x00010100
+ CMPL R9, $0x00010100
JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(R10), R10
- MOVL R10, SI
+ LEAL -65536(R9), R9
+ MOVL R9, BX
MOVW $0x001d, (AX)
- MOVW R10, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
+ MOVW R9, 2(AX)
+ SARL $0x10, BX
+ MOVB BL, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
- JMP two_byte_offset_match_nolit_encodeBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
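// Short copies: length < 12 with offset < 2048 fits the 2-byte copy-1 tag
// (the top 3 offset bits share the tag byte); everything else takes the
// 3-byte copy-2 tag with a 16-bit offset.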
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeBlockAsm4MB
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
emit_copy_three_match_nolit_encodeBlockAsm4MB:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeBlockAsm4MB:
CMPL CX, 8(SP)
JGE emit_remainder_encodeBlockAsm4MB
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeBlockAsm4MB
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeBlockAsm4MB:
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x10, R8
- IMULQ R9, R8
- SHRQ $0x32, R8
- SHLQ $0x10, SI
- IMULQ R9, SI
- SHRQ $0x32, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeBlockAsm4MB
INCL CX
JMP search_loop_encodeBlockAsm4MB
@@ -2703,8 +2680,8 @@ zero_loop_encodeBlockAsm12B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -2714,428 +2691,426 @@ zero_loop_encodeBlockAsm12B:
MOVQ src_base+24(FP), DX
search_loop_encodeBlockAsm12B:
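// Each iteration hashes two overlapping 5-byte windows of the 8 loaded bytes
// with a multiply-shift (prime 0xcf1bbcdcbb; the top 12 bits of the product
// index the 4096-entry table at 24(SP)), and also compares the bytes at the
// last used offset (16(SP)) to detect a cheap repeat match.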
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBlockAsm12B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x18, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- SHLQ $0x18, R11
- IMULQ R9, R11
- SHRQ $0x34, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
SHLQ $0x18, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x34, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
JNE no_repeat_found_encodeBlockAsm12B
- LEAL 1(CX), DI
- MOVL 12(SP), R8
- MOVL DI, SI
- SUBL 16(SP), SI
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
JZ repeat_extend_back_end_encodeBlockAsm12B
repeat_extend_back_loop_encodeBlockAsm12B:
- CMPL DI, R8
+ CMPL SI, DI
JLE repeat_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(SI*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeBlockAsm12B
- LEAL -1(DI), DI
- DECL SI
+ LEAL -1(SI), SI
+ DECL BX
JNZ repeat_extend_back_loop_encodeBlockAsm12B
repeat_extend_back_end_encodeBlockAsm12B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeBlockAsm12B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeBlockAsm12B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeBlockAsm12B
two_bytes_repeat_emit_encodeBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeBlockAsm12B
JMP memmove_long_repeat_emit_encodeBlockAsm12B
one_byte_repeat_emit_encodeBlockAsm12B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
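// Inlined memmove for literal runs of at most 64 bytes: branches on the size
// class (<=8, <=16, <=32, <=64) and copies with overlapping loads and stores
// so no byte loop is needed.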
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_repeat_emit_encodeBlockAsm12B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
memmove_long_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R12
- SHRQ $0x05, R12
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R13
- SUBQ R11, R13
- DECQ R12
- JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R13*1), R11
- LEAQ -32(AX)(R13*1), R14
-
-emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R11
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
+ JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R12
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R13*1), X4
- MOVOU -16(R10)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R9, R13
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeBlockAsm12B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R9
- SUBL CX, R9
- LEAQ (DX)(CX*1), R10
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R12, R12
- CMPL R9, $0x08
+ XORL R11, R11
+ CMPL R8, $0x08
JL matchlen_match4_repeat_extend_encodeBlockAsm12B
matchlen_loopback_repeat_extend_encodeBlockAsm12B:
- MOVQ (R10)(R12*1), R11
- XORQ (SI)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_loop_repeat_extend_encodeBlockAsm12B:
- LEAL -8(R9), R9
- LEAL 8(R12), R12
- CMPL R9, $0x08
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B
JZ repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match4_repeat_extend_encodeBlockAsm12B:
- CMPL R9, $0x04
+ CMPL R8, $0x04
JL matchlen_match2_repeat_extend_encodeBlockAsm12B
- MOVL (R10)(R12*1), R11
- CMPL (SI)(R12*1), R11
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
- SUBL $0x04, R9
- LEAL 4(R12), R12
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm12B:
- CMPL R9, $0x02
+ CMPL R8, $0x02
JL matchlen_match1_repeat_extend_encodeBlockAsm12B
- MOVW (R10)(R12*1), R11
- CMPW (SI)(R12*1), R11
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
- SUBL $0x02, R9
- LEAL 2(R12), R12
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
matchlen_match1_repeat_extend_encodeBlockAsm12B:
- CMPL R9, $0x01
+ CMPL R8, $0x01
JL repeat_extend_forward_end_encodeBlockAsm12B
- MOVB (R10)(R12*1), R11
- CMPB (SI)(R12*1), R11
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm12B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeBlockAsm12B:
- ADDL R12, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
- TESTL R8, R8
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
JZ repeat_as_copy_encodeBlockAsm12B
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_match_repeat_encodeBlockAsm12B
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_match_repeat_encodeBlockAsm12B
cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_match_repeat_encodeBlockAsm12B
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_match_repeat_encodeBlockAsm12B:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_match_repeat_encodeBlockAsm12B:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_match_repeat_encodeBlockAsm12B:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_as_copy_encodeBlockAsm12B:
// emitCopy
-two_byte_offset_repeat_as_copy_encodeBlockAsm12B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
- MOVL $0x00000001, R8
- LEAL 16(R8), R8
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R8
- MOVB R8, (AX)
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, SI
+ SUBL $0x08, BX
// emitRepeat
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
long_offset_short_repeat_as_copy_encodeBlockAsm12B:
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
- JMP two_byte_offset_repeat_as_copy_encodeBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm12B
emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeBlockAsm12B:
@@ -3143,16 +3118,16 @@ repeat_end_emit_encodeBlockAsm12B:
JMP search_loop_encodeBlockAsm12B
no_repeat_found_encodeBlockAsm12B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBlockAsm12B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeBlockAsm12B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeBlockAsm12B
MOVL 20(SP), CX
JMP search_loop_encodeBlockAsm12B
@@ -3162,391 +3137,389 @@ candidate3_match_encodeBlockAsm12B:
JMP candidate_match_encodeBlockAsm12B
candidate2_match_encodeBlockAsm12B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBlockAsm12B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBlockAsm12B
match_extend_back_loop_encodeBlockAsm12B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBlockAsm12B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBlockAsm12B
JMP match_extend_back_loop_encodeBlockAsm12B
match_extend_back_end_encodeBlockAsm12B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBlockAsm12B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeBlockAsm12B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeBlockAsm12B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeBlockAsm12B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBlockAsm12B
two_bytes_match_emit_encodeBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeBlockAsm12B
JMP memmove_long_match_emit_encodeBlockAsm12B
one_byte_match_emit_encodeBlockAsm12B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBlockAsm12B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeBlockAsm12B
memmove_long_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeBlockAsm12B:
match_nolit_loop_encodeBlockAsm12B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeBlockAsm12B
matchlen_loopback_match_nolit_encodeBlockAsm12B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeBlockAsm12B
matchlen_loop_match_nolit_encodeBlockAsm12B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm12B
JZ match_nolit_end_encodeBlockAsm12B
matchlen_match4_match_nolit_encodeBlockAsm12B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeBlockAsm12B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm12B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm12B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeBlockAsm12B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm12B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeBlockAsm12B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeBlockAsm12B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm12B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeBlockAsm12B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBlockAsm12B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm12B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R10
+ SUBL $0x08, R9
// emitRepeat
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
long_offset_short_match_nolit_encodeBlockAsm12B:
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
// emitRepeat
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
- JMP two_byte_offset_match_nolit_encodeBlockAsm12B
two_byte_offset_short_match_nolit_encodeBlockAsm12B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
emit_copy_three_match_nolit_encodeBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeBlockAsm12B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeBlockAsm12B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeBlockAsm12B:
- MOVQ $0x000000cf1bbcdcbb, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x18, R8
- IMULQ R9, R8
- SHRQ $0x34, R8
- SHLQ $0x18, SI
- IMULQ R9, SI
- SHRQ $0x34, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeBlockAsm12B
INCL CX
JMP search_loop_encodeBlockAsm12B
@@ -3731,8 +3704,8 @@ zero_loop_encodeBlockAsm10B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -3742,428 +3715,426 @@ zero_loop_encodeBlockAsm10B:
MOVQ src_base+24(FP), DX
search_loop_encodeBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBlockAsm10B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x20, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R9, R11
- SHRQ $0x36, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
SHLQ $0x20, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x36, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
JNE no_repeat_found_encodeBlockAsm10B
- LEAL 1(CX), DI
- MOVL 12(SP), R8
- MOVL DI, SI
- SUBL 16(SP), SI
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
JZ repeat_extend_back_end_encodeBlockAsm10B
repeat_extend_back_loop_encodeBlockAsm10B:
- CMPL DI, R8
+ CMPL SI, DI
JLE repeat_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(SI*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeBlockAsm10B
- LEAL -1(DI), DI
- DECL SI
+ LEAL -1(SI), SI
+ DECL BX
JNZ repeat_extend_back_loop_encodeBlockAsm10B
repeat_extend_back_end_encodeBlockAsm10B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeBlockAsm10B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeBlockAsm10B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeBlockAsm10B
two_bytes_repeat_emit_encodeBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeBlockAsm10B
JMP memmove_long_repeat_emit_encodeBlockAsm10B
one_byte_repeat_emit_encodeBlockAsm10B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_repeat_emit_encodeBlockAsm10B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
memmove_long_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R12
- SHRQ $0x05, R12
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R13
- SUBQ R11, R13
- DECQ R12
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R13*1), R11
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R12
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R13*1), X4
- MOVOU -16(R10)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R9, R13
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeBlockAsm10B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R9
- SUBL CX, R9
- LEAQ (DX)(CX*1), R10
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R12, R12
- CMPL R9, $0x08
+ XORL R11, R11
+ CMPL R8, $0x08
JL matchlen_match4_repeat_extend_encodeBlockAsm10B
matchlen_loopback_repeat_extend_encodeBlockAsm10B:
- MOVQ (R10)(R12*1), R11
- XORQ (SI)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_loop_repeat_extend_encodeBlockAsm10B:
- LEAL -8(R9), R9
- LEAL 8(R12), R12
- CMPL R9, $0x08
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B
JZ repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match4_repeat_extend_encodeBlockAsm10B:
- CMPL R9, $0x04
+ CMPL R8, $0x04
JL matchlen_match2_repeat_extend_encodeBlockAsm10B
- MOVL (R10)(R12*1), R11
- CMPL (SI)(R12*1), R11
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
- SUBL $0x04, R9
- LEAL 4(R12), R12
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm10B:
- CMPL R9, $0x02
+ CMPL R8, $0x02
JL matchlen_match1_repeat_extend_encodeBlockAsm10B
- MOVW (R10)(R12*1), R11
- CMPW (SI)(R12*1), R11
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
- SUBL $0x02, R9
- LEAL 2(R12), R12
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
matchlen_match1_repeat_extend_encodeBlockAsm10B:
- CMPL R9, $0x01
+ CMPL R8, $0x01
JL repeat_extend_forward_end_encodeBlockAsm10B
- MOVB (R10)(R12*1), R11
- CMPB (SI)(R12*1), R11
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm10B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeBlockAsm10B:
- ADDL R12, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
- TESTL R8, R8
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
JZ repeat_as_copy_encodeBlockAsm10B
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_match_repeat_encodeBlockAsm10B
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_match_repeat_encodeBlockAsm10B
cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_match_repeat_encodeBlockAsm10B
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_match_repeat_encodeBlockAsm10B:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_match_repeat_encodeBlockAsm10B:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_match_repeat_encodeBlockAsm10B:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_as_copy_encodeBlockAsm10B:
// emitCopy
-two_byte_offset_repeat_as_copy_encodeBlockAsm10B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
- MOVL $0x00000001, R8
- LEAL 16(R8), R8
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R8
- MOVB R8, (AX)
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, SI
+ SUBL $0x08, BX
// emitRepeat
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
long_offset_short_repeat_as_copy_encodeBlockAsm10B:
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
// emitRepeat
- MOVL SI, R8
- LEAL -4(SI), SI
- CMPL R8, $0x08
+ MOVL BX, DI
+ LEAL -4(BX), BX
+ CMPL DI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL R8, $0x0c
+ CMPL DI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
- JMP two_byte_offset_repeat_as_copy_encodeBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm10B
emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeBlockAsm10B:
@@ -4171,16 +4142,16 @@ repeat_end_emit_encodeBlockAsm10B:
JMP search_loop_encodeBlockAsm10B
no_repeat_found_encodeBlockAsm10B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBlockAsm10B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeBlockAsm10B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeBlockAsm10B
MOVL 20(SP), CX
JMP search_loop_encodeBlockAsm10B
@@ -4190,391 +4161,389 @@ candidate3_match_encodeBlockAsm10B:
JMP candidate_match_encodeBlockAsm10B
candidate2_match_encodeBlockAsm10B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBlockAsm10B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBlockAsm10B
match_extend_back_loop_encodeBlockAsm10B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBlockAsm10B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBlockAsm10B
JMP match_extend_back_loop_encodeBlockAsm10B
match_extend_back_end_encodeBlockAsm10B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBlockAsm10B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeBlockAsm10B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeBlockAsm10B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBlockAsm10B
two_bytes_match_emit_encodeBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeBlockAsm10B
JMP memmove_long_match_emit_encodeBlockAsm10B
one_byte_match_emit_encodeBlockAsm10B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBlockAsm10B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeBlockAsm10B
memmove_long_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeBlockAsm10B:
match_nolit_loop_encodeBlockAsm10B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeBlockAsm10B
matchlen_loopback_match_nolit_encodeBlockAsm10B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeBlockAsm10B
matchlen_loop_match_nolit_encodeBlockAsm10B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm10B
JZ match_nolit_end_encodeBlockAsm10B
matchlen_match4_match_nolit_encodeBlockAsm10B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeBlockAsm10B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm10B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm10B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeBlockAsm10B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm10B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeBlockAsm10B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeBlockAsm10B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm10B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeBlockAsm10B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBlockAsm10B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm10B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R10
+ SUBL $0x08, R9
// emitRepeat
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
long_offset_short_match_nolit_encodeBlockAsm10B:
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
// emitRepeat
- MOVL R10, DI
- LEAL -4(R10), R10
- CMPL DI, $0x08
+ MOVL R9, SI
+ LEAL -4(R9), R9
+ CMPL SI, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
- JMP two_byte_offset_match_nolit_encodeBlockAsm10B
two_byte_offset_short_match_nolit_encodeBlockAsm10B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
emit_copy_three_match_nolit_encodeBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeBlockAsm10B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeBlockAsm10B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeBlockAsm10B:
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x20, R8
- IMULQ R9, R8
- SHRQ $0x36, R8
- SHLQ $0x20, SI
- IMULQ R9, SI
- SHRQ $0x36, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeBlockAsm10B
INCL CX
JMP search_loop_encodeBlockAsm10B
@@ -4759,8 +4728,8 @@ zero_loop_encodeBlockAsm8B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -4770,414 +4739,412 @@ zero_loop_encodeBlockAsm8B:
MOVQ src_base+24(FP), DX
search_loop_encodeBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x04, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBlockAsm8B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x20, R10
- IMULQ R9, R10
- SHRQ $0x38, R10
- SHLQ $0x20, R11
- IMULQ R9, R11
- SHRQ $0x38, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
SHLQ $0x20, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x38, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
JNE no_repeat_found_encodeBlockAsm8B
- LEAL 1(CX), DI
- MOVL 12(SP), R8
- MOVL DI, SI
- SUBL 16(SP), SI
+ LEAL 1(CX), SI
+ MOVL 12(SP), DI
+ MOVL SI, BX
+ SUBL 16(SP), BX
JZ repeat_extend_back_end_encodeBlockAsm8B
repeat_extend_back_loop_encodeBlockAsm8B:
- CMPL DI, R8
+ CMPL SI, DI
JLE repeat_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(SI*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(BX*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeBlockAsm8B
- LEAL -1(DI), DI
- DECL SI
+ LEAL -1(SI), SI
+ DECL BX
JNZ repeat_extend_back_loop_encodeBlockAsm8B
repeat_extend_back_end_encodeBlockAsm8B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeBlockAsm8B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeBlockAsm8B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeBlockAsm8B
two_bytes_repeat_emit_encodeBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeBlockAsm8B
JMP memmove_long_repeat_emit_encodeBlockAsm8B
one_byte_repeat_emit_encodeBlockAsm8B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_repeat_emit_encodeBlockAsm8B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
memmove_long_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R12
- SHRQ $0x05, R12
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R13
- SUBQ R11, R13
- DECQ R12
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R13*1), R11
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(AX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R12
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R13*1), X4
- MOVOU -16(R10)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R9, R13
- JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
+ JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeBlockAsm8B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R9
- SUBL CX, R9
- LEAQ (DX)(CX*1), R10
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), R8
+ SUBL CX, R8
+ LEAQ (DX)(CX*1), R9
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R12, R12
- CMPL R9, $0x08
+ XORL R11, R11
+ CMPL R8, $0x08
JL matchlen_match4_repeat_extend_encodeBlockAsm8B
matchlen_loopback_repeat_extend_encodeBlockAsm8B:
- MOVQ (R10)(R12*1), R11
- XORQ (SI)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R9)(R11*1), R10
+ XORQ (BX)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_loop_repeat_extend_encodeBlockAsm8B:
- LEAL -8(R9), R9
- LEAL 8(R12), R12
- CMPL R9, $0x08
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
+ CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B
JZ repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match4_repeat_extend_encodeBlockAsm8B:
- CMPL R9, $0x04
+ CMPL R8, $0x04
JL matchlen_match2_repeat_extend_encodeBlockAsm8B
- MOVL (R10)(R12*1), R11
- CMPL (SI)(R12*1), R11
+ MOVL (R9)(R11*1), R10
+ CMPL (BX)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
- SUBL $0x04, R9
- LEAL 4(R12), R12
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeBlockAsm8B:
- CMPL R9, $0x02
+ CMPL R8, $0x02
JL matchlen_match1_repeat_extend_encodeBlockAsm8B
- MOVW (R10)(R12*1), R11
- CMPW (SI)(R12*1), R11
+ MOVW (R9)(R11*1), R10
+ CMPW (BX)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
- SUBL $0x02, R9
- LEAL 2(R12), R12
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
matchlen_match1_repeat_extend_encodeBlockAsm8B:
- CMPL R9, $0x01
+ CMPL R8, $0x01
JL repeat_extend_forward_end_encodeBlockAsm8B
- MOVB (R10)(R12*1), R11
- CMPB (SI)(R12*1), R11
+ MOVB (R9)(R11*1), R10
+ CMPB (BX)(R11*1), R10
JNE repeat_extend_forward_end_encodeBlockAsm8B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeBlockAsm8B:
- ADDL R12, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
- TESTL R8, R8
+ ADDL R11, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+ TESTL DI, DI
JZ repeat_as_copy_encodeBlockAsm8B
// emitRepeat
- MOVL SI, DI
- LEAL -4(SI), SI
- CMPL DI, $0x08
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
JLE repeat_two_match_repeat_encodeBlockAsm8B
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_match_repeat_encodeBlockAsm8B
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_match_repeat_encodeBlockAsm8B:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_match_repeat_encodeBlockAsm8B:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_as_copy_encodeBlockAsm8B:
// emitCopy
-two_byte_offset_repeat_as_copy_encodeBlockAsm8B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
- MOVL $0x00000001, R8
- LEAL 16(R8), R8
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R8
- MOVB R8, (AX)
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, SI
+ SUBL $0x08, BX
// emitRepeat
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- MOVL SI, DI
- LEAL -4(SI), SI
- CMPL DI, $0x08
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
long_offset_short_repeat_as_copy_encodeBlockAsm8B:
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
// emitRepeat
- MOVL SI, DI
- LEAL -4(SI), SI
- CMPL DI, $0x08
+ MOVL BX, SI
+ LEAL -4(BX), BX
+ CMPL SI, $0x08
JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL SI, $0x0c
JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- CMPL SI, $0x00000104
+ CMPL BX, $0x00000104
JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- LEAL -256(SI), SI
+ LEAL -256(BX), BX
MOVW $0x0019, (AX)
- MOVW SI, 2(AX)
+ MOVW BX, 2(AX)
ADDQ $0x04, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(SI), SI
+ LEAL -4(BX), BX
MOVW $0x0015, (AX)
- MOVB SI, 2(AX)
+ MOVB BL, 2(AX)
ADDQ $0x03, AX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, SI
- ORL $0x01, SI
- MOVW SI, (AX)
+ SHLL $0x02, BX
+ ORL $0x01, BX
+ MOVW BX, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ R8, R8
- LEAL 1(R8)(SI*4), SI
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ XORQ DI, DI
+ LEAL 1(DI)(BX*4), BX
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
- JMP two_byte_offset_repeat_as_copy_encodeBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeBlockAsm8B
emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeBlockAsm8B:
@@ -5185,16 +5152,16 @@ repeat_end_emit_encodeBlockAsm8B:
JMP search_loop_encodeBlockAsm8B
no_repeat_found_encodeBlockAsm8B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBlockAsm8B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeBlockAsm8B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeBlockAsm8B
MOVL 20(SP), CX
JMP search_loop_encodeBlockAsm8B
@@ -5204,381 +5171,379 @@ candidate3_match_encodeBlockAsm8B:
JMP candidate_match_encodeBlockAsm8B
candidate2_match_encodeBlockAsm8B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBlockAsm8B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBlockAsm8B
match_extend_back_loop_encodeBlockAsm8B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBlockAsm8B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBlockAsm8B
JMP match_extend_back_loop_encodeBlockAsm8B
match_extend_back_end_encodeBlockAsm8B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBlockAsm8B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeBlockAsm8B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeBlockAsm8B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeBlockAsm8B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBlockAsm8B
two_bytes_match_emit_encodeBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeBlockAsm8B
JMP memmove_long_match_emit_encodeBlockAsm8B
one_byte_match_emit_encodeBlockAsm8B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBlockAsm8B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeBlockAsm8B
memmove_long_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeBlockAsm8B:
match_nolit_loop_encodeBlockAsm8B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeBlockAsm8B
matchlen_loopback_match_nolit_encodeBlockAsm8B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeBlockAsm8B
matchlen_loop_match_nolit_encodeBlockAsm8B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm8B
JZ match_nolit_end_encodeBlockAsm8B
matchlen_match4_match_nolit_encodeBlockAsm8B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeBlockAsm8B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeBlockAsm8B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeBlockAsm8B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeBlockAsm8B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeBlockAsm8B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeBlockAsm8B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeBlockAsm8B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeBlockAsm8B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeBlockAsm8B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBlockAsm8B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm8B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm8B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R10
+ SUBL $0x08, R9
// emitRepeat
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- MOVL R10, SI
- LEAL -4(R10), R10
- CMPL SI, $0x08
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
long_offset_short_match_nolit_encodeBlockAsm8B:
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
// emitRepeat
- MOVL R10, SI
- LEAL -4(R10), R10
- CMPL SI, $0x08
+ MOVL R9, BX
+ LEAL -4(R9), R9
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
- CMPL R10, $0x00000104
+ CMPL R9, $0x00000104
JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
- LEAL -256(R10), R10
+ LEAL -256(R9), R9
MOVW $0x0019, (AX)
- MOVW R10, 2(AX)
+ MOVW R9, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(R10), R10
+ LEAL -4(R9), R9
MOVW $0x0015, (AX)
- MOVB R10, 2(AX)
+ MOVB R9, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, R10
- ORL $0x01, R10
- MOVW R10, (AX)
+ SHLL $0x02, R9
+ ORL $0x01, R9
+ MOVW R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(R10*4), R10
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ XORQ SI, SI
+ LEAL 1(SI)(R9*4), R9
+ MOVB BL, 1(AX)
+ SARL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, R9
+ MOVB R9, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- JMP two_byte_offset_match_nolit_encodeBlockAsm8B
two_byte_offset_short_match_nolit_encodeBlockAsm8B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
emit_copy_three_match_nolit_encodeBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeBlockAsm8B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeBlockAsm8B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeBlockAsm8B:
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x20, R8
- IMULQ R9, R8
- SHRQ $0x38, R8
- SHLQ $0x20, SI
- IMULQ R9, SI
- SHRQ $0x38, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeBlockAsm8B
INCL CX
JMP search_loop_encodeBlockAsm8B
@@ -5763,8 +5728,8 @@ zero_loop_encodeBetterBlockAsm:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -6(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -5774,818 +5739,810 @@ zero_loop_encodeBetterBlockAsm:
MOVQ src_base+24(FP), DX
search_loop_encodeBetterBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x07, SI
- CMPL SI, $0x63
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
JLE check_maxskip_ok_encodeBetterBlockAsm
- LEAL 100(CX), SI
+ LEAL 100(CX), BX
JMP check_maxskip_cont_encodeBetterBlockAsm
check_maxskip_ok_encodeBetterBlockAsm:
- LEAL 1(CX)(SI*1), SI
+ LEAL 1(CX)(BX*1), BX
check_maxskip_cont_encodeBetterBlockAsm:
- CMPL SI, 8(SP)
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBetterBlockAsm
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 524312(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 524312(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeBetterBlockAsm
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeBetterBlockAsm
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeBetterBlockAsm
no_short_found_encodeBetterBlockAsm:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeBetterBlockAsm
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeBetterBlockAsm
MOVL 20(SP), CX
JMP search_loop_encodeBetterBlockAsm
candidateS_match_encodeBetterBlockAsm:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBetterBlockAsm
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBetterBlockAsm:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBetterBlockAsm
match_extend_back_loop_encodeBetterBlockAsm:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBetterBlockAsm
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBetterBlockAsm
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBetterBlockAsm
JMP match_extend_back_loop_encodeBetterBlockAsm
match_extend_back_end_encodeBetterBlockAsm:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 5(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBetterBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBetterBlockAsm:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeBetterBlockAsm
matchlen_loopback_match_nolit_encodeBetterBlockAsm:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeBetterBlockAsm
matchlen_loop_match_nolit_encodeBetterBlockAsm:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm
JZ match_nolit_end_encodeBetterBlockAsm
matchlen_match4_match_nolit_encodeBetterBlockAsm:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeBetterBlockAsm
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeBetterBlockAsm
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeBetterBlockAsm:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeBetterBlockAsm
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeBetterBlockAsm:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL 16(SP), R8
+ CMPL 16(SP), DI
JEQ match_is_repeat_encodeBetterBlockAsm
- CMPL R12, $0x01
+ CMPL R11, $0x01
JG match_length_ok_encodeBetterBlockAsm
- CMPL R8, $0x0000ffff
+ CMPL DI, $0x0000ffff
JLE match_length_ok_encodeBetterBlockAsm
MOVL 20(SP), CX
INCL CX
JMP search_loop_encodeBetterBlockAsm
match_length_ok_encodeBetterBlockAsm:
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeBetterBlockAsm
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeBetterBlockAsm
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_match_emit_encodeBetterBlockAsm
- CMPL SI, $0x01000000
+ CMPL BX, $0x01000000
JLT four_bytes_match_emit_encodeBetterBlockAsm
MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm
four_bytes_match_emit_encodeBetterBlockAsm:
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm
three_bytes_match_emit_encodeBetterBlockAsm:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm
two_bytes_match_emit_encodeBetterBlockAsm:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeBetterBlockAsm
JMP memmove_long_match_emit_encodeBetterBlockAsm
one_byte_match_emit_encodeBetterBlockAsm:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm
memmove_long_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeBetterBlockAsm:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JL two_byte_offset_match_nolit_encodeBetterBlockAsm
-
-four_bytes_loop_back_match_nolit_encodeBetterBlockAsm:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm
MOVB $0xff, (AX)
- MOVL R8, 1(AX)
- LEAL -64(R12), R12
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
ADDQ $0x05, AX
- CMPL R12, $0x04
+ CMPL R11, $0x04
JL four_bytes_remain_match_nolit_encodeBetterBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R12, $0x0100ffff
+ CMPL R11, $0x0100ffff
JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
- LEAL -16842747(R12), R12
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
- JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm
four_bytes_remain_match_nolit_encodeBetterBlockAsm:
- TESTL R12, R12
+ TESTL R11, R11
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVL R8, 1(AX)
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
two_byte_offset_match_nolit_encodeBetterBlockAsm:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB R8, 1(AX)
- MOVL R8, R9
- SHRL $0x08, R9
- SHLL $0x05, R9
- ORL R9, SI
- MOVB SI, (AX)
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ MOVL DI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R12
+ SUBL $0x08, R11
// emitRepeat
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R12, $0x0100ffff
+ CMPL R11, $0x0100ffff
JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- LEAL -16842747(R12), R12
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
long_offset_short_match_nolit_encodeBetterBlockAsm:
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R12, $0x0100ffff
+ CMPL R11, $0x0100ffff
JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
- LEAL -16842747(R12), R12
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
- JMP two_byte_offset_match_nolit_encodeBetterBlockAsm
two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
emit_copy_three_match_nolit_encodeBetterBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
match_is_repeat_encodeBetterBlockAsm:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_repeat_encodeBetterBlockAsm
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL SI, $0x01000000
+ CMPL BX, $0x01000000
JLT four_bytes_match_emit_repeat_encodeBetterBlockAsm
MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
four_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
three_bytes_match_emit_repeat_encodeBetterBlockAsm:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
two_bytes_match_emit_repeat_encodeBetterBlockAsm:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_repeat_encodeBetterBlockAsm
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
one_byte_match_emit_repeat_encodeBetterBlockAsm:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
memmove_long_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitRepeat
emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R12, $0x0100ffff
+ CMPL R11, $0x0100ffff
JLT repeat_five_match_nolit_repeat_encodeBetterBlockAsm
- LEAL -16842747(R12), R12
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ LEAL -16842747(R11), R11
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
match_nolit_emitcopy_end_encodeBetterBlockAsm:
@@ -6597,50 +6554,50 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm:
RET
match_nolit_dst_ok_encodeBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R12
- IMULQ SI, R12
- SHRQ $0x2f, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x32, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 524312(SP)(R11*4)
- MOVL R14, 524312(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeBetterBlockAsm
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x08, R8
- IMULQ SI, R8
- SHRQ $0x2f, R8
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x08, DI
+ IMULQ BX, DI
+ SHRQ $0x2f, DI
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm
emit_remainder_encodeBetterBlockAsm:
@@ -6842,8 +6799,8 @@ zero_loop_encodeBetterBlockAsm4MB:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -6(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -6853,756 +6810,752 @@ zero_loop_encodeBetterBlockAsm4MB:
MOVQ src_base+24(FP), DX
search_loop_encodeBetterBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x07, SI
- CMPL SI, $0x63
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
JLE check_maxskip_ok_encodeBetterBlockAsm4MB
- LEAL 100(CX), SI
+ LEAL 100(CX), BX
JMP check_maxskip_cont_encodeBetterBlockAsm4MB
check_maxskip_ok_encodeBetterBlockAsm4MB:
- LEAL 1(CX)(SI*1), SI
+ LEAL 1(CX)(BX*1), BX
check_maxskip_cont_encodeBetterBlockAsm4MB:
- CMPL SI, 8(SP)
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBetterBlockAsm4MB
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 524312(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 524312(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeBetterBlockAsm4MB
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeBetterBlockAsm4MB
no_short_found_encodeBetterBlockAsm4MB:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeBetterBlockAsm4MB
MOVL 20(SP), CX
JMP search_loop_encodeBetterBlockAsm4MB
candidateS_match_encodeBetterBlockAsm4MB:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBetterBlockAsm4MB
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBetterBlockAsm4MB:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBetterBlockAsm4MB
match_extend_back_loop_encodeBetterBlockAsm4MB:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBetterBlockAsm4MB
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBetterBlockAsm4MB
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBetterBlockAsm4MB
JMP match_extend_back_loop_encodeBetterBlockAsm4MB
match_extend_back_end_encodeBetterBlockAsm4MB:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 4(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 4(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBetterBlockAsm4MB
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBetterBlockAsm4MB:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_loop_match_nolit_encodeBetterBlockAsm4MB:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB
JZ match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeBetterBlockAsm4MB
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm4MB
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeBetterBlockAsm4MB:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL 16(SP), R8
+ CMPL 16(SP), DI
JEQ match_is_repeat_encodeBetterBlockAsm4MB
- CMPL R12, $0x01
+ CMPL R11, $0x01
JG match_length_ok_encodeBetterBlockAsm4MB
- CMPL R8, $0x0000ffff
+ CMPL DI, $0x0000ffff
JLE match_length_ok_encodeBetterBlockAsm4MB
MOVL 20(SP), CX
INCL CX
JMP search_loop_encodeBetterBlockAsm4MB
match_length_ok_encodeBetterBlockAsm4MB:
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeBetterBlockAsm4MB
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeBetterBlockAsm4MB
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_match_emit_encodeBetterBlockAsm4MB
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
three_bytes_match_emit_encodeBetterBlockAsm4MB:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
two_bytes_match_emit_encodeBetterBlockAsm4MB:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
one_byte_match_emit_encodeBetterBlockAsm4MB:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
memmove_long_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JL two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
-
-four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
MOVB $0xff, (AX)
- MOVL R8, 1(AX)
- LEAL -64(R12), R12
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
ADDQ $0x05, AX
- CMPL R12, $0x04
+ CMPL R11, $0x04
JL four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
- JMP four_bytes_loop_back_match_nolit_encodeBetterBlockAsm4MB
four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
- TESTL R12, R12
+ TESTL R11, R11
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
- MOVB $0x03, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVL R8, 1(AX)
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, SI
- MOVB SI, (AX)
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R12
+ SUBL $0x08, R11
// emitRepeat
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
- JMP two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
match_is_repeat_encodeBetterBlockAsm4MB:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_repeat_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL R12, $0x00010100
+ CMPL R11, $0x00010100
JLT repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
- LEAL -65536(R12), R12
- MOVL R12, R8
+ LEAL -65536(R11), R11
+ MOVL R11, DI
MOVW $0x001d, (AX)
- MOVW R12, 2(AX)
- SARL $0x10, R8
- MOVB R8, 4(AX)
+ MOVW R11, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
@@ -7614,50 +7567,50 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
RET
match_nolit_dst_ok_encodeBetterBlockAsm4MB:
- MOVQ $0x00cf1bbcdcbfa563, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R12
- IMULQ SI, R12
- SHRQ $0x2f, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x32, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 524312(SP)(R11*4)
- MOVL R14, 524312(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm4MB:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeBetterBlockAsm4MB
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x08, R8
- IMULQ SI, R8
- SHRQ $0x2f, R8
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x08, DI
+ IMULQ BX, DI
+ SHRQ $0x2f, DI
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm4MB
emit_remainder_encodeBetterBlockAsm4MB:
@@ -7851,8 +7804,8 @@ zero_loop_encodeBetterBlockAsm12B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -6(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -7862,601 +7815,599 @@ zero_loop_encodeBetterBlockAsm12B:
MOVQ src_base+24(FP), DX
search_loop_encodeBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBetterBlockAsm12B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x34, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 65560(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 65560(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeBetterBlockAsm12B
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeBetterBlockAsm12B
no_short_found_encodeBetterBlockAsm12B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeBetterBlockAsm12B
MOVL 20(SP), CX
JMP search_loop_encodeBetterBlockAsm12B
candidateS_match_encodeBetterBlockAsm12B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBetterBlockAsm12B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBetterBlockAsm12B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBetterBlockAsm12B
match_extend_back_loop_encodeBetterBlockAsm12B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBetterBlockAsm12B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBetterBlockAsm12B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBetterBlockAsm12B
JMP match_extend_back_loop_encodeBetterBlockAsm12B
match_extend_back_end_encodeBetterBlockAsm12B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBetterBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBetterBlockAsm12B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeBetterBlockAsm12B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_loop_match_nolit_encodeBetterBlockAsm12B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B
JZ match_nolit_end_encodeBetterBlockAsm12B
matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeBetterBlockAsm12B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeBetterBlockAsm12B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm12B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeBetterBlockAsm12B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL 16(SP), R8
+ CMPL 16(SP), DI
JEQ match_is_repeat_encodeBetterBlockAsm12B
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeBetterBlockAsm12B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeBetterBlockAsm12B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
two_bytes_match_emit_encodeBetterBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
one_byte_match_emit_encodeBetterBlockAsm12B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
memmove_long_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeBetterBlockAsm12B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBetterBlockAsm12B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, SI
- MOVB SI, (AX)
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R12
+ SUBL $0x08, R11
// emitRepeat
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
long_offset_short_match_nolit_encodeBetterBlockAsm12B:
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
- JMP two_byte_offset_match_nolit_encodeBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
match_is_repeat_encodeBetterBlockAsm12B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_repeat_encodeBetterBlockAsm12B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_repeat_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
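The emitRepeat branches above are S2's repeat-offset extension to the Snappy copy tags: the immediates 0x15 and 0x19 are tagCopy1 bytes (5<<2|1 and 6<<2|1) selecting the one- and two-byte extended-length forms. A minimal Go sketch of what the four branches present in this small-block variant encode, assuming length >= 4 (function and variable names are illustrative, not the generator's):

func emitRepeat(dst []byte, offset, length int) int {
	length -= 4
	if length <= 4 { // repeat_two: tag byte plus a zero byte
		dst[0] = byte(length)<<2 | 1
		dst[1] = 0
		return 2
	}
	if length < 8 && offset < 2048 { // repeat_two_offset: fold offset into the tag
		dst[0] = byte(offset>>8)<<5 | byte(length)<<2 | 1
		dst[1] = byte(offset)
		return 2
	}
	if length < 260 { // repeat_three: 0x15 == 5<<2|1, one extra length byte
		dst[0], dst[1], dst[2] = 0x15, 0, byte(length-4)
		return 3
	}
	length -= 256 // repeat_four: 0x19 == 6<<2|1, 16-bit little-endian length
	dst[0], dst[1], dst[2], dst[3] = 0x19, 0, byte(length), byte(length>>8)
	return 4
}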
@@ -8468,50 +8419,50 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
RET
match_nolit_dst_ok_encodeBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x34, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x32, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x34, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 65560(SP)(R11*4)
- MOVL R14, 65560(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm12B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeBetterBlockAsm12B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x32, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x32, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm12B
emit_remainder_encodeBetterBlockAsm12B:
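After a match, match_nolit_dst_ok above reseeds both hash tables around the match boundaries, and index_loop walks the covered region inward from both ends (ADDQ/SUBQ $0x02) inserting long-hash entries. Both hashes are plain multiply-shift: 0x0000cf1bbcdcbf9b covers six bytes of the load, 0x9e3779b1 covers four. A sketch with the 12B variant's widths (14-bit long table at 24(SP), 12-bit short table at 65560(SP)); the constants come from the code above, the names are illustrative:

const (
	prime6 = 0x0000cf1bbcdcbf9b // six-byte hash (SHLQ $0x10 keeps bytes 0..5)
	prime4 = 0x9e3779b1         // four-byte hash (SHLQ $0x20 keeps bytes 0..3)
)

func hashLong(u uint64) uint32  { return uint32(((u << 16) * prime6) >> 50) } // SHRQ $0x32
func hashShort(u uint64) uint32 { return uint32(((u << 32) * prime4) >> 52) } // SHRQ $0x34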
@@ -8694,8 +8645,8 @@ zero_loop_encodeBetterBlockAsm10B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -6(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -8705,601 +8656,599 @@ zero_loop_encodeBetterBlockAsm10B:
MOVQ src_base+24(FP), DX
search_loop_encodeBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBetterBlockAsm10B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x36, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 16408(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 16408(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeBetterBlockAsm10B
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeBetterBlockAsm10B
no_short_found_encodeBetterBlockAsm10B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeBetterBlockAsm10B
MOVL 20(SP), CX
JMP search_loop_encodeBetterBlockAsm10B
candidateS_match_encodeBetterBlockAsm10B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBetterBlockAsm10B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBetterBlockAsm10B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBetterBlockAsm10B
match_extend_back_loop_encodeBetterBlockAsm10B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBetterBlockAsm10B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBetterBlockAsm10B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBetterBlockAsm10B
JMP match_extend_back_loop_encodeBetterBlockAsm10B
match_extend_back_end_encodeBetterBlockAsm10B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBetterBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBetterBlockAsm10B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeBetterBlockAsm10B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_loop_match_nolit_encodeBetterBlockAsm10B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B
JZ match_nolit_end_encodeBetterBlockAsm10B
matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeBetterBlockAsm10B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeBetterBlockAsm10B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm10B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeBetterBlockAsm10B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL 16(SP), R8
+ CMPL 16(SP), DI
JEQ match_is_repeat_encodeBetterBlockAsm10B
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeBetterBlockAsm10B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeBetterBlockAsm10B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
two_bytes_match_emit_encodeBetterBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
one_byte_match_emit_encodeBetterBlockAsm10B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
memmove_long_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeBetterBlockAsm10B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBetterBlockAsm10B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, SI
- MOVB SI, (AX)
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R12
+ SUBL $0x08, R11
// emitRepeat
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
long_offset_short_match_nolit_encodeBetterBlockAsm10B:
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
- JMP two_byte_offset_match_nolit_encodeBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
match_is_repeat_encodeBetterBlockAsm10B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_repeat_encodeBetterBlockAsm10B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_repeat_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JLT repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
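In the two_byte_offset_short blocks above, the updated code computes 4*length once in BX and derives both Snappy copy tags from it: 4*length-15 == (length-4)<<2|tagCopy1 and 4*length-2 == (length-1)<<2|tagCopy2, replacing the old MOVB $0x01/$0x02 constant loads. A Go sketch of the two emitted forms, assuming 4 <= length < 64 (names are illustrative):

func emitCopyShort(dst []byte, offset, length int) int {
	if length < 12 && offset < 2048 {
		// two bytes: 3 offset high bits, 3-bit length-4, tagCopy1
		dst[0] = byte(offset>>8)<<5 | byte(length<<2-15)
		dst[1] = byte(offset)
		return 2
	}
	// three bytes: 6-bit length-1, tagCopy2, 16-bit little-endian offset
	dst[0] = byte(length<<2 - 2)
	dst[1] = byte(offset)
	dst[2] = byte(offset >> 8)
	return 3
}

The 8B variant below drops the offset < 2048 comparison entirely, since its block size guarantees the two-byte form is always usable when the length fits.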
@@ -9311,50 +9260,50 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
RET
match_nolit_dst_ok_encodeBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x36, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x34, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x36, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 16408(SP)(R11*4)
- MOVL R14, 16408(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm10B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeBetterBlockAsm10B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x34, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x34, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x34, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm10B
emit_remainder_encodeBetterBlockAsm10B:
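The matchlen_loopback blocks in these routines compare eight bytes per iteration: XOR the two words and, on the first non-zero result, convert the trailing-zero count (TZCNTQ when built with GOAMD64_v3, BSFQ otherwise) into a byte index with SARQ $0x03. A minimal sketch, assuming len(a) <= len(b):

import (
	"encoding/binary"
	"math/bits"
)

func matchLen(a, b []byte) (n int) {
	for len(a)-n >= 8 {
		x := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if x != 0 {
			return n + bits.TrailingZeros64(x)>>3 // SARQ $0x03 above
		}
		n += 8
	}
	for n < len(a) && a[n] == b[n] { // the 4/2/1-byte tail compares
		n++
	}
	return n
}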
@@ -9537,8 +9486,8 @@ zero_loop_encodeBetterBlockAsm8B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -6(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
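The search loop in the next hunk advances by a growing step: the distance scanned since the last emit, shifted right, plus one, so incompressible stretches are skipped through progressively faster. The 8B variant shifts by 4 (SHRL $0x04); the 10B and 12B variants above shift by 5. As a one-line sketch:

func nextS(s, nextEmit, shift int) int {
	return s + 1 + (s-nextEmit)>>shift // shift: 4 for 8B, 5 for 10B/12B
}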
@@ -9548,587 +9497,585 @@ zero_loop_encodeBetterBlockAsm8B:
MOVQ src_base+24(FP), DX
search_loop_encodeBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x04, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeBetterBlockAsm8B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x38, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 4120(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 4120(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeBetterBlockAsm8B
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeBetterBlockAsm8B
no_short_found_encodeBetterBlockAsm8B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeBetterBlockAsm8B
MOVL 20(SP), CX
JMP search_loop_encodeBetterBlockAsm8B
candidateS_match_encodeBetterBlockAsm8B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeBetterBlockAsm8B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeBetterBlockAsm8B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeBetterBlockAsm8B
match_extend_back_loop_encodeBetterBlockAsm8B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeBetterBlockAsm8B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeBetterBlockAsm8B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeBetterBlockAsm8B
JMP match_extend_back_loop_encodeBetterBlockAsm8B
match_extend_back_end_encodeBetterBlockAsm8B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeBetterBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeBetterBlockAsm8B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeBetterBlockAsm8B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_loop_match_nolit_encodeBetterBlockAsm8B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B
JZ match_nolit_end_encodeBetterBlockAsm8B
matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeBetterBlockAsm8B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeBetterBlockAsm8B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeBetterBlockAsm8B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeBetterBlockAsm8B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL 16(SP), R8
+ CMPL 16(SP), DI
JEQ match_is_repeat_encodeBetterBlockAsm8B
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeBetterBlockAsm8B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeBetterBlockAsm8B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
two_bytes_match_emit_encodeBetterBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
one_byte_match_emit_encodeBetterBlockAsm8B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x04
+ CMPQ R8, $0x04
JLE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R10), R11
- MOVL R11, (AX)
+ MOVL (R9), R10
+ MOVL R10, (AX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R10), R11
- MOVL -4(R10)(R9*1), R10
- MOVL R11, (AX)
- MOVL R10, -4(AX)(R9*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (AX)
+ MOVL R9, -4(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
memmove_long_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeBetterBlockAsm8B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
-two_byte_offset_match_nolit_encodeBetterBlockAsm8B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, SI
- MOVB SI, (AX)
+ MOVL $0x00000001, BX
+ LEAL 16(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
- SUBL $0x08, R12
+ SUBL $0x08, R11
// emitRepeat
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
long_offset_short_match_nolit_encodeBetterBlockAsm8B:
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- JMP two_byte_offset_match_nolit_encodeBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
match_is_repeat_encodeBetterBlockAsm8B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_repeat_encodeBetterBlockAsm8B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_repeat_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ DI, $0x04
JLE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R8), R9
+ MOVL R9, (AX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R8), R9
+ MOVL -4(R8)(DI*1), R8
+ MOVL R9, (AX)
+ MOVL R8, -4(AX)(DI*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R12
+ SUBQ R9, R12
+ DECQ R10
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R8)(R12*1), R9
+ LEAQ -32(AX)(R12*1), R13
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
- ADDQ $0x20, R14
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
ADDQ $0x20, R13
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R12
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R8)(R12*1), X4
+ MOVOU -16(R8)(R12*1), X5
+ MOVOA X4, -32(AX)(R12*1)
+ MOVOA X5, -16(AX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ DI, R12
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitRepeat
- MOVL R12, SI
- LEAL -4(R12), R12
- CMPL SI, $0x08
+ MOVL R11, BX
+ LEAL -4(R11), R11
+ CMPL BX, $0x08
JLE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
- CMPL SI, $0x0c
+ CMPL BX, $0x0c
JGE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
- CMPL R12, $0x00000104
+ CMPL R11, $0x00000104
JLT repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
- LEAL -256(R12), R12
+ LEAL -256(R11), R11
MOVW $0x0019, (AX)
- MOVW R12, 2(AX)
+ MOVW R11, 2(AX)
ADDQ $0x04, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
- LEAL -4(R12), R12
+ LEAL -4(R11), R11
MOVW $0x0015, (AX)
- MOVB R12, 2(AX)
+ MOVB R11, 2(AX)
ADDQ $0x03, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
- SHLL $0x02, R12
- ORL $0x01, R12
- MOVW R12, (AX)
+ SHLL $0x02, R11
+ ORL $0x01, R11
+ MOVW R11, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R12*4), R12
- MOVB R8, 1(AX)
- SARL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ XORQ BX, BX
+ LEAL 1(BX)(R11*4), R11
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R11
+ MOVB R11, (AX)
ADDQ $0x02, AX
match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
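The repeat paths in the hunk above emit an S2 repeat code (a copy reusing the previous offset) in one of three sizes chosen by the remaining length; the zero byte written by the MOVW stores is the zero offset field that marks the tag as a repeat rather than a normal copy1. A minimal Go sketch of the tag arithmetic as read off the instruction sequence (the function name and layout are mine, not the vendored source; the 2-byte offset-carrying form reachable from the $0x0c compare is omitted):

    // repeatSketch mirrors repeat_two / repeat_three and the 0x0019 form.
    func repeatSketch(dst []byte, length int) int {
        n := length - 4
        switch {
        case n <= 4: // repeat_two: SHLL $0x02 / ORL $0x01
            dst[0] = uint8(n)<<2 | 0x01
            dst[1] = 0 // zero offset byte signals "repeat last offset"
            return 2
        case n < 0x104: // repeat_three: MOVW $0x0015 writes tag + zero byte
            dst[0], dst[1] = 0x15, 0
            dst[2] = uint8(n - 4) // LEAL -4 above
            return 3
        default: // MOVW $0x0019 + two little-endian length bytes
            dst[0], dst[1] = 0x19, 0
            dst[2] = uint8(n - 256) // LEAL -256 above
            dst[3] = uint8((n - 256) >> 8)
            return 4
        }
    }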
@@ -10140,50 +10087,50 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
RET
match_nolit_dst_ok_encodeBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x38, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x36, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x38, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 4120(SP)(R11*4)
- MOVL R14, 4120(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeBetterBlockAsm8B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeBetterBlockAsm8B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x36, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x36, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x36, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm8B
emit_remainder_encodeBetterBlockAsm8B:
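Both hash functions in the hunk above are multiply-shift hashes: a left shift keeps only the input bytes that participate, an odd-constant multiply mixes them, and the top bits become the table index. In this 8B variant the 6-byte hash indexes a 1024-entry table at 24(SP) (which ends exactly where the 4120(SP) table begins) and the 4-byte hash indexes a 256-entry table; 0x9e3779b1 is the 32-bit golden-ratio constant. The index_loop then back-fills the long table two positions at a time from both ends of the matched span. A sketch with the shifts read off the code (names mine):

    // hashLong keeps 6 bytes (SHLQ $0x10) and takes the top 10 bits
    // (SHRQ $0x36 = 54, and 64-54 = 10).
    func hashLong(v uint64) uint32 {
        return uint32(((v << 16) * 0x0000cf1bbcdcbf9b) >> 54)
    }

    // hashShort keeps 4 bytes (SHLQ $0x20) and takes the top 8 bits
    // (SHRQ $0x38 = 56).
    func hashShort(v uint64) uint32 {
        return uint32(((v << 32) * 0x9e3779b1) >> 56)
    }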
@@ -10366,8 +10313,8 @@ zero_loop_encodeSnappyBlockAsm:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
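This short setup hunk computes the two block-level bounds used by every later check; in Go terms (a sketch, the register reading is mine, but the -8, -9 and >>5 constants are taken directly from the instructions):

    func limits(srcLen int) (sLimit, dstGuard int) {
        sLimit = srcLen - 8               // LEAQ -8(CX); stored at 8(SP)
        dstGuard = srcLen - srcLen>>5 - 9 // LEAQ -9(CX); SHRQ $0x05; SUBL
        return
    }

dstGuard is added to dst_base (the LEAQ (AX)(DX*1) above) and later compared against the output cursor: if compressing would overrun it, the function returns 0 and the caller can fall back to storing the block uncompressed.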
@@ -10377,321 +10324,321 @@ zero_loop_encodeSnappyBlockAsm:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x10, R11
- IMULQ R9, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
SHLQ $0x10, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x32, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
- JNE no_repeat_found_encodeSnappyBlockAsm
- LEAL 1(CX), DI
- MOVL 12(SP), SI
- MOVL DI, R8
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
JZ repeat_extend_back_end_encodeSnappyBlockAsm
repeat_extend_back_loop_encodeSnappyBlockAsm:
- CMPL DI, SI
+ CMPL SI, BX
JLE repeat_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(R8*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeSnappyBlockAsm
- LEAL -1(DI), DI
- DECL R8
+ LEAL -1(SI), SI
+ DECL DI
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm
repeat_extend_back_end_encodeSnappyBlockAsm:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeSnappyBlockAsm
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL SI, $0x01000000
+ CMPL BX, $0x01000000
JLT four_bytes_repeat_emit_encodeSnappyBlockAsm
MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
four_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVL SI, R10
- SHRL $0x10, R10
+ MOVL BX, R9
+ SHRL $0x10, R9
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R10, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R9, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
three_bytes_repeat_emit_encodeSnappyBlockAsm:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
two_bytes_repeat_emit_encodeSnappyBlockAsm:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeSnappyBlockAsm
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
one_byte_repeat_emit_encodeSnappyBlockAsm:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm
memmove_long_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R11, R11
- CMPL R8, $0x08
+ XORL R10, R10
+ CMPL DI, $0x08
JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm:
- MOVQ (R9)(R11*1), R10
- XORQ (SI)(R11*1), R10
- TESTQ R10, R10
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R9, R9
#else
- BSFQ R10, R10
+ BSFQ R9, R9
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_loop_repeat_extend_encodeSnappyBlockAsm:
- LEAL -8(R8), R8
- LEAL 8(R11), R11
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm
JZ repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- MOVL (R9)(R11*1), R10
- CMPL (SI)(R11*1), R10
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- SUBL $0x04, R8
- LEAL 4(R11), R11
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm
- MOVW (R9)(R11*1), R10
- CMPW (SI)(R11*1), R10
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
- SUBL $0x02, R8
- LEAL 2(R11), R11
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL repeat_extend_forward_end_encodeSnappyBlockAsm
- MOVB (R9)(R11*1), R10
- CMPB (SI)(R11*1), R10
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R10), R10
repeat_extend_forward_end_encodeSnappyBlockAsm:
- ADDL R11, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
// emitCopy
- CMPL DI, $0x00010000
+ CMPL SI, $0x00010000
JL two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(SI), SI
+ MOVL SI, 1(AX)
+ LEAL -64(BX), BX
ADDQ $0x05, AX
- CMPL SI, $0x04
+ CMPL BX, $0x04
JL four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
- TESTL SI, SI
+ TESTL BX, BX
JZ repeat_end_emit_encodeSnappyBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVL DI, 1(AX)
+ XORL DI, DI
+ LEAL -1(DI)(BX*4), BX
+ MOVB BL, (AX)
+ MOVL SI, 1(AX)
ADDQ $0x05, AX
JMP repeat_end_emit_encodeSnappyBlockAsm
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeSnappyBlockAsm
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeSnappyBlockAsm:
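The emitCopy block above chooses between the three Snappy copy encodings. Note the shape change this diff makes throughout: the old code parked a constant in BL (MOVB $0x01/$0x02/$0x03, BL) and folded it in with LEAL, while the new code computes the identical tag byte from length*4 alone (LEAL -15/-2/-1 yield ((len-4)<<2)|0b01, ((len-1)<<2)|0b10 and ((len-1)<<2)|0b11 respectively). That frees BX to act as an ordinary temporary, which is why every register in this file shifts down one slot (R8 to DI, R9 to R8, and so on). A Go sketch of the tag arithmetic (names mine; exact-multiple and short-tail edge cases elided):

    // (imports: encoding/binary)
    func emitCopySketch(dst []byte, offset, length int) int {
        i := 0
        if offset >= 1<<16 {
            for length > 64 { // MOVB $0xff; LEAL -64: 64-byte copy4 chunks
                dst[i] = 0xff // (63<<2)|3
                binary.LittleEndian.PutUint32(dst[i+1:], uint32(offset))
                i, length = i+5, length-64
            }
            dst[i] = uint8(length<<2 - 1) // ((length-1)<<2)|3
            binary.LittleEndian.PutUint32(dst[i+1:], uint32(offset))
            return i + 5
        }
        for length > 64 { // MOVB $0xee; LEAL -60: 60-byte copy2 chunks
            dst[i] = 0xee // (59<<2)|2
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            i, length = i+3, length-60
        }
        if length >= 12 || offset >= 2048 { // the $0x0c and $0x0800 compares
            dst[i] = uint8(length<<2 - 2) // ((length-1)<<2)|2
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            return i + 3
        }
        dst[i] = uint8(length<<2-15) | uint8(offset>>8)<<5 // ((length-4)<<2)|1
        dst[i+1] = uint8(offset)
        return i + 2
    }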
@@ -10699,16 +10646,16 @@ repeat_end_emit_encodeSnappyBlockAsm:
JMP search_loop_encodeSnappyBlockAsm
no_repeat_found_encodeSnappyBlockAsm:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBlockAsm
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeSnappyBlockAsm
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeSnappyBlockAsm
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBlockAsm
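no_repeat_found above is the classic Snappy triple probe: the 8-byte load cv is checked against the first candidate, then shifted right one byte for the second candidate (position s+1) and once more for the third (position s+2, whose hash slot is refreshed on the way through); if all three miss, s jumps to the nextS computed at the top of search_loop, i.e. s + ((s-nextEmit)>>6) + 4, so unmatched regions are skipped at an accelerating rate. A runnable sketch of the probe itself (names mine; table maintenance elided):

    // (imports: encoding/binary)
    func probe(src []byte, s int, cv uint64, c1, c2, c3 int) (base, pos int, ok bool) {
        if binary.LittleEndian.Uint32(src[c1:]) == uint32(cv) {
            return c1, s, true
        }
        cv >>= 8 // SHRQ $0x08, SI
        if binary.LittleEndian.Uint32(src[c2:]) == uint32(cv) {
            return c2, s + 1, true
        }
        cv >>= 8
        if binary.LittleEndian.Uint32(src[c3:]) == uint32(cv) {
            return c3, s + 2, true
        }
        return 0, 0, false
    }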
@@ -10718,331 +10665,331 @@ candidate3_match_encodeSnappyBlockAsm:
JMP candidate_match_encodeSnappyBlockAsm
candidate2_match_encodeSnappyBlockAsm:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBlockAsm:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBlockAsm
match_extend_back_loop_encodeSnappyBlockAsm:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBlockAsm
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBlockAsm
JMP match_extend_back_loop_encodeSnappyBlockAsm
match_extend_back_end_encodeSnappyBlockAsm:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 5(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeSnappyBlockAsm
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBlockAsm
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JLT three_bytes_match_emit_encodeSnappyBlockAsm
- CMPL R8, $0x01000000
+ CMPL DI, $0x01000000
JLT four_bytes_match_emit_encodeSnappyBlockAsm
MOVB $0xfc, (AX)
- MOVL R8, 1(AX)
+ MOVL DI, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
four_bytes_match_emit_encodeSnappyBlockAsm:
- MOVL R8, R10
- SHRL $0x10, R10
+ MOVL DI, R9
+ SHRL $0x10, R9
MOVB $0xf8, (AX)
- MOVW R8, 1(AX)
- MOVB R10, 3(AX)
+ MOVW DI, 1(AX)
+ MOVB R9, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
three_bytes_match_emit_encodeSnappyBlockAsm:
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
two_bytes_match_emit_encodeSnappyBlockAsm:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeSnappyBlockAsm
JMP memmove_long_match_emit_encodeSnappyBlockAsm
one_byte_match_emit_encodeSnappyBlockAsm:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm
memmove_long_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeSnappyBlockAsm:
match_nolit_loop_encodeSnappyBlockAsm:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBlockAsm:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_loop_match_nolit_encodeSnappyBlockAsm:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm
JZ match_nolit_end_encodeSnappyBlockAsm
matchlen_match4_match_nolit_encodeSnappyBlockAsm:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBlockAsm
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBlockAsm
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeSnappyBlockAsm:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeSnappyBlockAsm
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeSnappyBlockAsm:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JL two_byte_offset_match_nolit_encodeSnappyBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(R10), R10
+ MOVL BX, 1(AX)
+ LEAL -64(R9), R9
ADDQ $0x05, AX
- CMPL R10, $0x04
+ CMPL R9, $0x04
JL four_bytes_remain_match_nolit_encodeSnappyBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
- TESTL R10, R10
+ TESTL R9, R9
JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVL SI, 1(AX)
+ XORL SI, SI
+ LEAL -1(SI)(R9*4), R9
+ MOVB R9, (AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
two_byte_offset_match_nolit_encodeSnappyBlockAsm:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
emit_copy_three_match_nolit_encodeSnappyBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBlockAsm:
CMPL CX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeSnappyBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm:
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x10, R8
- IMULQ R9, R8
- SHRQ $0x32, R8
- SHLQ $0x10, SI
- IMULQ R9, SI
- SHRQ $0x32, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeSnappyBlockAsm
INCL CX
JMP search_loop_encodeSnappyBlockAsm
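Every matchLen block in these functions compares eight bytes at a time and locates the first difference with an XOR plus trailing-zero count; the #ifdef GOAMD64_v3 blocks pick TZCNTQ on v3 builds and the older BSFQ otherwise (identical results here, since the input is known non-zero). A portable Go sketch (name mine):

    // (imports: encoding/binary, math/bits)
    // matchLenSketch assumes len(a) <= len(b), as the callers arrange.
    func matchLenSketch(a, b []byte) (n int) {
        for len(a) >= 8 {
            diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
            if diff != 0 {
                // TZCNTQ/BSFQ, then SARQ $0x03 to convert bits to bytes
                return n + bits.TrailingZeros64(diff)>>3
            }
            a, b, n = a[8:], b[8:], n+8
        }
        // byte-wise equivalent of the 4/2/1-byte tail compares
        for i := range a {
            if a[i] != b[i] {
                break
            }
            n++
        }
        return n
    }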
@@ -11246,8 +11193,8 @@ zero_loop_encodeSnappyBlockAsm64K:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
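All the genMemMoveShort buckets above use the overlapping-pair trick: one load from the start and one from the end of the span cover every length in the bucket with two stores and no byte loop. The genMemMoveLong variant extends it by holding the first and last 32 bytes in X0..X3, bulk-copying the 32-byte chunks in between (with aligned MOVOA stores once the destination is aligned), and writing the held edges last. The short form in Go (illustrative):

    // (imports: encoding/binary)
    // move8Through16 copies n bytes for 8 <= n <= 16, exactly the
    // memmove_move_8through16 pattern: MOVQ (src); MOVQ -8(src)(n*1);
    // then the two mirrored stores. The 17..32 and 33..64 buckets do
    // the same with 16-byte MOVOU pairs.
    func move8Through16(dst, src []byte, n int) {
        lo := binary.LittleEndian.Uint64(src)
        hi := binary.LittleEndian.Uint64(src[n-8:])
        binary.LittleEndian.PutUint64(dst, lo)
        binary.LittleEndian.PutUint64(dst[n-8:], hi)
    }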
@@ -11257,278 +11204,278 @@ zero_loop_encodeSnappyBlockAsm64K:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x10, R11
- IMULQ R9, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
SHLQ $0x10, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x32, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
- JNE no_repeat_found_encodeSnappyBlockAsm64K
- LEAL 1(CX), DI
- MOVL 12(SP), SI
- MOVL DI, R8
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL CX, R8
SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm64K
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
repeat_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL DI, SI
+ CMPL SI, BX
JLE repeat_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(R8*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
- LEAL -1(DI), DI
- DECL R8
+ LEAL -1(SI), SI
+ DECL DI
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
repeat_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeSnappyBlockAsm64K
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeSnappyBlockAsm64K
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeSnappyBlockAsm64K
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
one_byte_repeat_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R11, R11
- CMPL R8, $0x08
+ XORL R10, R10
+ CMPL DI, $0x08
JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K:
- MOVQ (R9)(R11*1), R10
- XORQ (SI)(R11*1), R10
- TESTQ R10, R10
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R9, R9
#else
- BSFQ R10, R10
+ BSFQ R9, R9
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K:
- LEAL -8(R8), R8
- LEAL 8(R11), R11
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K
JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- MOVL (R9)(R11*1), R10
- CMPL (SI)(R11*1), R10
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- SUBL $0x04, R8
- LEAL 4(R11), R11
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
- MOVW (R9)(R11*1), R10
- CMPW (SI)(R11*1), R10
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
- SUBL $0x02, R8
- LEAL 2(R11), R11
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL repeat_extend_forward_end_encodeSnappyBlockAsm64K
- MOVB (R9)(R11*1), R10
- CMPB (SI)(R11*1), R10
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
- LEAL 1(R11), R11
+ LEAL 1(R10), R10
repeat_extend_forward_end_encodeSnappyBlockAsm64K:
- ADDL R11, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeSnappyBlockAsm64K
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeSnappyBlockAsm64K:
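The one_byte/two_bytes/three_bytes literal paths above are the standard Snappy literal header: low tag bits 00, lengths below 60 stored inline as (n-1)<<2, and length-field values 60..63 (tags 0xf0, 0xf4, 0xf8, 0xfc) announcing 1..4 trailing length bytes; the base function needs all five cases while this 64K variant stops at 0xf4. A sketch (name mine):

    // (imports: encoding/binary)
    func emitLiteralHeader(dst []byte, n int) int {
        switch m := n - 1; {
        case m < 60: // one_byte: SHLB $0x02
            dst[0] = uint8(m) << 2
            return 1
        case m < 1<<8: // two_bytes: 0xf0 + one length byte
            dst[0], dst[1] = 0xf0, uint8(m)
            return 2
        case m < 1<<16: // three_bytes: 0xf4 + 16-bit length
            dst[0] = 0xf4
            binary.LittleEndian.PutUint16(dst[1:], uint16(m))
            return 3
        case m < 1<<24: // four_bytes: 0xf8 + 24-bit length
            dst[0] = 0xf8
            binary.LittleEndian.PutUint16(dst[1:], uint16(m))
            dst[3] = uint8(m >> 16)
            return 4
        default: // 0xfc + 32-bit length
            dst[0] = 0xfc
            binary.LittleEndian.PutUint32(dst[1:], uint32(m))
            return 5
        }
    }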
@@ -11536,16 +11483,16 @@ repeat_end_emit_encodeSnappyBlockAsm64K:
JMP search_loop_encodeSnappyBlockAsm64K
no_repeat_found_encodeSnappyBlockAsm64K:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBlockAsm64K
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeSnappyBlockAsm64K
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeSnappyBlockAsm64K
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBlockAsm64K
@@ -11555,288 +11502,288 @@ candidate3_match_encodeSnappyBlockAsm64K:
JMP candidate_match_encodeSnappyBlockAsm64K
candidate2_match_encodeSnappyBlockAsm64K:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBlockAsm64K:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBlockAsm64K
match_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBlockAsm64K
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBlockAsm64K
match_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBlockAsm64K
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm64K:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeSnappyBlockAsm64K
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBlockAsm64K
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
two_bytes_match_emit_encodeSnappyBlockAsm64K:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeSnappyBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
one_byte_match_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
memmove_long_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
match_nolit_loop_encodeSnappyBlockAsm64K:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBlockAsm64K:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K
JZ match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeSnappyBlockAsm64K
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm64K
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeSnappyBlockAsm64K:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
CMPL CX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeSnappyBlockAsm64K
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm64K:
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x10, R8
- IMULQ R9, R8
- SHRQ $0x32, R8
- SHLQ $0x10, SI
- IMULQ R9, SI
- SHRQ $0x32, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x32, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeSnappyBlockAsm64K
INCL CX
JMP search_loop_encodeSnappyBlockAsm64K
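Two small specializations separate this 64K function from the base one above it: the emitCopy paths drop the copy4/0xff form entirely (offsets always fit in two bytes), and the output-space check in match_extend_back_end reserves only 3 bytes of headroom (LEAQ 3(AX)(SI*1)) instead of the base function's 5, matching the largest literal header each variant can emit. The shape of that guard, as I read it (names mine):

    // dstPos is the output cursor (AX), nextEmit is 12(SP), and guard
    // is the dst_base + dstGuard pointer kept at (SP).
    func fitsOutput(dstPos, s, nextEmit, guard int) bool {
        return dstPos+(s-nextEmit)+3 < guard // CMPQ SI, (SP); JL
    }

When the check fails, the function stores 0 in ret and returns, signalling the caller to keep the block uncompressed.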
@@ -12021,8 +11968,8 @@ zero_loop_encodeSnappyBlockAsm12B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
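The 12B variant swaps in a five-byte hash: SHLQ $0x18 keeps bytes 0..4, the multiplier shortens to 0x000000cf1bbcdcbb, and SHRQ $0x34 leaves a 12-bit index, so the table has 4096 entries, sized for small blocks. Its search loop also skips faster (SHRL $0x05 rather than the base function's $0x06). The hash in Go (name mine):

    // hash5of12B: 5 input bytes to a 12-bit table index
    // (SHRQ $0x34 = 52, and 64-52 = 12).
    func hash5of12B(v uint64) uint32 {
        return uint32(((v << 24) * 0x000000cf1bbcdcbb) >> 52)
    }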
@@ -12032,278 +11979,278 @@ zero_loop_encodeSnappyBlockAsm12B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x18, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- SHLQ $0x18, R11
- IMULQ R9, R11
- SHRQ $0x34, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
SHLQ $0x18, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x34, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
- JNE no_repeat_found_encodeSnappyBlockAsm12B
- LEAL 1(CX), DI
- MOVL 12(SP), SI
- MOVL DI, R8
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x18, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL CX, R8
SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm12B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
repeat_extend_back_loop_encodeSnappyBlockAsm12B:
- CMPL DI, SI
+ CMPL SI, BX
JLE repeat_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(R8*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
- LEAL -1(DI), DI
- DECL R8
+ LEAL -1(SI), SI
+ DECL DI
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
repeat_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeSnappyBlockAsm12B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeSnappyBlockAsm12B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeSnappyBlockAsm12B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
one_byte_repeat_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R11, R11
- CMPL R8, $0x08
+ XORL R10, R10
+ CMPL DI, $0x08
JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B:
- MOVQ (R9)(R11*1), R10
- XORQ (SI)(R11*1), R10
- TESTQ R10, R10
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R9, R9
#else
- BSFQ R10, R10
+ BSFQ R9, R9
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B:
- LEAL -8(R8), R8
- LEAL 8(R11), R11
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B
JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- MOVL (R9)(R11*1), R10
- CMPL (SI)(R11*1), R10
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- SUBL $0x04, R8
- LEAL 4(R11), R11
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
- MOVW (R9)(R11*1), R10
- CMPW (SI)(R11*1), R10
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
- SUBL $0x02, R8
- LEAL 2(R11), R11
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL repeat_extend_forward_end_encodeSnappyBlockAsm12B
- MOVB (R9)(R11*1), R10
- CMPB (SI)(R11*1), R10
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R10), R10
repeat_extend_forward_end_encodeSnappyBlockAsm12B:
- ADDL R11, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
// emitCopy
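	// Reviewer note (not part of the generated file): Snappy copy tags.
	// Lengths > 64 are first reduced by emitting 60-byte copies (tag 0xee =
	// (60-1)<<2|0b10 plus a 2-byte offset). For length in [4,12) with
	// offset < 2048 the 2-byte form applies:
	//	byte0 = (offset>>8)<<5 | (length-4)<<2 | 0b01, byte1 = offset&0xff
	// (hence LEAL -15 over length<<2, which equals (length-4)<<2|1).
	// Everything else uses the 3-byte form:
	//	byte0 = (length-1)<<2 | 0b10, bytes 1-2 = offset little-endian
	// (hence LEAL -2 over length<<2).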
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeSnappyBlockAsm12B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeSnappyBlockAsm12B:
@@ -12311,16 +12258,16 @@ repeat_end_emit_encodeSnappyBlockAsm12B:
JMP search_loop_encodeSnappyBlockAsm12B
no_repeat_found_encodeSnappyBlockAsm12B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBlockAsm12B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeSnappyBlockAsm12B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeSnappyBlockAsm12B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBlockAsm12B
@@ -12330,288 +12277,288 @@ candidate3_match_encodeSnappyBlockAsm12B:
JMP candidate_match_encodeSnappyBlockAsm12B
candidate2_match_encodeSnappyBlockAsm12B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBlockAsm12B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBlockAsm12B
match_extend_back_loop_encodeSnappyBlockAsm12B:
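	// Reviewer note (not part of the generated file): a 4-byte candidate has
	// matched; this loop steps the match position and CX backwards together
	// while the preceding bytes are equal, bounded by the last literal emit
	// point in 12(SP), so the copy also covers equal bytes to its left.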
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBlockAsm12B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBlockAsm12B
match_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm12B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
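	// Reviewer note (not part of the generated file): emitLiteral tags. With
	// n pending literal bytes and m = n-1: m < 60 packs into the tag byte as
	// m<<2; m < 256 emits tag 0xf0 (60<<2) plus one length byte; larger m
	// emits tag 0xf4 (61<<2) plus a 2-byte length. The literal bytes
	// themselves are then copied by the genMemMove code below.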
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeSnappyBlockAsm12B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBlockAsm12B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
two_bytes_match_emit_encodeSnappyBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeSnappyBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
one_byte_match_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
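	// Reviewer note (not part of the generated file): the short-move path
	// dispatches on length: <=8 one MOVQ, <=16 two overlapping MOVQs
	// (head and tail), <=32 two overlapping MOVOUs, <=64 four MOVOUs.
	// Overlapping head/tail copies may write a few bytes twice, which
	// appears safe here since the dst bound check above reserves slack.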
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
memmove_long_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
match_nolit_loop_encodeSnappyBlockAsm12B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBlockAsm12B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B
JZ match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeSnappyBlockAsm12B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm12B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeSnappyBlockAsm12B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeSnappyBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm12B:
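	// Reviewer note (not part of the generated file): after emitting a copy,
	// 8 bytes starting at CX-2 are reloaded and two positions are re-hashed
	// into the 12-bit table (low 5 bytes via SHLQ $0x18, constant
	// 0xcf1bbcdcbb, top 12 bits of the product). CX-2 and CX are stored, and
	// the previous occupant of CX's slot is probed at once (JEQ back to
	// match_nolit_loop), chaining matches without re-entering search_loop.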
- MOVQ $0x000000cf1bbcdcbb, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x18, R8
- IMULQ R9, R8
- SHRQ $0x34, R8
- SHLQ $0x18, SI
- IMULQ R9, SI
- SHRQ $0x34, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x000000cf1bbcdcbb, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x18, DI
+ IMULQ R8, DI
+ SHRQ $0x34, DI
+ SHLQ $0x18, BX
+ IMULQ R8, BX
+ SHRQ $0x34, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeSnappyBlockAsm12B
INCL CX
JMP search_loop_encodeSnappyBlockAsm12B
@@ -12796,8 +12743,8 @@ zero_loop_encodeSnappyBlockAsm10B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -12807,278 +12754,278 @@ zero_loop_encodeSnappyBlockAsm10B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBlockAsm10B:
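	// Reviewer note (not part of the generated file): the probe step is
	// 1 + (bytes-since-last-emit >> 5), so incompressible input is skipped
	// progressively faster; 20(SP) keeps the resume position. The 4 bytes at
	// CX and at CX+1 are hashed (golden-ratio multiply, top 10 bits -> the
	// 1024-entry "10B" table at 24(SP)), and 16(SP) holds the current repeat
	// offset, which is checked one byte ahead before the table candidates.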
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x20, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R9, R11
- SHRQ $0x36, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
SHLQ $0x20, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x36, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
- JNE no_repeat_found_encodeSnappyBlockAsm10B
- LEAL 1(CX), DI
- MOVL 12(SP), SI
- MOVL DI, R8
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL CX, R8
SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm10B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
repeat_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL DI, SI
+ CMPL SI, BX
JLE repeat_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(R8*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
- LEAL -1(DI), DI
- DECL R8
+ LEAL -1(SI), SI
+ DECL DI
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
repeat_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeSnappyBlockAsm10B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeSnappyBlockAsm10B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeSnappyBlockAsm10B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
one_byte_repeat_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R11, R11
- CMPL R8, $0x08
+ XORL R10, R10
+ CMPL DI, $0x08
JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B:
- MOVQ (R9)(R11*1), R10
- XORQ (SI)(R11*1), R10
- TESTQ R10, R10
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R9, R9
#else
- BSFQ R10, R10
+ BSFQ R9, R9
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B:
- LEAL -8(R8), R8
- LEAL 8(R11), R11
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B
JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- MOVL (R9)(R11*1), R10
- CMPL (SI)(R11*1), R10
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- SUBL $0x04, R8
- LEAL 4(R11), R11
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
- MOVW (R9)(R11*1), R10
- CMPW (SI)(R11*1), R10
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
- SUBL $0x02, R8
- LEAL 2(R11), R11
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL repeat_extend_forward_end_encodeSnappyBlockAsm10B
- MOVB (R9)(R11*1), R10
- CMPB (SI)(R11*1), R10
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R10), R10
repeat_extend_forward_end_encodeSnappyBlockAsm10B:
- ADDL R11, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL SI, $0x00000800
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeSnappyBlockAsm10B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeSnappyBlockAsm10B:
@@ -13086,16 +13033,16 @@ repeat_end_emit_encodeSnappyBlockAsm10B:
JMP search_loop_encodeSnappyBlockAsm10B
no_repeat_found_encodeSnappyBlockAsm10B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBlockAsm10B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeSnappyBlockAsm10B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeSnappyBlockAsm10B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBlockAsm10B
@@ -13105,288 +13052,288 @@ candidate3_match_encodeSnappyBlockAsm10B:
JMP candidate_match_encodeSnappyBlockAsm10B
candidate2_match_encodeSnappyBlockAsm10B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBlockAsm10B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBlockAsm10B
match_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBlockAsm10B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBlockAsm10B
match_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm10B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeSnappyBlockAsm10B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBlockAsm10B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
two_bytes_match_emit_encodeSnappyBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeSnappyBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
one_byte_match_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
memmove_long_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
match_nolit_loop_encodeSnappyBlockAsm10B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBlockAsm10B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B
JZ match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeSnappyBlockAsm10B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm10B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeSnappyBlockAsm10B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL BX, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeSnappyBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm10B:
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x20, R8
- IMULQ R9, R8
- SHRQ $0x36, R8
- SHLQ $0x20, SI
- IMULQ R9, SI
- SHRQ $0x36, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x36, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x36, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeSnappyBlockAsm10B
INCL CX
JMP search_loop_encodeSnappyBlockAsm10B
@@ -13571,8 +13518,8 @@ zero_loop_encodeSnappyBlockAsm8B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -13582,276 +13529,276 @@ zero_loop_encodeSnappyBlockAsm8B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBlockAsm8B:
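	// Reviewer note (not part of the generated file): same scheme as the
	// 10B/12B variants, scaled down: the skip step uses >>4 rather than >>5,
	// and the hash keeps the top 8 bits (SHRQ $0x38) for a 256-entry table,
	// matching the smaller maximum block size this variant serves.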
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x04, SI
- LEAL 4(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R10
- MOVQ DI, R11
- SHRQ $0x08, R11
- SHLQ $0x20, R10
- IMULQ R9, R10
- SHRQ $0x38, R10
- SHLQ $0x20, R11
- IMULQ R9, R11
- SHRQ $0x38, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 24(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- LEAL 1(CX), R10
- MOVL R10, 24(SP)(R11*4)
- MOVQ DI, R10
- SHRQ $0x10, R10
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
SHLQ $0x20, R10
- IMULQ R9, R10
+ IMULQ R8, R10
SHRQ $0x38, R10
- MOVL CX, R9
- SUBL 16(SP), R9
- MOVL 1(DX)(R9*1), R11
- MOVQ DI, R9
- SHRQ $0x08, R9
- CMPL R9, R11
- JNE no_repeat_found_encodeSnappyBlockAsm8B
- LEAL 1(CX), DI
- MOVL 12(SP), SI
- MOVL DI, R8
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x38, R9
+ MOVL CX, R8
SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_encodeSnappyBlockAsm8B
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
repeat_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL DI, SI
+ CMPL SI, BX
JLE repeat_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(R8*1), BL
- MOVB -1(DX)(DI*1), R9
- CMPB BL, R9
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
- LEAL -1(DI), DI
- DECL R8
+ LEAL -1(SI), SI
+ DECL DI
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
repeat_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
- MOVL DI, R8
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R9
- SUBL SI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
JLT one_byte_repeat_emit_encodeSnappyBlockAsm8B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_repeat_emit_encodeSnappyBlockAsm8B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_repeat_emit_encodeSnappyBlockAsm8B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
one_byte_repeat_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ DI, $0x08
JLE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ DI, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ DI, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R8), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R8), R9
+ MOVQ -8(R8)(DI*1), R8
+ MOVQ R9, (AX)
+ MOVQ R8, -8(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
+ MOVOU (R8), X0
+ MOVOU -16(R8)(DI*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU X1, -16(AX)(DI*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), SI
+ LEAQ (AX)(DI*1), BX
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R8), X0
+ MOVOU 16(R8), X1
+ MOVOU -32(R8)(DI*1), X2
+ MOVOU -16(R8)(DI*1), X3
+ MOVQ DI, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R8)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R8)(R11*1), X4
+ MOVOU -16(R8)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ DI, R11
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(DI*1)
+ MOVOU X3, -16(AX)(DI*1)
+ MOVQ BX, AX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
ADDL $0x05, CX
- MOVL CX, SI
- SUBL 16(SP), SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), SI
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R11, R11
- CMPL R8, $0x08
+ XORL R10, R10
+ CMPL DI, $0x08
JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B:
- MOVQ (R9)(R11*1), R10
- XORQ (SI)(R11*1), R10
- TESTQ R10, R10
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R9, R9
#else
- BSFQ R10, R10
+ BSFQ R9, R9
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B:
- LEAL -8(R8), R8
- LEAL 8(R11), R11
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B
JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- MOVL (R9)(R11*1), R10
- CMPL (SI)(R11*1), R10
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- SUBL $0x04, R8
- LEAL 4(R11), R11
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
- MOVW (R9)(R11*1), R10
- CMPW (SI)(R11*1), R10
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
- SUBL $0x02, R8
- LEAL 2(R11), R11
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL repeat_extend_forward_end_encodeSnappyBlockAsm8B
- MOVB (R9)(R11*1), R10
- CMPB (SI)(R11*1), R10
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R10), R10
repeat_extend_forward_end_encodeSnappyBlockAsm8B:
- ADDL R11, CX
- MOVL CX, SI
- SUBL DI, SI
- MOVL 16(SP), DI
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
- CMPL SI, $0x40
+ CMPL BX, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(SI), SI
+ MOVW SI, 1(AX)
+ LEAL -60(BX), BX
ADDQ $0x03, AX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
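	// Reviewer note (not part of the generated file): unlike the 10B/12B
	// variants there is no offset < 2048 guard on this path; blocks handled
	// by the 8B encoder seem small enough that offsets always fit the 11-bit
	// field of the 2-byte copy tag, so the check is statically true.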
- CMPL SI, $0x0c
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
JGE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(SI*4), SI
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
+ LEAL -15(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
ADDQ $0x02, AX
JMP repeat_end_emit_encodeSnappyBlockAsm8B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(SI*4), SI
- MOVB SI, (AX)
- MOVW DI, 1(AX)
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW SI, 1(AX)
ADDQ $0x03, AX
repeat_end_emit_encodeSnappyBlockAsm8B:
@@ -13859,16 +13806,16 @@ repeat_end_emit_encodeSnappyBlockAsm8B:
JMP search_loop_encodeSnappyBlockAsm8B
no_repeat_found_encodeSnappyBlockAsm8B:
- CMPL (DX)(SI*1), DI
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBlockAsm8B
- SHRQ $0x08, DI
- MOVL 24(SP)(R10*4), SI
- LEAL 2(CX), R9
- CMPL (DX)(R8*1), DI
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
JEQ candidate2_match_encodeSnappyBlockAsm8B
- MOVL R9, 24(SP)(R10*4)
- SHRQ $0x08, DI
- CMPL (DX)(SI*1), DI
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
JEQ candidate3_match_encodeSnappyBlockAsm8B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBlockAsm8B
@@ -13878,286 +13825,286 @@ candidate3_match_encodeSnappyBlockAsm8B:
JMP candidate_match_encodeSnappyBlockAsm8B
candidate2_match_encodeSnappyBlockAsm8B:
- MOVL R9, 24(SP)(R10*4)
+ MOVL R8, 24(SP)(R9*4)
INCL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBlockAsm8B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBlockAsm8B
match_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBlockAsm8B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBlockAsm8B
match_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm8B:
- MOVL CX, DI
- MOVL 12(SP), R8
- CMPL R8, DI
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(R8*1), DI
- SUBL R8, R9
- LEAL -1(R9), R8
- CMPL R8, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), DI
+ CMPL DI, $0x3c
JLT one_byte_match_emit_encodeSnappyBlockAsm8B
- CMPL R8, $0x00000100
+ CMPL DI, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBlockAsm8B
MOVB $0xf4, (AX)
- MOVW R8, 1(AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
two_bytes_match_emit_encodeSnappyBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB R8, 1(AX)
+ MOVB DI, 1(AX)
ADDQ $0x02, AX
- CMPL R8, $0x40
+ CMPL DI, $0x40
JL memmove_match_emit_encodeSnappyBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
one_byte_match_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, R8
- MOVB R8, (AX)
+ SHLB $0x02, DI
+ MOVB DI, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (DI), R10
- MOVQ R10, (AX)
+ MOVQ (SI), R9
+ MOVQ R9, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (DI), R10
- MOVQ -8(DI)(R9*1), DI
- MOVQ R10, (AX)
- MOVQ DI, -8(AX)(R9*1)
+ MOVQ (SI), R9
+ MOVQ -8(SI)(R8*1), SI
+ MOVQ R9, (AX)
+ MOVQ SI, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (DI), X0
- MOVOU -16(DI)(R9*1), X1
+ MOVOU (SI), X0
+ MOVOU -16(SI)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
- MOVQ R8, AX
+ MOVQ DI, AX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
memmove_long_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R9*1), R8
+ LEAQ (AX)(R8*1), DI
// genMemMoveLong
- MOVOU (DI), X0
- MOVOU 16(DI), X1
- MOVOU -32(DI)(R9*1), X2
- MOVOU -16(DI)(R9*1), X3
- MOVQ R9, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU -32(SI)(R8*1), X2
+ MOVOU -16(SI)(R8*1), X3
+ MOVQ R8, R10
+ SHRQ $0x05, R10
+ MOVQ AX, R9
+ ANDL $0x0000001f, R9
+ MOVQ $0x00000040, R11
+ SUBQ R9, R11
+ DECQ R10
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(DI)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(SI)(R11*1), R9
+ LEAQ -32(AX)(R11*1), R12
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
- ADDQ $0x20, R13
- ADDQ $0x20, R10
+ MOVOU (R9), X4
+ MOVOU 16(R9), X5
+ MOVOA X4, (R12)
+ MOVOA X5, 16(R12)
ADDQ $0x20, R12
- DECQ R11
+ ADDQ $0x20, R9
+ ADDQ $0x20, R11
+ DECQ R10
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(DI)(R12*1), X4
- MOVOU -16(DI)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R9, R12
+ MOVOU -32(SI)(R11*1), X4
+ MOVOU -16(SI)(R11*1), X5
+ MOVOA X4, -32(AX)(R11*1)
+ MOVOA X5, -16(AX)(R11*1)
+ ADDQ $0x20, R11
+ CMPQ R8, R11
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ R8, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ DI, AX
emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
match_nolit_loop_encodeSnappyBlockAsm8B:
- MOVL CX, DI
- SUBL SI, DI
- MOVL DI, 16(SP)
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(SI*1), SI
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
// matchLen
- XORL R10, R10
- CMPL DI, $0x08
+ XORL R9, R9
+ CMPL SI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B:
- MOVQ (R8)(R10*1), R9
- XORQ (SI)(R10*1), R9
- TESTQ R9, R9
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R8, R8
#else
- BSFQ R9, R9
+ BSFQ R8, R8
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBlockAsm8B:
- LEAL -8(DI), DI
- LEAL 8(R10), R10
- CMPL DI, $0x08
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B
JZ match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
- CMPL DI, $0x04
+ CMPL SI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- MOVL (R8)(R10*1), R9
- CMPL (SI)(R10*1), R9
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- SUBL $0x04, DI
- LEAL 4(R10), R10
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
- CMPL DI, $0x02
+ CMPL SI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
- MOVW (R8)(R10*1), R9
- CMPW (SI)(R10*1), R9
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
- SUBL $0x02, DI
- LEAL 2(R10), R10
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
- CMPL DI, $0x01
+ CMPL SI, $0x01
JL match_nolit_end_encodeSnappyBlockAsm8B
- MOVB (R8)(R10*1), R9
- CMPB (SI)(R10*1), R9
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
JNE match_nolit_end_encodeSnappyBlockAsm8B
- LEAL 1(R10), R10
+ LEAL 1(R9), R9
match_nolit_end_encodeSnappyBlockAsm8B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
- CMPL R10, $0x40
+ CMPL R9, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(R10), R10
+ MOVW BX, 1(AX)
+ LEAL -60(R9), R9
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
- CMPL R10, $0x0c
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(R10*4), R10
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, R10
- MOVB R10, (AX)
+ LEAL -15(SI), SI
+ MOVB BL, 1(AX)
+ SHRL $0x08, BX
+ SHLL $0x05, BX
+ ORL BX, SI
+ MOVB SI, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(R10*4), R10
- MOVB R10, (AX)
- MOVW SI, 1(AX)
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
CMPL CX, 8(SP)
JGE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ -2(DX)(CX*1), DI
+ MOVQ -2(DX)(CX*1), SI
CMPQ AX, (SP)
JL match_nolit_dst_ok_encodeSnappyBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm8B:
- MOVQ $0x9e3779b1, R9
- MOVQ DI, R8
- SHRQ $0x10, DI
- MOVQ DI, SI
- SHLQ $0x20, R8
- IMULQ R9, R8
- SHRQ $0x38, R8
- SHLQ $0x20, SI
- IMULQ R9, SI
- SHRQ $0x38, SI
- LEAL -2(CX), R9
- LEAQ 24(SP)(SI*4), R10
- MOVL (R10), SI
- MOVL R9, 24(SP)(R8*4)
- MOVL CX, (R10)
- CMPL (DX)(SI*1), DI
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x38, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x38, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
JEQ match_nolit_loop_encodeSnappyBlockAsm8B
INCL CX
JMP search_loop_encodeSnappyBlockAsm8B
@@ -14342,8 +14289,8 @@ zero_loop_encodeSnappyBetterBlockAsm:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -14353,369 +14300,369 @@ zero_loop_encodeSnappyBetterBlockAsm:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBetterBlockAsm:
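	// Reviewer note (not part of the generated file): the "Better" variant
	// probes two tables per position: a long hash over the low 7 bytes
	// (constant 0xcf1bbcdcbfa563, top 17 bits -> table at 24(SP)) and a
	// short 4-byte hash (0x9e3779b1, top 14 bits -> table at 524312(SP)).
	// The long candidate is preferred; the short one catches nearer repeats.
	// The skip step is additionally capped at 100 bytes (the $0x63 check
	// below), trading some speed for compression ratio.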
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x07, SI
- CMPL SI, $0x63
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ CMPL BX, $0x63
JLE check_maxskip_ok_encodeSnappyBetterBlockAsm
- LEAL 100(CX), SI
+ LEAL 100(CX), BX
JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
check_maxskip_ok_encodeSnappyBetterBlockAsm:
- LEAL 1(CX)(SI*1), SI
+ LEAL 1(CX)(BX*1), BX
check_maxskip_cont_encodeSnappyBetterBlockAsm:
- CMPL SI, 8(SP)
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBetterBlockAsm
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 524312(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 524312(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 524312(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 524312(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeSnappyBetterBlockAsm
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeSnappyBetterBlockAsm
no_short_found_encodeSnappyBetterBlockAsm:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeSnappyBetterBlockAsm
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBetterBlockAsm
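
This search loop is what makes the "better" encoder better: two hash tables are consulted per position, a long one keyed on seven bytes (the `SHLQ $0x08` / `IMULQ $0x00cf1bbcdcbfa563` chain) and a short one keyed on four (`SHLQ $0x20` / `IMULQ $0x9e3779b1`), preferring the long candidate and falling back to the short one, first via the full eight-byte compares and then via the four-byte CMPLs above. A sketch of that selection, where `candidate`, `lTable`, and `sTable` are stand-in names; it assumes src keeps at least eight readable bytes at every candidate, and the 47/50 shifts are the ones this variant uses:

    import "encoding/binary"

    // Dual-table candidate lookup; SHRQ $0x2f and SHRQ $0x32 above give the
    // 2^17- and 2^14-entry tables living at 24(SP) and 524312(SP).
    func candidate(src []byte, s int, lTable, sTable []uint32) (int, bool) {
        cv := binary.LittleEndian.Uint64(src[s:])
        hl := (cv << 8) * 0x00cf1bbcdcbfa563 >> 47 // 7-byte hash
        hs := (cv << 32) * 0x9e3779b1 >> 50        // 4-byte hash
        candL, candS := int(lTable[hl]), int(sTable[hs])
        lTable[hl], sTable[hs] = uint32(s), uint32(s)
        if binary.LittleEndian.Uint64(src[candL:]) == cv {
            return candL, true
        }
        if binary.LittleEndian.Uint64(src[candS:]) == cv {
            return candS, true
        }
        return 0, false // the 32-bit compares and the shifted retry run next
    }
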
candidateS_match_encodeSnappyBetterBlockAsm:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x2f, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x2f, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBetterBlockAsm
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBetterBlockAsm:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
match_extend_back_loop_encodeSnappyBetterBlockAsm:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBetterBlockAsm
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBetterBlockAsm
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
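
`match_extend_back_loop` widens the match to the left one byte per iteration, as long as the preceding bytes agree and neither the candidate start nor the last-emit boundary (12(SP)) is crossed. In Go terms, with illustrative names:

    // Extend a found match backwards over equal bytes; cand is the
    // candidate start, s the match start, nextEmit the emit boundary.
    func extendBack(src []byte, cand, s, nextEmit int) (int, int) {
        for cand > 0 && s > nextEmit && src[cand-1] == src[s-1] {
            cand--
            s--
        }
        return cand, s
    }
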
match_extend_back_end_encodeSnappyBetterBlockAsm:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 5(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBetterBlockAsm
MOVQ $0x00000000, ret+48(FP)
RET
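
The bound check above is the only place these functions bail out: before emitting anything for this match it reserves room for the pending literals plus the largest copy tag the variant can produce, five bytes here since a four-byte-offset copy is possible, but only three in the 64K and smaller variants further down, and returns 0 if the destination cannot hold that worst case. Roughly, with assumed names:

    // Worst-case space check per match step; maxTag is 5 for this variant
    // (LEAQ 5(AX)(SI*1) above) and 3 for the capped-offset ones.
    func dstHasRoom(pendingLiterals, maxTag, dstRemaining int) bool {
        return pendingLiterals+maxTag <= dstRemaining // else encode returns 0
    }
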
match_dst_size_check_encodeSnappyBetterBlockAsm:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm
JZ match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeSnappyBetterBlockAsm
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
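
The matchLen ladder above compares eight bytes per iteration; on the first mismatching word it XORs the two operands and converts the lowest set bit into a byte count with TZCNT (BSF on pre-GOAMD64_v3 targets), then mops up the sub-8-byte remainder. The same logic in Go:

    import (
        "encoding/binary"
        "math/bits"
    )

    // Count leading equal bytes of a and b (equal lengths assumed).
    func matchLen(a, b []byte) int {
        n := 0
        for len(a) >= 8 {
            x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
            if x != 0 {
                return n + bits.TrailingZeros64(x)>>3 // first differing byte
            }
            a, b, n = a[8:], b[8:], n+8
        }
        for i := range a {
            if a[i] != b[i] {
                return n + i
            }
        }
        return n + len(a)
    }
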
match_nolit_end_encodeSnappyBetterBlockAsm:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- CMPL R12, $0x01
+ CMPL R11, $0x01
JG match_length_ok_encodeSnappyBetterBlockAsm
- CMPL R8, $0x0000ffff
+ CMPL DI, $0x0000ffff
JLE match_length_ok_encodeSnappyBetterBlockAsm
MOVL 20(SP), CX
INCL CX
JMP search_loop_encodeSnappyBetterBlockAsm
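
The "Check if repeat" test above discards a match before anything is emitted: R11 counts matched bytes beyond the four already verified, so a match of five bytes or fewer whose offset needs more than 16 bits would cost a five-byte copy tag and is not worth interrupting the literal run for. As a predicate, names assumed:

    // A very short match with a >64KiB offset loses to plain literals.
    func worthEmitting(extraMatched, offset int) bool {
        return extraMatched > 1 || offset <= 0xffff
    }
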
match_length_ok_encodeSnappyBetterBlockAsm:
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeSnappyBetterBlockAsm
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL SI, $0x00010000
+ CMPL BX, $0x00010000
JLT three_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL SI, $0x01000000
+ CMPL BX, $0x01000000
JLT four_bytes_match_emit_encodeSnappyBetterBlockAsm
MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
+ MOVL BX, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
four_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVL SI, R11
- SHRL $0x10, R11
+ MOVL BX, R10
+ SHRL $0x10, R10
MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB R11, 3(AX)
+ MOVW BX, 1(AX)
+ MOVB R10, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
three_bytes_match_emit_encodeSnappyBetterBlockAsm:
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
two_bytes_match_emit_encodeSnappyBetterBlockAsm:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeSnappyBetterBlockAsm
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
one_byte_match_emit_encodeSnappyBetterBlockAsm:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
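
The one/two/three/four-byte emit ladder above writes the Snappy literal header: when the length minus one is below 60 it fits in the tag byte itself, and longer runs store 60..63 in the tag and append one to four little-endian length bytes, which is where the 0xf0/0xf4/0xf8/0xfc constants (60<<2 through 63<<2) come from. A sketch, with `emitLiteralHeader` as an assumed name:

    import "encoding/binary"

    // Snappy literal header; returns the header size in bytes.
    func emitLiteralHeader(dst []byte, litLen int) int {
        switch u := litLen - 1; {
        case u < 60:
            dst[0] = byte(u) << 2 // the SHLB $0x02 in the one-byte case
            return 1
        case u < 1<<8:
            dst[0], dst[1] = 60<<2, byte(u)
            return 2
        case u < 1<<16:
            dst[0], dst[1], dst[2] = 61<<2, byte(u), byte(u>>8)
            return 3
        case u < 1<<24:
            dst[0], dst[1], dst[2], dst[3] = 62<<2, byte(u), byte(u>>8), byte(u>>16)
            return 4
        default:
            dst[0] = 63 << 2
            binary.LittleEndian.PutUint32(dst[1:], uint32(u))
            return 5
        }
    }
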
memmove_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
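
`genMemMoveShort` dispatches on the literal length to one of four fixed strategies (up to 8, 16, 32, or 64 bytes). The larger classes copy the head and the tail of the span with loads and stores that may overlap in the middle, so every length in the class is handled without a byte loop; the smallest simply writes a full eight-byte word, apparently relying on the headroom the bound check reserved. The 8-through-16 case in Go, with assumed names:

    import "encoding/binary"

    // Branch-free copy for 8 <= n <= 16: first and last eight bytes,
    // possibly overlapping (the _memmove_move_8through16 block above).
    func move8through16(dst, src []byte, n int) {
        head := binary.LittleEndian.Uint64(src)
        tail := binary.LittleEndian.Uint64(src[n-8:])
        binary.LittleEndian.PutUint64(dst, head)
        binary.LittleEndian.PutUint64(dst[n-8:], tail)
    }
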
memmove_long_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
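
`genMemMoveLong` (lengths over 64) captures the first and last 32 bytes in X0-X3 up front, copies the body in 32-byte chunks (the back loop arranges for its MOVOA stores to hit aligned destinations), and writes the saved head and tail only at the end, which papers over whatever the chunk loop leaves ragged at either edge. The shape of it, minus the SSE and alignment details:

    // Long-copy shape for n > 64: sloppy 32-byte middle loop, exact
    // head/tail last. dst and src are assumed not to overlap, as in the
    // literal-emit use above.
    func moveLong(dst, src []byte, n int) {
        var head, tail [32]byte
        copy(head[:], src[:32])
        copy(tail[:], src[n-32:n])
        for i := 32; i < n-32; i += 32 {
            copy(dst[i:i+32], src[i:i+32])
        }
        copy(dst[:32], head[:])
        copy(dst[n-32:n], tail[:])
    }
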
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
- CMPL R8, $0x00010000
+ CMPL DI, $0x00010000
JL two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
MOVB $0xff, (AX)
- MOVL R8, 1(AX)
- LEAL -64(R12), R12
+ MOVL DI, 1(AX)
+ LEAL -64(R11), R11
ADDQ $0x05, AX
- CMPL R12, $0x04
+ CMPL R11, $0x04
JL four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
- TESTL R12, R12
+ TESTL R11, R11
JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
- MOVB $0x03, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVL R8, 1(AX)
+ XORL BX, BX
+ LEAL -1(BX)(R11*4), R11
+ MOVB R11, (AX)
+ MOVL DI, 1(AX)
ADDQ $0x05, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
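
Of these "better" variants, only this full one can see offsets of 64KiB and above, so only it carries the five-byte tagCopy4 path; lengths over 64 are first split off as maximal 64-byte copies by the loop above, whose tag byte 0xff is (64-1)<<2|3. The remainder encoding, names assumed:

    import "encoding/binary"

    // Five-byte Snappy copy tag for offsets that need 32 bits.
    func emitCopy4(dst []byte, offset uint32, length int) int {
        dst[0] = byte(length-1)<<2 | 0x03 // the LEAL -1(BX)(R11*4) above
        binary.LittleEndian.PutUint32(dst[1:], offset)
        return 5
    }
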
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
@@ -14727,50 +14674,50 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R12
- IMULQ SI, R12
- SHRQ $0x2f, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x32, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 524312(SP)(R11*4)
- MOVL R14, 524312(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x2f, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 524312(SP)(R10*4)
+ MOVL R13, 524312(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeSnappyBetterBlockAsm
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x08, R8
- IMULQ SI, R8
- SHRQ $0x2f, R8
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x2f, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x08, DI
+ IMULQ BX, DI
+ SHRQ $0x2f, DI
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x2f, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm
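
After a long match is accepted, `index_loop` re-seeds the long hash table across the span that was skipped, walking inward from both ends two bytes per step, so half the positions get indexed at half the cost of a full pass. A sketch, with `reindex`, `hashL`, and `lTable` as assumed stand-ins:

    import "encoding/binary"

    // Re-index the skipped span [lo, hi] into the long table, both ends
    // stepping inward by two (the ADDQ/SUBQ $0x02 pair above).
    func reindex(src []byte, lTable []uint32, lo, hi int, hashL func(uint64) uint64) {
        for ; lo < hi; lo, hi = lo+2, hi-2 {
            lTable[hashL(binary.LittleEndian.Uint64(src[lo:]))] = uint32(lo)
            lTable[hashL(binary.LittleEndian.Uint64(src[hi:]))] = uint32(hi)
        }
    }
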
emit_remainder_encodeSnappyBetterBlockAsm:
@@ -14972,8 +14919,8 @@ zero_loop_encodeSnappyBetterBlockAsm64K:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -14983,309 +14930,309 @@ zero_loop_encodeSnappyBetterBlockAsm64K:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBetterBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x07, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x07, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x32, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 262168(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 262168(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x32, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 262168(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 262168(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeSnappyBetterBlockAsm64K
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeSnappyBetterBlockAsm64K
no_short_found_encodeSnappyBetterBlockAsm64K:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBetterBlockAsm64K
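
This 64K variant and the 12B/10B/8B ones below repeat the structure of the full encoder with smaller tables and cheaper bookkeeping: the 64K variant drops the max-skip cap and reserves only three output bytes per match (no copy4 is possible), and the sub-64K variants switch the long hash from seven input bytes to six (`SHLQ $0x10` with the 0x0000cf1bbcdcbf9b prime instead of `SHLQ $0x08` with 0x00cf1bbcdcbfa563). The shift after the multiply is what sets the table size; a parameterized sketch of the long hash, with `hashLong` as an assumed name:

    // Long-table hash across the variants: keep the low keepBytes bytes,
    // multiply, take the top tableBits bits. (7, 16) reproduces the 64K
    // SHRQ $0x30; (6, 14), (6, 12), and (6, 10) give the 12B/10B/8B shifts.
    func hashLong(cv uint64, keepBytes, tableBits uint) uint64 {
        prime := uint64(0x00cf1bbcdcbfa563) // 7-byte variants
        if keepBytes == 6 {
            prime = 0x0000cf1bbcdcbf9b
        }
        return (cv << (64 - 8*keepBytes)) * prime >> (64 - tableBits)
    }
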
candidateS_match_encodeSnappyBetterBlockAsm64K:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x08, R10
- IMULQ R9, R10
- SHRQ $0x30, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x08, R9
+ IMULQ R8, R9
+ SHRQ $0x30, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBetterBlockAsm64K:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBetterBlockAsm64K
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
match_extend_back_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBetterBlockAsm64K
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm64K:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeSnappyBetterBlockAsm64K
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeSnappyBetterBlockAsm64K
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeSnappyBetterBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
@@ -15297,50 +15244,50 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
- MOVQ $0x00cf1bbcdcbfa563, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R12
- IMULQ SI, R12
- SHRQ $0x30, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x32, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 262168(SP)(R11*4)
- MOVL R14, 262168(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x00cf1bbcdcbfa563, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x08, R11
+ IMULQ BX, R11
+ SHRQ $0x30, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x32, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 262168(SP)(R10*4)
+ MOVL R13, 262168(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm64K:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x08, R8
- IMULQ SI, R8
- SHRQ $0x30, R8
- SHLQ $0x08, R10
- IMULQ SI, R10
- SHRQ $0x30, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x08, DI
+ IMULQ BX, DI
+ SHRQ $0x30, DI
+ SHLQ $0x08, R9
+ IMULQ BX, R9
+ SHRQ $0x30, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm64K
emit_remainder_encodeSnappyBetterBlockAsm64K:
@@ -15523,8 +15470,8 @@ zero_loop_encodeSnappyBetterBlockAsm12B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -15534,309 +15481,309 @@ zero_loop_encodeSnappyBetterBlockAsm12B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x06, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x06, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x34, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 65560(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 65560(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x34, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 65560(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 65560(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeSnappyBetterBlockAsm12B
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeSnappyBetterBlockAsm12B
no_short_found_encodeSnappyBetterBlockAsm12B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBetterBlockAsm12B
candidateS_match_encodeSnappyBetterBlockAsm12B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x32, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBetterBlockAsm12B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBetterBlockAsm12B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
match_extend_back_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBetterBlockAsm12B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm12B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeSnappyBetterBlockAsm12B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeSnappyBetterBlockAsm12B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeSnappyBetterBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
@@ -15848,50 +15795,50 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x34, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x32, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x34, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 65560(SP)(R11*4)
- MOVL R14, 65560(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x32, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x34, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 65560(SP)(R10*4)
+ MOVL R13, 65560(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm12B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x32, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x32, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x32, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x32, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm12B
emit_remainder_encodeSnappyBetterBlockAsm12B:
@@ -16074,8 +16021,8 @@ zero_loop_encodeSnappyBetterBlockAsm10B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -16085,309 +16032,309 @@ zero_loop_encodeSnappyBetterBlockAsm10B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x05, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x36, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 16408(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 16408(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
- JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPQ R11, DI
- JNE no_short_found_encodeSnappyBetterBlockAsm10B
- MOVL R8, SI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x36, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 16408(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 16408(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPQ R10, SI
+ JNE no_short_found_encodeSnappyBetterBlockAsm10B
+ MOVL DI, BX
JMP candidate_match_encodeSnappyBetterBlockAsm10B
no_short_found_encodeSnappyBetterBlockAsm10B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBetterBlockAsm10B
candidateS_match_encodeSnappyBetterBlockAsm10B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x34, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBetterBlockAsm10B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBetterBlockAsm10B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
match_extend_back_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBetterBlockAsm10B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm10B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeSnappyBetterBlockAsm10B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeSnappyBetterBlockAsm10B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeSnappyBetterBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- CMPL R8, $0x00000800
+ CMPL DI, $0x00000800
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
@@ -16399,50 +16346,50 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x36, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x34, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x36, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 16408(SP)(R11*4)
- MOVL R14, 16408(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x34, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x36, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 16408(SP)(R10*4)
+ MOVL R13, 16408(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm10B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x34, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x34, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x34, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x34, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm10B
emit_remainder_encodeSnappyBetterBlockAsm10B:
@@ -16625,8 +16572,8 @@ zero_loop_encodeSnappyBetterBlockAsm8B:
MOVL $0x00000000, 12(SP)
MOVQ src_len+32(FP), CX
LEAQ -9(CX), DX
- LEAQ -8(CX), SI
- MOVL SI, 8(SP)
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
SHRQ $0x05, CX
SUBL CX, DX
LEAQ (AX)(DX*1), DX
@@ -16636,307 +16583,307 @@ zero_loop_encodeSnappyBetterBlockAsm8B:
MOVQ src_base+24(FP), DX
search_loop_encodeSnappyBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- SHRL $0x04, SI
- LEAL 1(CX)(SI*1), SI
- CMPL SI, 8(SP)
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 1(CX)(BX*1), BX
+ CMPL BX, 8(SP)
JGE emit_remainder_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(CX*1), DI
- MOVL SI, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R9
- MOVQ $0x9e3779b1, SI
- MOVQ DI, R10
- MOVQ DI, R11
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ SI, R11
- SHRQ $0x38, R11
- MOVL 24(SP)(R10*4), SI
- MOVL 4120(SP)(R11*4), R8
- MOVL CX, 24(SP)(R10*4)
- MOVL CX, 4120(SP)(R11*4)
- MOVQ (DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- CMPQ R10, DI
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ $0x9e3779b1, BX
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ BX, R10
+ SHRQ $0x38, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 4120(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ MOVL CX, 4120(SP)(R10*4)
+ MOVQ (DX)(BX*1), R9
+ MOVQ (DX)(DI*1), R10
+ CMPQ R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPQ R11, DI
+ CMPQ R10, SI
JNE no_short_found_encodeSnappyBetterBlockAsm8B
- MOVL R8, SI
+ MOVL DI, BX
JMP candidate_match_encodeSnappyBetterBlockAsm8B
no_short_found_encodeSnappyBetterBlockAsm8B:
- CMPL R10, DI
+ CMPL R9, SI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPL R11, DI
+ CMPL R10, SI
JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
MOVL 20(SP), CX
JMP search_loop_encodeSnappyBetterBlockAsm8B
candidateS_match_encodeSnappyBetterBlockAsm8B:
- SHRQ $0x08, DI
- MOVQ DI, R10
- SHLQ $0x10, R10
- IMULQ R9, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R10*4), SI
+ SHRQ $0x08, SI
+ MOVQ SI, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x36, R9
+ MOVL 24(SP)(R9*4), BX
INCL CX
- MOVL CX, 24(SP)(R10*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 24(SP)(R9*4)
+ CMPL (DX)(BX*1), SI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
DECL CX
- MOVL R8, SI
+ MOVL DI, BX
candidate_match_encodeSnappyBetterBlockAsm8B:
- MOVL 12(SP), DI
- TESTL SI, SI
+ MOVL 12(SP), SI
+ TESTL BX, BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
- CMPL CX, DI
+ CMPL CX, SI
JLE match_extend_back_end_encodeSnappyBetterBlockAsm8B
- MOVB -1(DX)(SI*1), BL
+ MOVB -1(DX)(BX*1), DI
MOVB -1(DX)(CX*1), R8
- CMPB BL, R8
+ CMPB DI, R8
JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
LEAL -1(CX), CX
- DECL SI
+ DECL BX
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
match_extend_back_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, DI
- SUBL 12(SP), DI
- LEAQ 3(AX)(DI*1), DI
- CMPQ DI, (SP)
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
JL match_dst_size_check_encodeSnappyBetterBlockAsm8B
MOVQ $0x00000000, ret+48(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm8B:
- MOVL CX, DI
+ MOVL CX, SI
ADDL $0x04, CX
- ADDL $0x04, SI
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(SI*1), R10
+ ADDL $0x04, BX
+ MOVQ src_len+32(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), R9
// matchLen
- XORL R12, R12
- CMPL R8, $0x08
+ XORL R11, R11
+ CMPL DI, $0x08
JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVQ (R9)(R12*1), R11
- XORQ (R10)(R12*1), R11
- TESTQ R11, R11
+ MOVQ (R8)(R11*1), R10
+ XORQ (R9)(R11*1), R10
+ TESTQ R10, R10
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R10, R10
#else
- BSFQ R11, R11
+ BSFQ R10, R10
#endif
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B:
- LEAL -8(R8), R8
- LEAL 8(R12), R12
- CMPL R8, $0x08
+ LEAL -8(DI), DI
+ LEAL 8(R11), R11
+ CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R8, $0x04
+ CMPL DI, $0x04
JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVL (R9)(R12*1), R11
- CMPL (R10)(R12*1), R11
+ MOVL (R8)(R11*1), R10
+ CMPL (R9)(R11*1), R10
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- SUBL $0x04, R8
- LEAL 4(R12), R12
+ SUBL $0x04, DI
+ LEAL 4(R11), R11
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R8, $0x02
+ CMPL DI, $0x02
JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVW (R9)(R12*1), R11
- CMPW (R10)(R12*1), R11
+ MOVW (R8)(R11*1), R10
+ CMPW (R9)(R11*1), R10
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
- SUBL $0x02, R8
- LEAL 2(R12), R12
+ SUBL $0x02, DI
+ LEAL 2(R11), R11
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R8, $0x01
+ CMPL DI, $0x01
JL match_nolit_end_encodeSnappyBetterBlockAsm8B
- MOVB (R9)(R12*1), R11
- CMPB (R10)(R12*1), R11
+ MOVB (R8)(R11*1), R10
+ CMPB (R9)(R11*1), R10
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
- LEAL 1(R12), R12
+ LEAL 1(R11), R11
match_nolit_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, R8
- SUBL SI, R8
+ MOVL CX, DI
+ SUBL BX, DI
// Check if repeat
- MOVL R8, 16(SP)
- MOVL 12(SP), SI
- CMPL SI, DI
+ MOVL DI, 16(SP)
+ MOVL 12(SP), BX
+ CMPL BX, SI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
- MOVL DI, R9
- MOVL DI, 12(SP)
- LEAQ (DX)(SI*1), R10
- SUBL SI, R9
- LEAL -1(R9), SI
- CMPL SI, $0x3c
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R9
+ SUBL BX, R8
+ LEAL -1(R8), BX
+ CMPL BX, $0x3c
JLT one_byte_match_emit_encodeSnappyBetterBlockAsm8B
- CMPL SI, $0x00000100
+ CMPL BX, $0x00000100
JLT two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
+ MOVW BX, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
+ MOVB BL, 1(AX)
ADDQ $0x02, AX
- CMPL SI, $0x40
+ CMPL BX, $0x40
JL memmove_match_emit_encodeSnappyBetterBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
- SHLB $0x02, SI
- MOVB SI, (AX)
+ SHLB $0x02, BL
+ MOVB BL, (AX)
ADDQ $0x01, AX
memmove_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveShort
- CMPQ R9, $0x08
+ CMPQ R8, $0x08
JLE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
- CMPQ R9, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
- CMPQ R9, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
- MOVQ (R10), R11
- MOVQ R11, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (AX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R10), R11
- MOVQ -8(R10)(R9*1), R10
- MOVQ R11, (AX)
- MOVQ R10, -8(AX)(R9*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (AX)
+ MOVQ R9, -8(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R10), X0
- MOVOU -16(R10)(R9*1), X1
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R9*1)
+ MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVQ SI, AX
+ MOVQ BX, AX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R9*1), SI
+ LEAQ (AX)(R8*1), BX
// genMemMoveLong
- MOVOU (R10), X0
- MOVOU 16(R10), X1
- MOVOU -32(R10)(R9*1), X2
- MOVOU -16(R10)(R9*1), X3
- MOVQ R9, R13
- SHRQ $0x05, R13
- MOVQ AX, R11
- ANDL $0x0000001f, R11
- MOVQ $0x00000040, R14
- SUBQ R11, R14
- DECQ R13
- JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R10)(R14*1), R11
- LEAQ -32(AX)(R14*1), R15
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R12
+ JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R11), X4
- MOVOU 16(R11), X5
- MOVOA X4, (R15)
- MOVOA X5, 16(R15)
- ADDQ $0x20, R15
- ADDQ $0x20, R11
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
ADDQ $0x20, R14
- DECQ R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R13
+ DECQ R12
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R10)(R14*1), X4
- MOVOU -16(R10)(R14*1), X5
- MOVOA X4, -32(AX)(R14*1)
- MOVOA X5, -16(AX)(R14*1)
- ADDQ $0x20, R14
- CMPQ R9, R14
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R9*1)
- MOVOU X3, -16(AX)(R9*1)
- MOVQ SI, AX
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ BX, AX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
- ADDL R12, CX
- ADDL $0x04, R12
+ ADDL R11, CX
+ ADDL $0x04, R11
MOVL CX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R12, $0x40
+ CMPL R11, $0x40
JLE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
MOVB $0xee, (AX)
- MOVW R8, 1(AX)
- LEAL -60(R12), R12
+ MOVW DI, 1(AX)
+ LEAL -60(R11), R11
ADDQ $0x03, AX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R12, $0x0c
+ MOVL R11, BX
+ SHLL $0x02, BX
+ CMPL R11, $0x0c
JGE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVB $0x01, BL
- LEAL -16(BX)(R12*4), R12
- MOVB R8, 1(AX)
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, R12
- MOVB R12, (AX)
+ LEAL -15(BX), BX
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, BX
+ MOVB BL, (AX)
ADDQ $0x02, AX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVB $0x02, BL
- LEAL -4(BX)(R12*4), R12
- MOVB R12, (AX)
- MOVW R8, 1(AX)
+ LEAL -2(BX), BX
+ MOVB BL, (AX)
+ MOVW DI, 1(AX)
ADDQ $0x03, AX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
@@ -16948,50 +16895,50 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, SI
- MOVQ $0x9e3779b1, R8
- LEAQ 1(DI), DI
- LEAQ -2(CX), R9
- MOVQ (DX)(DI*1), R10
- MOVQ 1(DX)(DI*1), R11
- MOVQ (DX)(R9*1), R12
- MOVQ 1(DX)(R9*1), R13
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x38, R11
- SHLQ $0x10, R12
- IMULQ SI, R12
- SHRQ $0x36, R12
- SHLQ $0x20, R13
- IMULQ R8, R13
- SHRQ $0x38, R13
- LEAQ 1(DI), R8
- LEAQ 1(R9), R14
- MOVL DI, 24(SP)(R10*4)
- MOVL R9, 24(SP)(R12*4)
- MOVL R8, 4120(SP)(R11*4)
- MOVL R14, 4120(SP)(R13*4)
- ADDQ $0x01, DI
- SUBQ $0x01, R9
+ MOVQ $0x0000cf1bbcdcbf9b, BX
+ MOVQ $0x9e3779b1, DI
+ LEAQ 1(SI), SI
+ LEAQ -2(CX), R8
+ MOVQ (DX)(SI*1), R9
+ MOVQ 1(DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ MOVQ 1(DX)(R8*1), R12
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ SHLQ $0x20, R10
+ IMULQ DI, R10
+ SHRQ $0x38, R10
+ SHLQ $0x10, R11
+ IMULQ BX, R11
+ SHRQ $0x36, R11
+ SHLQ $0x20, R12
+ IMULQ DI, R12
+ SHRQ $0x38, R12
+ LEAQ 1(SI), DI
+ LEAQ 1(R8), R13
+ MOVL SI, 24(SP)(R9*4)
+ MOVL R8, 24(SP)(R11*4)
+ MOVL DI, 4120(SP)(R10*4)
+ MOVL R13, 4120(SP)(R12*4)
+ ADDQ $0x01, SI
+ SUBQ $0x01, R8
index_loop_encodeSnappyBetterBlockAsm8B:
- CMPQ DI, R9
+ CMPQ SI, R8
JAE search_loop_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(DI*1), R8
- MOVQ (DX)(R9*1), R10
- SHLQ $0x10, R8
- IMULQ SI, R8
- SHRQ $0x36, R8
- SHLQ $0x10, R10
- IMULQ SI, R10
- SHRQ $0x36, R10
- MOVL DI, 24(SP)(R8*4)
- MOVL R9, 24(SP)(R10*4)
- ADDQ $0x02, DI
- SUBQ $0x02, R9
+ MOVQ (DX)(SI*1), DI
+ MOVQ (DX)(R8*1), R9
+ SHLQ $0x10, DI
+ IMULQ BX, DI
+ SHRQ $0x36, DI
+ SHLQ $0x10, R9
+ IMULQ BX, R9
+ SHRQ $0x36, R9
+ MOVL SI, 24(SP)(DI*4)
+ MOVL R8, 24(SP)(R9*4)
+ ADDQ $0x02, SI
+ SUBQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm8B
emit_remainder_encodeSnappyBetterBlockAsm8B:
@@ -17151,206 +17098,1484 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
MOVQ AX, ret+48(FP)
RET
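+
+// For reference, the better 8B search loop above probes two hash tables: a
+// 6-byte multiply-shift hash into the 1024-entry table at 24(SP) and a
+// 4-byte hash into the 256-entry table at 4120(SP). In Go, as an
+// illustrative sketch (the helper names are not part of the generated code):
+//
+//	func hash6(u uint64) uint32 { return uint32(((u << 16) * 0x0000cf1bbcdcbf9b) >> 54) }
+//	func hash4(u uint64) uint32 { return uint32(((u << 32) * 0x9e3779b1) >> 56) }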
-// func emitLiteral(dst []byte, lit []byte) int
-// Requires: SSE2
-TEXT ·emitLiteral(SB), NOSPLIT, $0-56
- MOVQ lit_len+32(FP), DX
- MOVQ dst_base+0(FP), AX
- MOVQ lit_base+24(FP), CX
- TESTQ DX, DX
- JZ emit_literal_end_standalone_skip
- MOVL DX, BX
- LEAL -1(DX), SI
- CMPL SI, $0x3c
- JLT one_byte_standalone
- CMPL SI, $0x00000100
- JLT two_bytes_standalone
- CMPL SI, $0x00010000
- JLT three_bytes_standalone
- CMPL SI, $0x01000000
- JLT four_bytes_standalone
- MOVB $0xfc, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, BX
- ADDQ $0x05, AX
- JMP memmove_long_standalone
+// func calcBlockSize(src []byte) int
+// Requires: BMI, SSE2
+TEXT ·calcBlockSize(SB), $32792-32
+ XORQ AX, AX
+ MOVQ $0x00000100, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
-four_bytes_standalone:
+zero_loop_calcBlockSize:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSize
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+search_loop_calcBlockSize:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x05, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JGE emit_remainder_calcBlockSize
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ SHLQ $0x10, R10
+ IMULQ R8, R10
+ SHRQ $0x33, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x10, R9
+ IMULQ R8, R9
+ SHRQ $0x33, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSize
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSize
+
+repeat_extend_back_loop_calcBlockSize:
+ CMPL SI, BX
+ JLE repeat_extend_back_end_calcBlockSize
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSize
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSize
+
+repeat_extend_back_end_calcBlockSize:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSize
MOVL SI, DI
- SHRL $0x10, DI
- MOVB $0xf8, (AX)
- MOVW SI, 1(AX)
- MOVB DI, 3(AX)
- ADDQ $0x04, BX
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JLT one_byte_repeat_emit_calcBlockSize
+ CMPL BX, $0x00000100
+ JLT two_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x00010000
+ JLT three_bytes_repeat_emit_calcBlockSize
+ CMPL BX, $0x01000000
+ JLT four_bytes_repeat_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_repeat_emit_calcBlockSize
+
+four_bytes_repeat_emit_calcBlockSize:
ADDQ $0x04, AX
- JMP memmove_long_standalone
+ JMP memmove_long_repeat_emit_calcBlockSize
-three_bytes_standalone:
- MOVB $0xf4, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, BX
+three_bytes_repeat_emit_calcBlockSize:
ADDQ $0x03, AX
- JMP memmove_long_standalone
+ JMP memmove_long_repeat_emit_calcBlockSize
-two_bytes_standalone:
- MOVB $0xf0, (AX)
- MOVB SI, 1(AX)
- ADDQ $0x02, BX
+two_bytes_repeat_emit_calcBlockSize:
ADDQ $0x02, AX
- CMPL SI, $0x40
- JL memmove_standalone
- JMP memmove_long_standalone
+ CMPL BX, $0x40
+ JL memmove_repeat_emit_calcBlockSize
+ JMP memmove_long_repeat_emit_calcBlockSize
-one_byte_standalone:
- SHLB $0x02, SI
- MOVB SI, (AX)
- ADDQ $0x01, BX
+one_byte_repeat_emit_calcBlockSize:
ADDQ $0x01, AX
-memmove_standalone:
- // genMemMoveShort
- CMPQ DX, $0x03
- JB emit_lit_memmove_standalone_memmove_move_1or2
- JE emit_lit_memmove_standalone_memmove_move_3
- CMPQ DX, $0x08
- JB emit_lit_memmove_standalone_memmove_move_4through7
- CMPQ DX, $0x10
- JBE emit_lit_memmove_standalone_memmove_move_8through16
- CMPQ DX, $0x20
- JBE emit_lit_memmove_standalone_memmove_move_17through32
- JMP emit_lit_memmove_standalone_memmove_move_33through64
+memmove_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSize
-emit_lit_memmove_standalone_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(DX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(DX*1)
- JMP emit_literal_end_standalone
+memmove_long_repeat_emit_calcBlockSize:
+ LEAQ (AX)(DI*1), AX
-emit_lit_memmove_standalone_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
- JMP emit_literal_end_standalone
+emit_literal_done_repeat_emit_calcBlockSize:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
-emit_lit_memmove_standalone_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(DX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(DX*1)
- JMP emit_literal_end_standalone
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JL matchlen_match4_repeat_extend_calcBlockSize
-emit_lit_memmove_standalone_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(DX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(DX*1)
- JMP emit_literal_end_standalone
+matchlen_loopback_repeat_extend_calcBlockSize:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_calcBlockSize
-emit_lit_memmove_standalone_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(DX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DX*1)
- JMP emit_literal_end_standalone
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
-emit_lit_memmove_standalone_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(DX*1), X2
- MOVOU -16(CX)(DX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DX*1)
- MOVOU X3, -16(AX)(DX*1)
- JMP emit_literal_end_standalone
- JMP emit_literal_end_standalone
+#else
+ BSFQ R9, R9
-memmove_long_standalone:
- // genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(DX*1), X2
- MOVOU -16(CX)(DX*1), X3
- MOVQ DX, DI
- SHRQ $0x05, DI
- MOVQ AX, SI
- ANDL $0x0000001f, SI
- MOVQ $0x00000040, R8
- SUBQ SI, R8
- DECQ DI
- JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSize
-emit_lit_memmove_long_standalonelarge_big_loop_back:
- MOVOU (SI), X4
- MOVOU 16(SI), X5
- MOVOA X4, (R9)
- MOVOA X5, 16(R9)
- ADDQ $0x20, R9
- ADDQ $0x20, SI
- ADDQ $0x20, R8
- DECQ DI
- JNA emit_lit_memmove_long_standalonelarge_big_loop_back
+matchlen_loop_repeat_extend_calcBlockSize:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JGE matchlen_loopback_repeat_extend_calcBlockSize
+ JZ repeat_extend_forward_end_calcBlockSize
-emit_lit_memmove_long_standalonelarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
- ADDQ $0x20, R8
- CMPQ DX, R8
- JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DX*1)
- MOVOU X3, -16(AX)(DX*1)
- JMP emit_literal_end_standalone
- JMP emit_literal_end_standalone
+matchlen_match4_repeat_extend_calcBlockSize:
+ CMPL DI, $0x04
+ JL matchlen_match2_repeat_extend_calcBlockSize
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSize
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
-emit_literal_end_standalone_skip:
- XORQ BX, BX
+matchlen_match2_repeat_extend_calcBlockSize:
+ CMPL DI, $0x02
+ JL matchlen_match1_repeat_extend_calcBlockSize
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSize
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_repeat_extend_calcBlockSize:
+ CMPL DI, $0x01
+ JL repeat_extend_forward_end_calcBlockSize
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSize
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_calcBlockSize:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+ CMPL SI, $0x00010000
+ JL two_byte_offset_repeat_as_copy_calcBlockSize
+
+four_bytes_loop_back_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JLE four_bytes_remain_repeat_as_copy_calcBlockSize
+ LEAL -64(BX), BX
+ ADDQ $0x05, AX
+ CMPL BX, $0x04
+ JL four_bytes_remain_repeat_as_copy_calcBlockSize
+ JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize
+
+four_bytes_remain_repeat_as_copy_calcBlockSize:
+ TESTL BX, BX
+ JZ repeat_end_emit_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_calcBlockSize
+
+two_byte_offset_repeat_as_copy_calcBlockSize:
+ CMPL BX, $0x40
+ JLE two_byte_offset_short_repeat_as_copy_calcBlockSize
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSize
+
+two_byte_offset_short_repeat_as_copy_calcBlockSize:
+ MOVL BX, DI
+ SHLL $0x02, DI
+ CMPL BX, $0x0c
+ JGE emit_copy_three_repeat_as_copy_calcBlockSize
+ CMPL SI, $0x00000800
+ JGE emit_copy_three_repeat_as_copy_calcBlockSize
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSize
+
+emit_copy_three_repeat_as_copy_calcBlockSize:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSize:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSize
+
+no_repeat_found_calcBlockSize:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSize
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSize
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSize
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSize
+
+candidate3_match_calcBlockSize:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSize
+
+candidate2_match_calcBlockSize:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSize:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSize
+
+match_extend_back_loop_calcBlockSize:
+ CMPL CX, SI
+ JLE match_extend_back_end_calcBlockSize
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSize
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSize
+ JMP match_extend_back_loop_calcBlockSize
+
+match_extend_back_end_calcBlockSize:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 5(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JL match_dst_size_check_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_dst_size_check_calcBlockSize:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSize
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JLT one_byte_match_emit_calcBlockSize
+ CMPL SI, $0x00000100
+ JLT two_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x00010000
+ JLT three_bytes_match_emit_calcBlockSize
+ CMPL SI, $0x01000000
+ JLT four_bytes_match_emit_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+four_bytes_match_emit_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+three_bytes_match_emit_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSize
+
+two_bytes_match_emit_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JL memmove_match_emit_calcBlockSize
+ JMP memmove_long_match_emit_calcBlockSize
+
+one_byte_match_emit_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSize
+
+memmove_long_match_emit_calcBlockSize:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSize:
+match_nolit_loop_calcBlockSize:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JL matchlen_match4_match_nolit_calcBlockSize
+
+matchlen_loopback_match_nolit_calcBlockSize:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_calcBlockSize
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSize
+
+matchlen_loop_match_nolit_calcBlockSize:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JGE matchlen_loopback_match_nolit_calcBlockSize
+ JZ match_nolit_end_calcBlockSize
+
+matchlen_match4_match_nolit_calcBlockSize:
+ CMPL SI, $0x04
+ JL matchlen_match2_match_nolit_calcBlockSize
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSize
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSize:
+ CMPL SI, $0x02
+ JL matchlen_match1_match_nolit_calcBlockSize
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSize
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
+
+matchlen_match1_match_nolit_calcBlockSize:
+ CMPL SI, $0x01
+ JL match_nolit_end_calcBlockSize
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSize
+ LEAL 1(R9), R9
+
+match_nolit_end_calcBlockSize:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+ CMPL BX, $0x00010000
+ JL two_byte_offset_match_nolit_calcBlockSize
+
+four_bytes_loop_back_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JLE four_bytes_remain_match_nolit_calcBlockSize
+ LEAL -64(R9), R9
+ ADDQ $0x05, AX
+ CMPL R9, $0x04
+ JL four_bytes_remain_match_nolit_calcBlockSize
+ JMP four_bytes_loop_back_match_nolit_calcBlockSize
+
+four_bytes_remain_match_nolit_calcBlockSize:
+ TESTL R9, R9
+ JZ match_nolit_emitcopy_end_calcBlockSize
+ XORL BX, BX
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+two_byte_offset_match_nolit_calcBlockSize:
+ CMPL R9, $0x40
+ JLE two_byte_offset_short_match_nolit_calcBlockSize
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSize
+
+two_byte_offset_short_match_nolit_calcBlockSize:
+ MOVL R9, SI
+ SHLL $0x02, SI
+ CMPL R9, $0x0c
+ JGE emit_copy_three_match_nolit_calcBlockSize
+ CMPL BX, $0x00000800
+ JGE emit_copy_three_match_nolit_calcBlockSize
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSize
+
+emit_copy_three_match_nolit_calcBlockSize:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSize:
+ CMPL CX, 8(SP)
+ JGE emit_remainder_calcBlockSize
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JL match_nolit_dst_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_nolit_dst_ok_calcBlockSize:
+ MOVQ $0x0000cf1bbcdcbf9b, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x10, DI
+ IMULQ R8, DI
+ SHRQ $0x33, DI
+ SHLQ $0x10, BX
+ IMULQ R8, BX
+ SHRQ $0x33, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSize
+ INCL CX
+ JMP search_loop_calcBlockSize
+
+emit_remainder_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 5(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JL emit_remainder_ok_calcBlockSize
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSize:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSize
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JLT one_byte_emit_remainder_calcBlockSize
+ CMPL CX, $0x00000100
+ JLT two_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x00010000
+ JLT three_bytes_emit_remainder_calcBlockSize
+ CMPL CX, $0x01000000
+ JLT four_bytes_emit_remainder_calcBlockSize
+ ADDQ $0x05, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+four_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x04, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+three_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+two_bytes_emit_remainder_calcBlockSize:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JL memmove_emit_remainder_calcBlockSize
+ JMP memmove_long_emit_remainder_calcBlockSize
+
+one_byte_emit_remainder_calcBlockSize:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSize
+
+memmove_long_emit_remainder_calcBlockSize:
+ LEAQ (AX)(SI*1), AX
+
+emit_literal_done_emit_remainder_calcBlockSize:
+ MOVQ AX, ret+24(FP)
+ RET
+
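+// calcBlockSize runs the same match search as the block encoder but only
+// accumulates the would-be output size in AX, so every emit branch above
+// reduces to an ADDQ. A Go sketch of the literal accounting (thresholds
+// mirror the one_byte/two_bytes/... branches; the helper name is ours):
+//
+//	func literalCost(n int) int { // n literal bytes plus a 1-5 byte tag
+//		switch {
+//		case n-1 < 60:
+//			return n + 1
+//		case n-1 < 1<<8:
+//			return n + 2
+//		case n-1 < 1<<16:
+//			return n + 3
+//		case n-1 < 1<<24:
+//			return n + 4
+//		default:
+//			return n + 5
+//		}
+//	}
+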
+// func calcBlockSizeSmall(src []byte) int
+// Requires: BMI, SSE2
+TEXT ·calcBlockSizeSmall(SB), $2072-32
+ XORQ AX, AX
+ MOVQ $0x00000010, CX
+ LEAQ 24(SP), DX
+ PXOR X0, X0
+
+zero_loop_calcBlockSizeSmall:
+ MOVOU X0, (DX)
+ MOVOU X0, 16(DX)
+ MOVOU X0, 32(DX)
+ MOVOU X0, 48(DX)
+ MOVOU X0, 64(DX)
+ MOVOU X0, 80(DX)
+ MOVOU X0, 96(DX)
+ MOVOU X0, 112(DX)
+ ADDQ $0x80, DX
+ DECQ CX
+ JNZ zero_loop_calcBlockSizeSmall
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+8(FP), CX
+ LEAQ -9(CX), DX
+ LEAQ -8(CX), BX
+ MOVL BX, 8(SP)
+ SHRQ $0x05, CX
+ SUBL CX, DX
+ LEAQ (AX)(DX*1), DX
+ MOVQ DX, (SP)
+ MOVL $0x00000001, CX
+ MOVL CX, 16(SP)
+ MOVQ src_base+0(FP), DX
+
+search_loop_calcBlockSizeSmall:
+ MOVL CX, BX
+ SUBL 12(SP), BX
+ SHRL $0x04, BX
+ LEAL 4(CX)(BX*1), BX
+ CMPL BX, 8(SP)
+ JGE emit_remainder_calcBlockSizeSmall
+ MOVQ (DX)(CX*1), SI
+ MOVL BX, 20(SP)
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, R9
+ MOVQ SI, R10
+ SHRQ $0x08, R10
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ SHLQ $0x20, R10
+ IMULQ R8, R10
+ SHRQ $0x37, R10
+ MOVL 24(SP)(R9*4), BX
+ MOVL 24(SP)(R10*4), DI
+ MOVL CX, 24(SP)(R9*4)
+ LEAL 1(CX), R9
+ MOVL R9, 24(SP)(R10*4)
+ MOVQ SI, R9
+ SHRQ $0x10, R9
+ SHLQ $0x20, R9
+ IMULQ R8, R9
+ SHRQ $0x37, R9
+ MOVL CX, R8
+ SUBL 16(SP), R8
+ MOVL 1(DX)(R8*1), R10
+ MOVQ SI, R8
+ SHRQ $0x08, R8
+ CMPL R8, R10
+ JNE no_repeat_found_calcBlockSizeSmall
+ LEAL 1(CX), SI
+ MOVL 12(SP), BX
+ MOVL SI, DI
+ SUBL 16(SP), DI
+ JZ repeat_extend_back_end_calcBlockSizeSmall
+
+repeat_extend_back_loop_calcBlockSizeSmall:
+ CMPL SI, BX
+ JLE repeat_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(DI*1), R8
+ MOVB -1(DX)(SI*1), R9
+ CMPB R8, R9
+ JNE repeat_extend_back_end_calcBlockSizeSmall
+ LEAL -1(SI), SI
+ DECL DI
+ JNZ repeat_extend_back_loop_calcBlockSizeSmall
+
+repeat_extend_back_end_calcBlockSizeSmall:
+ MOVL 12(SP), BX
+ CMPL BX, SI
+ JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall
+ MOVL SI, DI
+ MOVL SI, 12(SP)
+ LEAQ (DX)(BX*1), R8
+ SUBL BX, DI
+ LEAL -1(DI), BX
+ CMPL BX, $0x3c
+ JLT one_byte_repeat_emit_calcBlockSizeSmall
+ CMPL BX, $0x00000100
+ JLT two_bytes_repeat_emit_calcBlockSizeSmall
+ ADDQ $0x03, AX
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+two_bytes_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL BX, $0x40
+ JL memmove_repeat_emit_calcBlockSizeSmall
+ JMP memmove_long_repeat_emit_calcBlockSizeSmall
+
+one_byte_repeat_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+ JMP emit_literal_done_repeat_emit_calcBlockSizeSmall
+
+memmove_long_repeat_emit_calcBlockSizeSmall:
+ LEAQ (AX)(DI*1), AX
+
+emit_literal_done_repeat_emit_calcBlockSizeSmall:
+ ADDL $0x05, CX
+ MOVL CX, BX
+ SUBL 16(SP), BX
+ MOVQ src_len+8(FP), DI
+ SUBL CX, DI
+ LEAQ (DX)(CX*1), R8
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R10, R10
+ CMPL DI, $0x08
+ JL matchlen_match4_repeat_extend_calcBlockSizeSmall
+
+matchlen_loopback_repeat_extend_calcBlockSizeSmall:
+ MOVQ (R8)(R10*1), R9
+ XORQ (BX)(R10*1), R9
+ TESTQ R9, R9
+ JZ matchlen_loop_repeat_extend_calcBlockSizeSmall
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_loop_repeat_extend_calcBlockSizeSmall:
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
+ CMPL DI, $0x08
+ JGE matchlen_loopback_repeat_extend_calcBlockSizeSmall
+ JZ repeat_extend_forward_end_calcBlockSizeSmall
+
+matchlen_match4_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x04
+ JL matchlen_match2_repeat_extend_calcBlockSizeSmall
+ MOVL (R8)(R10*1), R9
+ CMPL (BX)(R10*1), R9
+ JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x02
+ JL matchlen_match1_repeat_extend_calcBlockSizeSmall
+ MOVW (R8)(R10*1), R9
+ CMPW (BX)(R10*1), R9
+ JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_repeat_extend_calcBlockSizeSmall:
+ CMPL DI, $0x01
+ JL repeat_extend_forward_end_calcBlockSizeSmall
+ MOVB (R8)(R10*1), R9
+ CMPB (BX)(R10*1), R9
+ JNE repeat_extend_forward_end_calcBlockSizeSmall
+ LEAL 1(R10), R10
+
+repeat_extend_forward_end_calcBlockSizeSmall:
+ ADDL R10, CX
+ MOVL CX, BX
+ SUBL SI, BX
+ MOVL 16(SP), SI
+
+ // emitCopy
+two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
+ CMPL BX, $0x40
+ JLE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
+ LEAL -60(BX), BX
+ ADDQ $0x03, AX
+ JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall
+
+two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
+ MOVL BX, SI
+ SHLL $0x02, SI
+ CMPL BX, $0x0c
+ JGE emit_copy_three_repeat_as_copy_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_calcBlockSizeSmall
+
+emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+repeat_end_emit_calcBlockSizeSmall:
+ MOVL CX, 12(SP)
+ JMP search_loop_calcBlockSizeSmall
+
+no_repeat_found_calcBlockSizeSmall:
+ CMPL (DX)(BX*1), SI
+ JEQ candidate_match_calcBlockSizeSmall
+ SHRQ $0x08, SI
+ MOVL 24(SP)(R9*4), BX
+ LEAL 2(CX), R8
+ CMPL (DX)(DI*1), SI
+ JEQ candidate2_match_calcBlockSizeSmall
+ MOVL R8, 24(SP)(R9*4)
+ SHRQ $0x08, SI
+ CMPL (DX)(BX*1), SI
+ JEQ candidate3_match_calcBlockSizeSmall
+ MOVL 20(SP), CX
+ JMP search_loop_calcBlockSizeSmall
+
+candidate3_match_calcBlockSizeSmall:
+ ADDL $0x02, CX
+ JMP candidate_match_calcBlockSizeSmall
+
+candidate2_match_calcBlockSizeSmall:
+ MOVL R8, 24(SP)(R9*4)
+ INCL CX
+ MOVL DI, BX
+
+candidate_match_calcBlockSizeSmall:
+ MOVL 12(SP), SI
+ TESTL BX, BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+
+match_extend_back_loop_calcBlockSizeSmall:
+ CMPL CX, SI
+ JLE match_extend_back_end_calcBlockSizeSmall
+ MOVB -1(DX)(BX*1), DI
+ MOVB -1(DX)(CX*1), R8
+ CMPB DI, R8
+ JNE match_extend_back_end_calcBlockSizeSmall
+ LEAL -1(CX), CX
+ DECL BX
+ JZ match_extend_back_end_calcBlockSizeSmall
+ JMP match_extend_back_loop_calcBlockSizeSmall
+
+match_extend_back_end_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL 12(SP), SI
+ LEAQ 3(AX)(SI*1), SI
+ CMPQ SI, (SP)
+ JL match_dst_size_check_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_dst_size_check_calcBlockSizeSmall:
+ MOVL CX, SI
+ MOVL 12(SP), DI
+ CMPL DI, SI
+ JEQ emit_literal_done_match_emit_calcBlockSizeSmall
+ MOVL SI, R8
+ MOVL SI, 12(SP)
+ LEAQ (DX)(DI*1), SI
+ SUBL DI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
+ JLT one_byte_match_emit_calcBlockSizeSmall
+ CMPL SI, $0x00000100
+ JLT two_bytes_match_emit_calcBlockSizeSmall
+ ADDQ $0x03, AX
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+two_bytes_match_emit_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JL memmove_match_emit_calcBlockSizeSmall
+ JMP memmove_long_match_emit_calcBlockSizeSmall
+
+one_byte_match_emit_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+ JMP emit_literal_done_match_emit_calcBlockSizeSmall
+
+memmove_long_match_emit_calcBlockSizeSmall:
+ LEAQ (AX)(R8*1), AX
+
+emit_literal_done_match_emit_calcBlockSizeSmall:
+match_nolit_loop_calcBlockSizeSmall:
+ MOVL CX, SI
+ SUBL BX, SI
+ MOVL SI, 16(SP)
+ ADDL $0x04, CX
+ ADDL $0x04, BX
+ MOVQ src_len+8(FP), SI
+ SUBL CX, SI
+ LEAQ (DX)(CX*1), DI
+ LEAQ (DX)(BX*1), BX
+
+ // matchLen
+ XORL R9, R9
+ CMPL SI, $0x08
+ JL matchlen_match4_match_nolit_calcBlockSizeSmall
+
+matchlen_loopback_match_nolit_calcBlockSizeSmall:
+ MOVQ (DI)(R9*1), R8
+ XORQ (BX)(R9*1), R8
+ TESTQ R8, R8
+ JZ matchlen_loop_match_nolit_calcBlockSizeSmall
+
+#ifdef GOAMD64_v3
+ TZCNTQ R8, R8
+
+#else
+ BSFQ R8, R8
+
+#endif
+ SARQ $0x03, R8
+ LEAL (R9)(R8*1), R9
+ JMP match_nolit_end_calcBlockSizeSmall
+
+matchlen_loop_match_nolit_calcBlockSizeSmall:
+ LEAL -8(SI), SI
+ LEAL 8(R9), R9
+ CMPL SI, $0x08
+ JGE matchlen_loopback_match_nolit_calcBlockSizeSmall
+ JZ match_nolit_end_calcBlockSizeSmall
+
+matchlen_match4_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x04
+ JL matchlen_match2_match_nolit_calcBlockSizeSmall
+ MOVL (DI)(R9*1), R8
+ CMPL (BX)(R9*1), R8
+ JNE matchlen_match2_match_nolit_calcBlockSizeSmall
+ SUBL $0x04, SI
+ LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x02
+ JL matchlen_match1_match_nolit_calcBlockSizeSmall
+ MOVW (DI)(R9*1), R8
+ CMPW (BX)(R9*1), R8
+ JNE matchlen_match1_match_nolit_calcBlockSizeSmall
+ SUBL $0x02, SI
+ LEAL 2(R9), R9
+
+matchlen_match1_match_nolit_calcBlockSizeSmall:
+ CMPL SI, $0x01
+ JL match_nolit_end_calcBlockSizeSmall
+ MOVB (DI)(R9*1), R8
+ CMPB (BX)(R9*1), R8
+ JNE match_nolit_end_calcBlockSizeSmall
+ LEAL 1(R9), R9
+
+match_nolit_end_calcBlockSizeSmall:
+ ADDL R9, CX
+ MOVL 16(SP), BX
+ ADDL $0x04, R9
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_calcBlockSizeSmall:
+ CMPL R9, $0x40
+ JLE two_byte_offset_short_match_nolit_calcBlockSizeSmall
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ JMP two_byte_offset_match_nolit_calcBlockSizeSmall
+
+two_byte_offset_short_match_nolit_calcBlockSizeSmall:
+ MOVL R9, BX
+ SHLL $0x02, BX
+ CMPL R9, $0x0c
+ JGE emit_copy_three_match_nolit_calcBlockSizeSmall
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_calcBlockSizeSmall
+
+emit_copy_three_match_nolit_calcBlockSizeSmall:
+ ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_calcBlockSizeSmall:
+ CMPL CX, 8(SP)
+ JGE emit_remainder_calcBlockSizeSmall
+ MOVQ -2(DX)(CX*1), SI
+ CMPQ AX, (SP)
+ JL match_nolit_dst_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+match_nolit_dst_ok_calcBlockSizeSmall:
+ MOVQ $0x9e3779b1, R8
+ MOVQ SI, DI
+ SHRQ $0x10, SI
+ MOVQ SI, BX
+ SHLQ $0x20, DI
+ IMULQ R8, DI
+ SHRQ $0x37, DI
+ SHLQ $0x20, BX
+ IMULQ R8, BX
+ SHRQ $0x37, BX
+ LEAL -2(CX), R8
+ LEAQ 24(SP)(BX*4), R9
+ MOVL (R9), BX
+ MOVL R8, 24(SP)(DI*4)
+ MOVL CX, (R9)
+ CMPL (DX)(BX*1), SI
+ JEQ match_nolit_loop_calcBlockSizeSmall
+ INCL CX
+ JMP search_loop_calcBlockSizeSmall
+
+emit_remainder_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ SUBL 12(SP), CX
+ LEAQ 3(AX)(CX*1), CX
+ CMPQ CX, (SP)
+ JL emit_remainder_ok_calcBlockSizeSmall
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+emit_remainder_ok_calcBlockSizeSmall:
+ MOVQ src_len+8(FP), CX
+ MOVL 12(SP), BX
+ CMPL BX, CX
+ JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall
+ MOVL CX, SI
+ MOVL CX, 12(SP)
+ LEAQ (DX)(BX*1), CX
+ SUBL BX, SI
+ LEAL -1(SI), CX
+ CMPL CX, $0x3c
+ JLT one_byte_emit_remainder_calcBlockSizeSmall
+ CMPL CX, $0x00000100
+ JLT two_bytes_emit_remainder_calcBlockSizeSmall
+ ADDQ $0x03, AX
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+two_bytes_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x02, AX
+ CMPL CX, $0x40
+ JL memmove_emit_remainder_calcBlockSizeSmall
+ JMP memmove_long_emit_remainder_calcBlockSizeSmall
+
+one_byte_emit_remainder_calcBlockSizeSmall:
+ ADDQ $0x01, AX
+
+memmove_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+ JMP emit_literal_done_emit_remainder_calcBlockSizeSmall
+
+memmove_long_emit_remainder_calcBlockSizeSmall:
+ LEAQ (AX)(SI*1), AX
+
+emit_literal_done_emit_remainder_calcBlockSizeSmall:
+ MOVQ AX, ret+24(FP)
+ RET
+
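+// calcBlockSizeSmall is the same size calculation specialized for small
+// inputs (judging by the dropped branches, at most 1024 bytes): the hash
+// table shrinks to 512 entries, the 4- and 5-byte literal headers disappear,
+// and copies never need a 4-byte offset or an offset>=2048 check. Its hash,
+// as an illustrative Go sketch:
+//
+//	func hash4Small(u uint64) uint32 { return uint32(((u << 32) * 0x9e3779b1) >> 55) }
+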
+// func emitLiteral(dst []byte, lit []byte) int
+// Requires: SSE2
+TEXT ·emitLiteral(SB), NOSPLIT, $0-56
+ MOVQ lit_len+32(FP), DX
+ MOVQ dst_base+0(FP), AX
+ MOVQ lit_base+24(FP), CX
+ TESTQ DX, DX
+ JZ emit_literal_end_standalone_skip
+ MOVL DX, BX
+ LEAL -1(DX), SI
+ CMPL SI, $0x3c
+ JLT one_byte_standalone
+ CMPL SI, $0x00000100
+ JLT two_bytes_standalone
+ CMPL SI, $0x00010000
+ JLT three_bytes_standalone
+ CMPL SI, $0x01000000
+ JLT four_bytes_standalone
+ MOVB $0xfc, (AX)
+ MOVL SI, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP memmove_long_standalone
+
+four_bytes_standalone:
+ MOVL SI, DI
+ SHRL $0x10, DI
+ MOVB $0xf8, (AX)
+ MOVW SI, 1(AX)
+ MOVB DI, 3(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP memmove_long_standalone
+
+three_bytes_standalone:
+ MOVB $0xf4, (AX)
+ MOVW SI, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP memmove_long_standalone
+
+two_bytes_standalone:
+ MOVB $0xf0, (AX)
+ MOVB SI, 1(AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ CMPL SI, $0x40
+ JL memmove_standalone
+ JMP memmove_long_standalone
+
+one_byte_standalone:
+ SHLB $0x02, SI
+ MOVB SI, (AX)
+ ADDQ $0x01, BX
+ ADDQ $0x01, AX
+
+memmove_standalone:
+ // genMemMoveShort
+ CMPQ DX, $0x03
+ JB emit_lit_memmove_standalone_memmove_move_1or2
+ JE emit_lit_memmove_standalone_memmove_move_3
+ CMPQ DX, $0x08
+ JB emit_lit_memmove_standalone_memmove_move_4through7
+ CMPQ DX, $0x10
+ JBE emit_lit_memmove_standalone_memmove_move_8through16
+ CMPQ DX, $0x20
+ JBE emit_lit_memmove_standalone_memmove_move_17through32
+ JMP emit_lit_memmove_standalone_memmove_move_33through64
+
+emit_lit_memmove_standalone_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(DX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(DX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_8through16:
+ MOVQ (CX), SI
+ MOVQ -8(CX)(DX*1), CX
+ MOVQ SI, (AX)
+ MOVQ CX, -8(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_17through32:
+ MOVOU (CX), X0
+ MOVOU -16(CX)(DX*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+
+emit_lit_memmove_standalone_memmove_move_33through64:
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(DX*1), X2
+ MOVOU -16(CX)(DX*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DX*1)
+ MOVOU X3, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+ JMP emit_literal_end_standalone
+
+memmove_long_standalone:
+ // genMemMoveLong
+ MOVOU (CX), X0
+ MOVOU 16(CX), X1
+ MOVOU -32(CX)(DX*1), X2
+ MOVOU -16(CX)(DX*1), X3
+ MOVQ DX, DI
+ SHRQ $0x05, DI
+ MOVQ AX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+ LEAQ -32(CX)(R8*1), SI
+ LEAQ -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_standalonelarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_standalonelarge_big_loop_back
+
+emit_lit_memmove_long_standalonelarge_forward_sse_loop_32:
+ MOVOU -32(CX)(R8*1), X4
+ MOVOU -16(CX)(R8*1), X5
+ MOVOA X4, -32(AX)(R8*1)
+ MOVOA X5, -16(AX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ DX, R8
+ JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(DX*1)
+ MOVOU X3, -16(AX)(DX*1)
+ JMP emit_literal_end_standalone
+ JMP emit_literal_end_standalone
+
+emit_literal_end_standalone_skip:
+ XORQ BX, BX
+
+emit_literal_end_standalone:
+ MOVQ BX, ret+48(FP)
+ RET
+
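+// The tag bytes written by emitLiteral above follow the snappy literal
+// encoding. An illustrative Go version of just the header (emitLiteral also
+// copies the literal bytes and returns the total written):
+//
+//	func literalHeader(dst []byte, n int) int {
+//		v := n - 1
+//		switch {
+//		case v < 60:
+//			dst[0] = byte(v) << 2
+//			return 1
+//		case v < 1<<8:
+//			dst[0], dst[1] = 60<<2, byte(v)
+//			return 2
+//		case v < 1<<16:
+//			dst[0], dst[1], dst[2] = 61<<2, byte(v), byte(v>>8)
+//			return 3
+//		case v < 1<<24:
+//			dst[0], dst[1], dst[2], dst[3] = 62<<2, byte(v), byte(v>>8), byte(v>>16)
+//			return 4
+//		default:
+//			dst[0], dst[1], dst[2], dst[3], dst[4] = 63<<2, byte(v), byte(v>>8), byte(v>>16), byte(v>>24)
+//			return 5
+//		}
+//	}
+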
+// func emitRepeat(dst []byte, offset int, length int) int
+TEXT ·emitRepeat(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitRepeat
+emit_repeat_again_standalone:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JLE repeat_two_standalone
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_standalone
+ CMPL CX, $0x00000800
+ JLT repeat_two_offset_standalone
+
+cant_repeat_two_offset_standalone:
+ CMPL DX, $0x00000104
+ JLT repeat_three_standalone
+ CMPL DX, $0x00010100
+ JLT repeat_four_standalone
+ CMPL DX, $0x0100ffff
+ JLT repeat_five_standalone
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone
+
+repeat_five_standalone:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_repeat_end
+
+repeat_four_standalone:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_repeat_end
+
+repeat_three_standalone:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_repeat_end
+
+repeat_two_standalone:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_repeat_end
+
+repeat_two_offset_standalone:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+
+gen_emit_repeat_end:
+ MOVQ BX, ret+40(FP)
+ RET
+
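+// emitRepeat encodes S2 repeat tags, which reuse the offset of the previous
+// copy. A size-only Go model of the tag selection above (illustrative; the
+// assembly also writes the tag bytes, and the helper name is ours):
+//
+//	func repeatTagLen(offset, length int) (n int) {
+//		for {
+//			left := length - 4
+//			switch {
+//			case length <= 8, length < 12 && offset < 2048:
+//				return n + 2
+//			case left < 0x104:
+//				return n + 3
+//			case left < 0x10100:
+//				return n + 4
+//			case left < 0x0100ffff:
+//				return n + 5
+//			default:
+//				n += 5 // maximal 5-byte repeat chunk, then loop on the rest
+//				length = left - 16842747
+//			}
+//		}
+//	}
+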
+// func emitCopy(dst []byte, offset int, length int) int
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitCopy
+ CMPL CX, $0x00010000
+ JL two_byte_offset_standalone
+ CMPL DX, $0x40
+ JLE four_bytes_remain_standalone
+ MOVB $0xff, (AX)
+ MOVL CX, 1(AX)
+ LEAL -64(DX), DX
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ CMPL DX, $0x04
+ JL four_bytes_remain_standalone
+
+ // emitRepeat
+emit_repeat_again_standalone_emit_copy:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JLE repeat_two_standalone_emit_copy
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_standalone_emit_copy
+ CMPL CX, $0x00000800
+ JLT repeat_two_offset_standalone_emit_copy
+
+cant_repeat_two_offset_standalone_emit_copy:
+ CMPL DX, $0x00000104
+ JLT repeat_three_standalone_emit_copy
+ CMPL DX, $0x00010100
+ JLT repeat_four_standalone_emit_copy
+ CMPL DX, $0x0100ffff
+ JLT repeat_five_standalone_emit_copy
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy
+
+repeat_five_standalone_emit_copy:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+four_bytes_remain_standalone:
+ TESTL DX, DX
+ JZ gen_emit_copy_end
+ XORL SI, SI
+ LEAL -1(SI)(DX*4), DX
+ MOVB DL, (AX)
+ MOVL CX, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+two_byte_offset_standalone:
+ CMPL DX, $0x40
+ JLE two_byte_offset_short_standalone
+ CMPL CX, $0x00000800
+ JAE long_offset_short_standalone
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB CL, 1(AX)
+ MOVL CX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ SUBL $0x08, DX
+
+ // emitRepeat
+ LEAL -4(DX), DX
+ JMP cant_repeat_two_offset_standalone_emit_copy_short_2b
+
+emit_repeat_again_standalone_emit_copy_short_2b:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JLE repeat_two_standalone_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_standalone_emit_copy_short_2b
+ CMPL CX, $0x00000800
+ JLT repeat_two_offset_standalone_emit_copy_short_2b
+
+cant_repeat_two_offset_standalone_emit_copy_short_2b:
+ CMPL DX, $0x00000104
+ JLT repeat_three_standalone_emit_copy_short_2b
+ CMPL DX, $0x00010100
+ JLT repeat_four_standalone_emit_copy_short_2b
+ CMPL DX, $0x0100ffff
+ JLT repeat_five_standalone_emit_copy_short_2b
+ LEAL -16842747(DX), DX
+ MOVL $0xfffb001d, (AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy_short_2b
+
+repeat_five_standalone_emit_copy_short_2b:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short_2b:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short_2b:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short_2b:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
-emit_literal_end_standalone:
- MOVQ BX, ret+48(FP)
- RET
+repeat_two_offset_standalone_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
-// func emitRepeat(dst []byte, offset int, length int) int
-TEXT ·emitRepeat(SB), NOSPLIT, $0-48
- XORQ BX, BX
- MOVQ dst_base+0(FP), AX
- MOVQ offset+24(FP), CX
- MOVQ length+32(FP), DX
+long_offset_short_standalone:
+ MOVB $0xee, (AX)
+ MOVW CX, 1(AX)
+ LEAL -60(DX), DX
+ ADDQ $0x03, AX
+ ADDQ $0x03, BX
// emitRepeat
-emit_repeat_again_standalone:
+emit_repeat_again_standalone_emit_copy_short:
MOVL DX, SI
LEAL -4(DX), DX
CMPL SI, $0x08
- JLE repeat_two_standalone
+ JLE repeat_two_standalone_emit_copy_short
CMPL SI, $0x0c
- JGE cant_repeat_two_offset_standalone
+ JGE cant_repeat_two_offset_standalone_emit_copy_short
CMPL CX, $0x00000800
- JLT repeat_two_offset_standalone
+ JLT repeat_two_offset_standalone_emit_copy_short
-cant_repeat_two_offset_standalone:
+cant_repeat_two_offset_standalone_emit_copy_short:
CMPL DX, $0x00000104
- JLT repeat_three_standalone
+ JLT repeat_three_standalone_emit_copy_short
CMPL DX, $0x00010100
- JLT repeat_four_standalone
+ JLT repeat_four_standalone_emit_copy_short
CMPL DX, $0x0100ffff
- JLT repeat_five_standalone
+ JLT repeat_five_standalone_emit_copy_short
LEAL -16842747(DX), DX
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
ADDQ $0x05, BX
- JMP emit_repeat_again_standalone
+ JMP emit_repeat_again_standalone_emit_copy_short
-repeat_five_standalone:
+repeat_five_standalone_emit_copy_short:
LEAL -65536(DX), DX
MOVL DX, CX
MOVW $0x001d, (AX)
@@ -17359,33 +18584,33 @@ repeat_five_standalone:
MOVB CL, 4(AX)
ADDQ $0x05, BX
ADDQ $0x05, AX
- JMP gen_emit_repeat_end
+ JMP gen_emit_copy_end
-repeat_four_standalone:
+repeat_four_standalone_emit_copy_short:
LEAL -256(DX), DX
MOVW $0x0019, (AX)
MOVW DX, 2(AX)
ADDQ $0x04, BX
ADDQ $0x04, AX
- JMP gen_emit_repeat_end
+ JMP gen_emit_copy_end
-repeat_three_standalone:
+repeat_three_standalone_emit_copy_short:
LEAL -4(DX), DX
MOVW $0x0015, (AX)
MOVB DL, 2(AX)
ADDQ $0x03, BX
ADDQ $0x03, AX
- JMP gen_emit_repeat_end
+ JMP gen_emit_copy_end
-repeat_two_standalone:
+repeat_two_standalone_emit_copy_short:
SHLL $0x02, DX
ORL $0x01, DX
MOVW DX, (AX)
ADDQ $0x02, BX
ADDQ $0x02, AX
- JMP gen_emit_repeat_end
+ JMP gen_emit_copy_end
-repeat_two_offset_standalone:
+repeat_two_offset_standalone_emit_copy_short:
XORQ SI, SI
LEAL 1(SI)(DX*4), DX
MOVB CL, 1(AX)
@@ -17395,454 +18620,919 @@ repeat_two_offset_standalone:
MOVB DL, (AX)
ADDQ $0x02, BX
ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+two_byte_offset_short_standalone:
+ MOVL DX, SI
+ SHLL $0x02, SI
+ CMPL DX, $0x0c
+ JGE emit_copy_three_standalone
+ CMPL CX, $0x00000800
+ JGE emit_copy_three_standalone
+ LEAL -15(SI), SI
+ MOVB CL, 1(AX)
+ SHRL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+emit_copy_three_standalone:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW CX, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+
+gen_emit_copy_end:
+ MOVQ BX, ret+40(FP)
+ RET
+
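+// Unlike emitCopyNoRepeat below, emitCopy switches to the inlined emitRepeat
+// paths above once a long match has emitted its first maximal copy tag. The
+// per-tag cost of the plain copy encodings, as an illustrative Go sketch
+// (long matches chain several tags):
+//
+//	func copyTagLen(offset, length int) int {
+//		switch {
+//		case offset >= 1<<16:
+//			return 5 // copy with 4-byte offset
+//		case length >= 12 || offset >= 2048:
+//			return 3 // copy with 2-byte offset, length up to 64
+//		default:
+//			return 2 // copy with 11-bit offset, length 4-11
+//		}
+//	}
+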
+// func emitCopyNoRepeat(dst []byte, offset int, length int) int
+TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48
+ XORQ BX, BX
+ MOVQ dst_base+0(FP), AX
+ MOVQ offset+24(FP), CX
+ MOVQ length+32(FP), DX
+
+ // emitCopy
+ CMPL CX, $0x00010000
+ JL two_byte_offset_standalone_snappy
+
+four_bytes_loop_back_standalone_snappy:
+ CMPL DX, $0x40
+ JLE four_bytes_remain_standalone_snappy
+ MOVB $0xff, (AX)
+ MOVL CX, 1(AX)
+ LEAL -64(DX), DX
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ CMPL DX, $0x04
+ JL four_bytes_remain_standalone_snappy
+ JMP four_bytes_loop_back_standalone_snappy
+
+four_bytes_remain_standalone_snappy:
+ TESTL DX, DX
+ JZ gen_emit_copy_end_snappy
+ XORL SI, SI
+ LEAL -1(SI)(DX*4), DX
+ MOVB DL, (AX)
+ MOVL CX, 1(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end_snappy
+
+two_byte_offset_standalone_snappy:
+ CMPL DX, $0x40
+ JLE two_byte_offset_short_standalone_snappy
+ MOVB $0xee, (AX)
+ MOVW CX, 1(AX)
+ LEAL -60(DX), DX
+ ADDQ $0x03, AX
+ ADDQ $0x03, BX
+ JMP two_byte_offset_standalone_snappy
+
+two_byte_offset_short_standalone_snappy:
+ MOVL DX, SI
+ SHLL $0x02, SI
+ CMPL DX, $0x0c
+ JGE emit_copy_three_standalone_snappy
+ CMPL CX, $0x00000800
+ JGE emit_copy_three_standalone_snappy
+ LEAL -15(SI), SI
+ MOVB CL, 1(AX)
+ SHRL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end_snappy
+
+emit_copy_three_standalone_snappy:
+ LEAL -2(SI), SI
+ MOVB SI, (AX)
+ MOVW CX, 1(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+
+gen_emit_copy_end_snappy:
+ MOVQ BX, ret+40(FP)
+ RET
+
+// func matchLen(a []byte, b []byte) int
+// Requires: BMI
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+ MOVQ a_base+0(FP), AX
+ MOVQ b_base+24(FP), CX
+ MOVQ a_len+8(FP), DX
+
+ // matchLen
+ XORL SI, SI
+ CMPL DX, $0x08
+ JL matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ TESTQ BX, BX
+ JZ matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+
+#else
+ BSFQ BX, BX
+
+#endif
+ SARQ $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
+
+matchlen_loop_standalone:
+ LEAL -8(DX), DX
+ LEAL 8(SI), SI
+ CMPL DX, $0x08
+ JGE matchlen_loopback_standalone
+ JZ gen_match_len_end
+
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JL matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ SUBL $0x04, DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JL matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ SUBL $0x02, DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JL gen_match_len_end
+ MOVB (AX)(SI*1), BL
+ CMPB (CX)(SI*1), BL
+ JNE gen_match_len_end
+ LEAL 1(SI), SI
+
+gen_match_len_end:
+ MOVQ SI, ret+48(FP)
+ RET
+
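+// matchLen compares 8 bytes at a time and uses TZCNT/BSF to locate the first
+// differing byte. Equivalent Go, as an illustrative sketch (assumes
+// encoding/binary and math/bits, and that len(b) >= len(a)):
+//
+//	func matchLenRef(a, b []byte) (n int) {
+//		for len(a)-n >= 8 {
+//			x := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
+//			if x != 0 {
+//				return n + bits.TrailingZeros64(x)>>3
+//			}
+//			n += 8
+//		}
+//		for n < len(a) && a[n] == b[n] {
+//			n++
+//		}
+//		return n
+//	}
+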
+// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+ XORQ DI, DI
+
+lz4_s2_loop:
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ CMPQ AX, CX
+ JAE lz4_s2_dstfull
+ MOVBQZX (DX), R8
+ MOVQ R8, R9
+ MOVQ R8, R10
+ SHRQ $0x04, R9
+ ANDQ $0x0f, R10
+ CMPQ R8, $0xf0
+ JB lz4_s2_ll_end
+
+lz4_s2_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ MOVBQZX (DX), R8
+ ADDQ R8, R9
+ CMPQ R8, $0xff
+ JEQ lz4_s2_ll_loop
+
+lz4_s2_ll_end:
+ LEAQ (DX)(R9*1), R8
+ ADDQ $0x04, R10
+ CMPQ R8, BX
+ JAE lz4_s2_corrupt
+ INCQ DX
+ INCQ R8
+ TESTQ R9, R9
+ JZ lz4_s2_lits_done
+ LEAQ (AX)(R9*1), R11
+ CMPQ R11, CX
+ JAE lz4_s2_dstfull
+ ADDQ R9, SI
+ LEAL -1(R9), R11
+ CMPL R11, $0x3c
+ JLT one_byte_lz4_s2
+ CMPL R11, $0x00000100
+ JLT two_bytes_lz4_s2
+ CMPL R11, $0x00010000
+ JLT three_bytes_lz4_s2
+ CMPL R11, $0x01000000
+ JLT four_bytes_lz4_s2
+ MOVB $0xfc, (AX)
+ MOVL R11, 1(AX)
+ ADDQ $0x05, AX
+ JMP memmove_long_lz4_s2
+
+four_bytes_lz4_s2:
+ MOVL R11, R12
+ SHRL $0x10, R12
+ MOVB $0xf8, (AX)
+ MOVW R11, 1(AX)
+ MOVB R12, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4_s2
+
+three_bytes_lz4_s2:
+ MOVB $0xf4, (AX)
+ MOVW R11, 1(AX)
+ ADDQ $0x03, AX
+ JMP memmove_long_lz4_s2
+
+two_bytes_lz4_s2:
+ MOVB $0xf0, (AX)
+ MOVB R11, 1(AX)
+ ADDQ $0x02, AX
+ CMPL R11, $0x40
+ JL memmove_lz4_s2
+ JMP memmove_long_lz4_s2
+
+one_byte_lz4_s2:
+ SHLB $0x02, R11
+ MOVB R11, (AX)
+ ADDQ $0x01, AX
+
+memmove_lz4_s2:
+ LEAQ (AX)(R9*1), R11
+
+ // genMemMoveShort
+ CMPQ R9, $0x08
+ JLE emit_lit_memmove_lz4_s2_memmove_move_8
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_lz4_s2_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_lz4_s2_memmove_move_17through32
+ JMP emit_lit_memmove_lz4_s2_memmove_move_33through64
+
+emit_lit_memmove_lz4_s2_memmove_move_8:
+ MOVQ (DX), R12
+ MOVQ R12, (AX)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_8through16:
+ MOVQ (DX), R12
+ MOVQ -8(DX)(R9*1), DX
+ MOVQ R12, (AX)
+ MOVQ DX, -8(AX)(R9*1)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R9*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R9*1)
+ JMP memmove_end_copy_lz4_s2
+
+emit_lit_memmove_lz4_s2_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
-gen_emit_repeat_end:
- MOVQ BX, ret+40(FP)
- RET
+memmove_end_copy_lz4_s2:
+ MOVQ R11, AX
+ JMP lz4_s2_lits_emit_done
-// func emitCopy(dst []byte, offset int, length int) int
-TEXT ·emitCopy(SB), NOSPLIT, $0-48
- XORQ BX, BX
- MOVQ dst_base+0(FP), AX
- MOVQ offset+24(FP), CX
- MOVQ length+32(FP), DX
+memmove_long_lz4_s2:
+ LEAQ (AX)(R9*1), R11
- // emitCopy
- CMPL CX, $0x00010000
- JL two_byte_offset_standalone
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R9*1), X2
+ MOVOU -16(DX)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ AX, R12
+ ANDL $0x0000001f, R12
+ MOVQ $0x00000040, R14
+ SUBQ R12, R14
+ DECQ R13
+ JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+ LEAQ -32(DX)(R14*1), R12
+ LEAQ -32(AX)(R14*1), R15
-four_bytes_loop_back_standalone:
- CMPL DX, $0x40
- JLE four_bytes_remain_standalone
- MOVB $0xff, (AX)
- MOVL CX, 1(AX)
- LEAL -64(DX), DX
- ADDQ $0x05, BX
- ADDQ $0x05, AX
- CMPL DX, $0x04
- JL four_bytes_remain_standalone
+emit_lit_memmove_long_lz4_s2large_big_loop_back:
+ MOVOU (R12), X4
+ MOVOU 16(R12), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R12
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_lz4_s2large_big_loop_back
+
+emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32:
+ MOVOU -32(DX)(R14*1), X4
+ MOVOU -16(DX)(R14*1), X5
+ MOVOA X4, -32(AX)(R14*1)
+ MOVOA X5, -16(AX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R9*1)
+ MOVOU X3, -16(AX)(R9*1)
+ MOVQ R11, AX
+
+lz4_s2_lits_emit_done:
+ MOVQ R8, DX
+
+lz4_s2_lits_done:
+ CMPQ DX, BX
+ JNE lz4_s2_match
+ CMPQ R10, $0x04
+ JEQ lz4_s2_done
+ JMP lz4_s2_corrupt
+
+lz4_s2_match:
+ LEAQ 2(DX), R8
+ CMPQ R8, BX
+ JAE lz4_s2_corrupt
+ MOVWQZX (DX), R9
+ MOVQ R8, DX
+ TESTQ R9, R9
+ JZ lz4_s2_corrupt
+ CMPQ R9, SI
+ JA lz4_s2_corrupt
+ CMPQ R10, $0x13
+ JNE lz4_s2_ml_done
+
+lz4_s2_ml_loop:
+ MOVBQZX (DX), R8
+ INCQ DX
+ ADDQ R8, R10
+ CMPQ DX, BX
+ JAE lz4_s2_corrupt
+ CMPQ R8, $0xff
+ JEQ lz4_s2_ml_loop
+
+lz4_s2_ml_done:
+ ADDQ R10, SI
+ CMPQ R9, DI
+ JNE lz4_s2_docopy
// emitRepeat
-emit_repeat_again_standalone_emit_copy:
- MOVL DX, SI
- LEAL -4(DX), DX
- CMPL SI, $0x08
- JLE repeat_two_standalone_emit_copy
- CMPL SI, $0x0c
- JGE cant_repeat_two_offset_standalone_emit_copy
- CMPL CX, $0x00000800
- JLT repeat_two_offset_standalone_emit_copy
+emit_repeat_again_lz4_s2:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JLE repeat_two_lz4_s2
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_lz4_s2
+ CMPL R9, $0x00000800
+ JLT repeat_two_offset_lz4_s2
-cant_repeat_two_offset_standalone_emit_copy:
- CMPL DX, $0x00000104
- JLT repeat_three_standalone_emit_copy
- CMPL DX, $0x00010100
- JLT repeat_four_standalone_emit_copy
- CMPL DX, $0x0100ffff
- JLT repeat_five_standalone_emit_copy
- LEAL -16842747(DX), DX
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+cant_repeat_two_offset_lz4_s2:
+ CMPL R10, $0x00000104
+ JLT repeat_three_lz4_s2
+ CMPL R10, $0x00010100
+ JLT repeat_four_lz4_s2
+ CMPL R10, $0x0100ffff
+ JLT repeat_five_lz4_s2
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
- ADDQ $0x05, BX
- JMP emit_repeat_again_standalone_emit_copy
+ JMP emit_repeat_again_lz4_s2
-repeat_five_standalone_emit_copy:
- LEAL -65536(DX), DX
- MOVL DX, CX
+repeat_five_lz4_s2:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
MOVW $0x001d, (AX)
- MOVW DX, 2(AX)
- SARL $0x10, CX
- MOVB CL, 4(AX)
- ADDQ $0x05, BX
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
ADDQ $0x05, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_four_standalone_emit_copy:
- LEAL -256(DX), DX
+repeat_four_lz4_s2:
+ LEAL -256(R10), R10
MOVW $0x0019, (AX)
- MOVW DX, 2(AX)
- ADDQ $0x04, BX
+ MOVW R10, 2(AX)
ADDQ $0x04, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_three_standalone_emit_copy:
- LEAL -4(DX), DX
+repeat_three_lz4_s2:
+ LEAL -4(R10), R10
MOVW $0x0015, (AX)
- MOVB DL, 2(AX)
- ADDQ $0x03, BX
+ MOVB R10, 2(AX)
ADDQ $0x03, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_standalone_emit_copy:
- SHLL $0x02, DX
- ORL $0x01, DX
- MOVW DX, (AX)
- ADDQ $0x02, BX
+repeat_two_lz4_s2:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_offset_standalone_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(DX*4), DX
- MOVB CL, 1(AX)
- SARL $0x08, CX
- SHLL $0x05, CX
- ORL CX, DX
- MOVB DL, (AX)
- ADDQ $0x02, BX
+repeat_two_offset_lz4_s2:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
- JMP four_bytes_loop_back_standalone
+ JMP lz4_s2_loop
-four_bytes_remain_standalone:
- TESTL DX, DX
- JZ gen_emit_copy_end
- MOVB $0x03, SI
- LEAL -4(SI)(DX*4), DX
- MOVB DL, (AX)
- MOVL CX, 1(AX)
- ADDQ $0x05, BX
- ADDQ $0x05, AX
- JMP gen_emit_copy_end
+lz4_s2_docopy:
+ MOVQ R9, DI
-two_byte_offset_standalone:
- CMPL DX, $0x40
- JLE two_byte_offset_short_standalone
- CMPL CX, $0x00000800
- JAE long_offset_short_standalone
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB CL, 1(AX)
- MOVL CX, DI
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
- ADDQ $0x02, BX
+ // emitCopy
+ CMPL R10, $0x40
+ JLE two_byte_offset_short_lz4_s2
+ CMPL R9, $0x00000800
+ JAE long_offset_short_lz4_s2
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB R9, 1(AX)
+ MOVL R9, R11
+ SHRL $0x08, R11
+ SHLL $0x05, R11
+ ORL R11, R8
+ MOVB R8, (AX)
ADDQ $0x02, AX
- SUBL $0x08, DX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(DX), DX
- JMP cant_repeat_two_offset_standalone_emit_copy_short_2b
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
-emit_repeat_again_standalone_emit_copy_short_2b:
- MOVL DX, SI
- LEAL -4(DX), DX
- CMPL SI, $0x08
- JLE repeat_two_standalone_emit_copy_short_2b
- CMPL SI, $0x0c
- JGE cant_repeat_two_offset_standalone_emit_copy_short_2b
- CMPL CX, $0x00000800
- JLT repeat_two_offset_standalone_emit_copy_short_2b
+emit_repeat_again_lz4_s2_emit_copy_short_2b:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JLE repeat_two_lz4_s2_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
+ CMPL R9, $0x00000800
+ JLT repeat_two_offset_lz4_s2_emit_copy_short_2b
-cant_repeat_two_offset_standalone_emit_copy_short_2b:
- CMPL DX, $0x00000104
- JLT repeat_three_standalone_emit_copy_short_2b
- CMPL DX, $0x00010100
- JLT repeat_four_standalone_emit_copy_short_2b
- CMPL DX, $0x0100ffff
- JLT repeat_five_standalone_emit_copy_short_2b
- LEAL -16842747(DX), DX
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JLT repeat_four_lz4_s2_emit_copy_short_2b
+ CMPL R10, $0x0100ffff
+ JLT repeat_five_lz4_s2_emit_copy_short_2b
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
- ADDQ $0x05, BX
- JMP emit_repeat_again_standalone_emit_copy_short_2b
+ JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
-repeat_five_standalone_emit_copy_short_2b:
- LEAL -65536(DX), DX
- MOVL DX, CX
+repeat_five_lz4_s2_emit_copy_short_2b:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
MOVW $0x001d, (AX)
- MOVW DX, 2(AX)
- SARL $0x10, CX
- MOVB CL, 4(AX)
- ADDQ $0x05, BX
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
ADDQ $0x05, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_four_standalone_emit_copy_short_2b:
- LEAL -256(DX), DX
+repeat_four_lz4_s2_emit_copy_short_2b:
+ LEAL -256(R10), R10
MOVW $0x0019, (AX)
- MOVW DX, 2(AX)
- ADDQ $0x04, BX
+ MOVW R10, 2(AX)
ADDQ $0x04, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_three_standalone_emit_copy_short_2b:
- LEAL -4(DX), DX
+repeat_three_lz4_s2_emit_copy_short_2b:
+ LEAL -4(R10), R10
MOVW $0x0015, (AX)
- MOVB DL, 2(AX)
- ADDQ $0x03, BX
+ MOVB R10, 2(AX)
ADDQ $0x03, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_standalone_emit_copy_short_2b:
- SHLL $0x02, DX
- ORL $0x01, DX
- MOVW DX, (AX)
- ADDQ $0x02, BX
+repeat_two_lz4_s2_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_offset_standalone_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(DX*4), DX
- MOVB CL, 1(AX)
- SARL $0x08, CX
- SHLL $0x05, CX
- ORL CX, DX
- MOVB DL, (AX)
- ADDQ $0x02, BX
+repeat_two_offset_lz4_s2_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-long_offset_short_standalone:
+long_offset_short_lz4_s2:
MOVB $0xee, (AX)
- MOVW CX, 1(AX)
- LEAL -60(DX), DX
+ MOVW R9, 1(AX)
+ LEAL -60(R10), R10
ADDQ $0x03, AX
- ADDQ $0x03, BX
// emitRepeat
-emit_repeat_again_standalone_emit_copy_short:
- MOVL DX, SI
- LEAL -4(DX), DX
- CMPL SI, $0x08
- JLE repeat_two_standalone_emit_copy_short
- CMPL SI, $0x0c
- JGE cant_repeat_two_offset_standalone_emit_copy_short
- CMPL CX, $0x00000800
- JLT repeat_two_offset_standalone_emit_copy_short
+emit_repeat_again_lz4_s2_emit_copy_short:
+ MOVL R10, R8
+ LEAL -4(R10), R10
+ CMPL R8, $0x08
+ JLE repeat_two_lz4_s2_emit_copy_short
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_lz4_s2_emit_copy_short
+ CMPL R9, $0x00000800
+ JLT repeat_two_offset_lz4_s2_emit_copy_short
-cant_repeat_two_offset_standalone_emit_copy_short:
- CMPL DX, $0x00000104
- JLT repeat_three_standalone_emit_copy_short
- CMPL DX, $0x00010100
- JLT repeat_four_standalone_emit_copy_short
- CMPL DX, $0x0100ffff
- JLT repeat_five_standalone_emit_copy_short
- LEAL -16842747(DX), DX
- MOVW $0x001d, (AX)
- MOVW $0xfffb, 2(AX)
+cant_repeat_two_offset_lz4_s2_emit_copy_short:
+ CMPL R10, $0x00000104
+ JLT repeat_three_lz4_s2_emit_copy_short
+ CMPL R10, $0x00010100
+ JLT repeat_four_lz4_s2_emit_copy_short
+ CMPL R10, $0x0100ffff
+ JLT repeat_five_lz4_s2_emit_copy_short
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
- ADDQ $0x05, BX
- JMP emit_repeat_again_standalone_emit_copy_short
+ JMP emit_repeat_again_lz4_s2_emit_copy_short
-repeat_five_standalone_emit_copy_short:
- LEAL -65536(DX), DX
- MOVL DX, CX
+repeat_five_lz4_s2_emit_copy_short:
+ LEAL -65536(R10), R10
+ MOVL R10, R9
MOVW $0x001d, (AX)
- MOVW DX, 2(AX)
- SARL $0x10, CX
- MOVB CL, 4(AX)
- ADDQ $0x05, BX
+ MOVW R10, 2(AX)
+ SARL $0x10, R9
+ MOVB R9, 4(AX)
ADDQ $0x05, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_four_standalone_emit_copy_short:
- LEAL -256(DX), DX
+repeat_four_lz4_s2_emit_copy_short:
+ LEAL -256(R10), R10
MOVW $0x0019, (AX)
- MOVW DX, 2(AX)
- ADDQ $0x04, BX
+ MOVW R10, 2(AX)
ADDQ $0x04, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_three_standalone_emit_copy_short:
- LEAL -4(DX), DX
+repeat_three_lz4_s2_emit_copy_short:
+ LEAL -4(R10), R10
MOVW $0x0015, (AX)
- MOVB DL, 2(AX)
- ADDQ $0x03, BX
+ MOVB R10, 2(AX)
ADDQ $0x03, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_standalone_emit_copy_short:
- SHLL $0x02, DX
- ORL $0x01, DX
- MOVW DX, (AX)
- ADDQ $0x02, BX
+repeat_two_lz4_s2_emit_copy_short:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-repeat_two_offset_standalone_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(DX*4), DX
- MOVB CL, 1(AX)
- SARL $0x08, CX
- SHLL $0x05, CX
- ORL CX, DX
- MOVB DL, (AX)
- ADDQ $0x02, BX
+repeat_two_offset_lz4_s2_emit_copy_short:
+ XORQ R8, R8
+ LEAL 1(R8)(R10*4), R10
+ MOVB R9, 1(AX)
+ SARL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R10
+ MOVB R10, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
- JMP two_byte_offset_standalone
+ JMP lz4_s2_loop
-two_byte_offset_short_standalone:
- CMPL DX, $0x0c
- JGE emit_copy_three_standalone
- CMPL CX, $0x00000800
- JGE emit_copy_three_standalone
- MOVB $0x01, SI
- LEAL -16(SI)(DX*4), DX
- MOVB CL, 1(AX)
- SHRL $0x08, CX
- SHLL $0x05, CX
- ORL CX, DX
- MOVB DL, (AX)
- ADDQ $0x02, BX
+two_byte_offset_short_lz4_s2:
+ MOVL R10, R8
+ SHLL $0x02, R8
+ CMPL R10, $0x0c
+ JGE emit_copy_three_lz4_s2
+ CMPL R9, $0x00000800
+ JGE emit_copy_three_lz4_s2
+ LEAL -15(R8), R8
+ MOVB R9, 1(AX)
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end
+ JMP lz4_s2_loop
-emit_copy_three_standalone:
- MOVB $0x02, SI
- LEAL -4(SI)(DX*4), DX
- MOVB DL, (AX)
- MOVW CX, 1(AX)
- ADDQ $0x03, BX
+emit_copy_three_lz4_s2:
+ LEAL -2(R8), R8
+ MOVB R8, (AX)
+ MOVW R9, 1(AX)
ADDQ $0x03, AX
+ JMP lz4_s2_loop
-gen_emit_copy_end:
- MOVQ BX, ret+40(FP)
+lz4_s2_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
RET
-// func emitCopyNoRepeat(dst []byte, offset int, length int) int
-TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48
- XORQ BX, BX
- MOVQ dst_base+0(FP), AX
- MOVQ offset+24(FP), CX
- MOVQ length+32(FP), DX
-
- // emitCopy
- CMPL CX, $0x00010000
- JL two_byte_offset_standalone_snappy
+lz4_s2_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
-four_bytes_loop_back_standalone_snappy:
- CMPL DX, $0x40
- JLE four_bytes_remain_standalone_snappy
- MOVB $0xff, (AX)
- MOVL CX, 1(AX)
- LEAL -64(DX), DX
- ADDQ $0x05, BX
- ADDQ $0x05, AX
- CMPL DX, $0x04
- JL four_bytes_remain_standalone_snappy
- JMP four_bytes_loop_back_standalone_snappy
+lz4_s2_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
-four_bytes_remain_standalone_snappy:
- TESTL DX, DX
- JZ gen_emit_copy_end_snappy
- MOVB $0x03, SI
- LEAL -4(SI)(DX*4), DX
- MOVB DL, (AX)
- MOVL CX, 1(AX)
- ADDQ $0x05, BX
+// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+// Requires: SSE2
+TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
+ XORQ SI, SI
+ MOVQ dst_base+0(FP), AX
+ MOVQ dst_len+8(FP), CX
+ MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), BX
+ LEAQ (DX)(BX*1), BX
+ LEAQ -10(AX)(CX*1), CX
+
+lz4_snappy_loop:
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ AX, CX
+ JAE lz4_snappy_dstfull
+ MOVBQZX (DX), DI
+ MOVQ DI, R8
+ MOVQ DI, R9
+ SHRQ $0x04, R8
+ ANDQ $0x0f, R9
+ CMPQ DI, $0xf0
+ JB lz4_snappy_ll_end
+
+lz4_snappy_ll_loop:
+ INCQ DX
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ MOVBQZX (DX), DI
+ ADDQ DI, R8
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ll_loop
+
+lz4_snappy_ll_end:
+ LEAQ (DX)(R8*1), DI
+ ADDQ $0x04, R9
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ INCQ DX
+ INCQ DI
+ TESTQ R8, R8
+ JZ lz4_snappy_lits_done
+ LEAQ (AX)(R8*1), R10
+ CMPQ R10, CX
+ JAE lz4_snappy_dstfull
+ ADDQ R8, SI
+ LEAL -1(R8), R10
+ CMPL R10, $0x3c
+ JLT one_byte_lz4_snappy
+ CMPL R10, $0x00000100
+ JLT two_bytes_lz4_snappy
+ CMPL R10, $0x00010000
+ JLT three_bytes_lz4_snappy
+ CMPL R10, $0x01000000
+ JLT four_bytes_lz4_snappy
+ MOVB $0xfc, (AX)
+ MOVL R10, 1(AX)
ADDQ $0x05, AX
- JMP gen_emit_copy_end_snappy
+ JMP memmove_long_lz4_snappy
-two_byte_offset_standalone_snappy:
- CMPL DX, $0x40
- JLE two_byte_offset_short_standalone_snappy
- MOVB $0xee, (AX)
- MOVW CX, 1(AX)
- LEAL -60(DX), DX
+four_bytes_lz4_snappy:
+ MOVL R10, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (AX)
+ MOVW R10, 1(AX)
+ MOVB R11, 3(AX)
+ ADDQ $0x04, AX
+ JMP memmove_long_lz4_snappy
+
+three_bytes_lz4_snappy:
+ MOVB $0xf4, (AX)
+ MOVW R10, 1(AX)
ADDQ $0x03, AX
- ADDQ $0x03, BX
- JMP two_byte_offset_standalone_snappy
+ JMP memmove_long_lz4_snappy
-two_byte_offset_short_standalone_snappy:
- CMPL DX, $0x0c
- JGE emit_copy_three_standalone_snappy
- CMPL CX, $0x00000800
- JGE emit_copy_three_standalone_snappy
- MOVB $0x01, SI
- LEAL -16(SI)(DX*4), DX
- MOVB CL, 1(AX)
- SHRL $0x08, CX
- SHLL $0x05, CX
- ORL CX, DX
- MOVB DL, (AX)
- ADDQ $0x02, BX
+two_bytes_lz4_snappy:
+ MOVB $0xf0, (AX)
+ MOVB R10, 1(AX)
ADDQ $0x02, AX
- JMP gen_emit_copy_end_snappy
+ CMPL R10, $0x40
+ JL memmove_lz4_snappy
+ JMP memmove_long_lz4_snappy
-emit_copy_three_standalone_snappy:
- MOVB $0x02, SI
- LEAL -4(SI)(DX*4), DX
- MOVB DL, (AX)
- MOVW CX, 1(AX)
- ADDQ $0x03, BX
- ADDQ $0x03, AX
+one_byte_lz4_snappy:
+ SHLB $0x02, R10
+ MOVB R10, (AX)
+ ADDQ $0x01, AX
-gen_emit_copy_end_snappy:
- MOVQ BX, ret+40(FP)
- RET
+memmove_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
-// func matchLen(a []byte, b []byte) int
-// Requires: BMI
-TEXT ·matchLen(SB), NOSPLIT, $0-56
- MOVQ a_base+0(FP), AX
- MOVQ b_base+24(FP), CX
- MOVQ a_len+8(FP), DX
+ // genMemMoveShort
+ CMPQ R8, $0x08
+ JLE emit_lit_memmove_lz4_snappy_memmove_move_8
+ CMPQ R8, $0x10
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16
+ CMPQ R8, $0x20
+ JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32
+ JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64
- // matchLen
- XORL SI, SI
- CMPL DX, $0x08
- JL matchlen_match4_standalone
+emit_lit_memmove_lz4_snappy_memmove_move_8:
+ MOVQ (DX), R11
+ MOVQ R11, (AX)
+ JMP memmove_end_copy_lz4_snappy
-matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+emit_lit_memmove_lz4_snappy_memmove_move_8through16:
+ MOVQ (DX), R11
+ MOVQ -8(DX)(R8*1), DX
+ MOVQ R11, (AX)
+ MOVQ DX, -8(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
-#ifdef GOAMD64_v3
- TZCNTQ BX, BX
+emit_lit_memmove_lz4_snappy_memmove_move_17through32:
+ MOVOU (DX), X0
+ MOVOU -16(DX)(R8*1), X1
+ MOVOU X0, (AX)
+ MOVOU X1, -16(AX)(R8*1)
+ JMP memmove_end_copy_lz4_snappy
-#else
- BSFQ BX, BX
+emit_lit_memmove_lz4_snappy_memmove_move_33through64:
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
-#endif
- SARQ $0x03, BX
- LEAL (SI)(BX*1), SI
- JMP gen_match_len_end
+memmove_end_copy_lz4_snappy:
+ MOVQ R10, AX
+ JMP lz4_snappy_lits_emit_done
-matchlen_loop_standalone:
- LEAL -8(DX), DX
- LEAL 8(SI), SI
- CMPL DX, $0x08
- JGE matchlen_loopback_standalone
- JZ gen_match_len_end
+memmove_long_lz4_snappy:
+ LEAQ (AX)(R8*1), R10
-matchlen_match4_standalone:
- CMPL DX, $0x04
- JL matchlen_match2_standalone
- MOVL (AX)(SI*1), BX
- CMPL (CX)(SI*1), BX
- JNE matchlen_match2_standalone
- SUBL $0x04, DX
- LEAL 4(SI), SI
+ // genMemMoveLong
+ MOVOU (DX), X0
+ MOVOU 16(DX), X1
+ MOVOU -32(DX)(R8*1), X2
+ MOVOU -16(DX)(R8*1), X3
+ MOVQ R8, R12
+ SHRQ $0x05, R12
+ MOVQ AX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
+ JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ LEAQ -32(DX)(R13*1), R11
+ LEAQ -32(AX)(R13*1), R14
-matchlen_match2_standalone:
- CMPL DX, $0x02
- JL matchlen_match1_standalone
- MOVW (AX)(SI*1), BX
- CMPW (CX)(SI*1), BX
- JNE matchlen_match1_standalone
- SUBL $0x02, DX
- LEAL 2(SI), SI
+emit_lit_memmove_long_lz4_snappylarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
+ ADDQ $0x20, R13
+ DECQ R12
+ JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back
-matchlen_match1_standalone:
- CMPL DX, $0x01
- JL gen_match_len_end
- MOVB (AX)(SI*1), BL
- CMPB (CX)(SI*1), BL
- JNE gen_match_len_end
- LEAL 1(SI), SI
+emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32:
+ MOVOU -32(DX)(R13*1), X4
+ MOVOU -16(DX)(R13*1), X5
+ MOVOA X4, -32(AX)(R13*1)
+ MOVOA X5, -16(AX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
+ JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, -32(AX)(R8*1)
+ MOVOU X3, -16(AX)(R8*1)
+ MOVQ R10, AX
-gen_match_len_end:
- MOVQ SI, ret+48(FP)
+lz4_snappy_lits_emit_done:
+ MOVQ DI, DX
+
+lz4_snappy_lits_done:
+ CMPQ DX, BX
+ JNE lz4_snappy_match
+ CMPQ R9, $0x04
+ JEQ lz4_snappy_done
+ JMP lz4_snappy_corrupt
+
+lz4_snappy_match:
+ LEAQ 2(DX), DI
+ CMPQ DI, BX
+ JAE lz4_snappy_corrupt
+ MOVWQZX (DX), R8
+ MOVQ DI, DX
+ TESTQ R8, R8
+ JZ lz4_snappy_corrupt
+ CMPQ R8, SI
+ JA lz4_snappy_corrupt
+ CMPQ R9, $0x13
+ JNE lz4_snappy_ml_done
+
+lz4_snappy_ml_loop:
+ MOVBQZX (DX), DI
+ INCQ DX
+ ADDQ DI, R9
+ CMPQ DX, BX
+ JAE lz4_snappy_corrupt
+ CMPQ DI, $0xff
+ JEQ lz4_snappy_ml_loop
+
+lz4_snappy_ml_done:
+ ADDQ R9, SI
+
+ // emitCopy
+two_byte_offset_lz4_s2:
+ CMPL R9, $0x40
+ JLE two_byte_offset_short_lz4_s2
+ MOVB $0xee, (AX)
+ MOVW R8, 1(AX)
+ LEAL -60(R9), R9
+ ADDQ $0x03, AX
+ CMPQ AX, CX
+ JAE lz4_snappy_loop
+ JMP two_byte_offset_lz4_s2
+
+two_byte_offset_short_lz4_s2:
+ MOVL R9, DI
+ SHLL $0x02, DI
+ CMPL R9, $0x0c
+ JGE emit_copy_three_lz4_s2
+ CMPL R8, $0x00000800
+ JGE emit_copy_three_lz4_s2
+ LEAL -15(DI), DI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ JMP lz4_snappy_loop
+
+emit_copy_three_lz4_s2:
+ LEAL -2(DI), DI
+ MOVB DI, (AX)
+ MOVW R8, 1(AX)
+ ADDQ $0x03, AX
+ JMP lz4_snappy_loop
+
+lz4_snappy_done:
+ MOVQ dst_base+0(FP), CX
+ SUBQ CX, AX
+ MOVQ SI, uncompressed+48(FP)
+ MOVQ AX, dstUsed+56(FP)
+ RET
+
+lz4_snappy_corrupt:
+ XORQ AX, AX
+ LEAQ -1(AX), SI
+ MOVQ SI, uncompressed+48(FP)
+ RET
+
+lz4_snappy_dstfull:
+ XORQ AX, AX
+ LEAQ -2(AX), SI
+ MOVQ SI, uncompressed+48(FP)
RET
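Both converters above share the same front end: a token whose upper nibble is the literal length and whose lower nibble is the match length minus 4, each extended by bytes that add 255 apiece for as long as they read 0xff. They differ only in the back end (S2 repeat codes vs. plain Snappy copies) and signal failure through a negative `uncompressed` result (-1 corrupt, -2 destination full), which the Go wrappers below translate into errors. A hedged sketch of the header decode, with names of our own choosing:

```go
package main

import "fmt"

// decodeLZ4Token sketches the sequence-header parsing done by the
// converters: upper nibble = literal length, lower nibble = match
// length - 4, and a saturated nibble (15) is extended by bytes that
// each add 255 until a byte below 0xff ends the run.
func decodeLZ4Token(src []byte) (ll, ml, s int, ok bool) {
	const lz4MinMatch = 4
	if len(src) == 0 {
		return 0, 0, 0, false
	}
	token := src[0]
	ll = int(token >> 4)
	ml = lz4MinMatch + int(token&0xf)
	s = 1
	if token >= 0xf0 { // literal length saturated: read extension bytes
		for {
			if s >= len(src) {
				return 0, 0, 0, false // truncated input
			}
			v := src[s]
			s++
			ll += int(v)
			if v != 0xff {
				break
			}
		}
	}
	// A saturated match length (ml == 19) is extended the same way, but
	// only after the literals and the 2-byte offset, as the callers do.
	return ll, ml, s, true
}

func main() {
	ll, ml, s, ok := decodeLZ4Token([]byte{0xf2, 0x05})
	fmt.Println(ll, ml, s, ok) // 20 6 2 true
}
```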
diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go
new file mode 100644
index 0000000000..46ed908e3c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go
@@ -0,0 +1,585 @@
+// Copyright (c) 2022 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+)
+
+// LZ4Converter provides conversion from LZ4 blocks as defined here:
+// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
+type LZ4Converter struct {
+}
+
+// ErrDstTooSmall is returned when the provided destination is too small.
+var ErrDstTooSmall = errors.New("s2: destination too small")
+
+// ConvertBlock will convert an LZ4 block and append it as an S2
+// block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const inline = true
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var lastOffset uint16
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return dst[:d], 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return dst[:d], 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if offset == lastOffset {
+ if debug {
+ fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitRepeat16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ d += 2
+ break
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ d += 3
+ break
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ d += 4
+ break
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ d += 5 + emitRepeat16(dst[5:], offset, left)
+ break
+ }
+ d += 5
+ break
+ }
+ }
+ } else {
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ if !inline {
+ d += emitCopy16(dst[d:], offset, ml)
+ } else {
+ length := ml
+ dst := dst[d:]
+ for len(dst) > 5 {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ d += off + emitRepeat16(dst[off:], offset, length)
+ break
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ }
+ lastOffset = offset
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// ConvertBlockSnappy will convert an LZ4 block and append it
+// as a Snappy block without block length to dst.
+// The uncompressed size is returned as well.
+// dst must have capacity to contain the entire compressed block.
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
+ if len(src) == 0 {
+ return dst, 0, nil
+ }
+ const debug = false
+ const lz4MinMatch = 4
+
+ s, d := 0, len(dst)
+ dst = dst[:cap(dst)]
+ // Use assembly when possible
+ if !debug && hasAmd64Asm {
+ res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
+ if res < 0 {
+ const (
+ errCorrupt = -1
+ errDstTooSmall = -2
+ )
+ switch res {
+ case errCorrupt:
+ return nil, 0, ErrCorrupt
+ case errDstTooSmall:
+ return nil, 0, ErrDstTooSmall
+ default:
+ return nil, 0, fmt.Errorf("unexpected result: %d", res)
+ }
+ }
+ if d+sz > len(dst) {
+ return nil, 0, ErrDstTooSmall
+ }
+ return dst[:d+sz], res, nil
+ }
+
+ dLimit := len(dst) - 10
+ var uncompressed int
+ if debug {
+ fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
+ }
+
+ for {
+ if s >= len(src) {
+ return nil, 0, ErrCorrupt
+ }
+ // Read literal info
+ token := src[s]
+ ll := int(token >> 4)
+ ml := int(lz4MinMatch + (token & 0xf))
+
+ // If upper nibble is 15, literal length is extended
+ if token >= 0xf0 {
+ for {
+ s++
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ ll += int(val)
+ if val != 255 {
+ break
+ }
+ }
+ }
+ // Skip past token
+ if s+ll >= len(src) {
+ if debug {
+ fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ s++
+ if ll > 0 {
+ if d+ll > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ if debug {
+ fmt.Printf("emit %d literals\n", ll)
+ }
+ d += emitLiteralGo(dst[d:], src[s:s+ll])
+ s += ll
+ uncompressed += ll
+ }
+
+ // Check if we are done...
+ if s == len(src) && ml == lz4MinMatch {
+ break
+ }
+ // 2 byte offset
+ if s >= len(src)-2 {
+ if debug {
+ fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ offset := binary.LittleEndian.Uint16(src[s:])
+ s += 2
+ if offset == 0 {
+ if debug {
+ fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
+ }
+ return nil, 0, ErrCorrupt
+ }
+ if int(offset) > uncompressed {
+ if debug {
+ fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
+ }
+ return nil, 0, ErrCorrupt
+ }
+
+ if ml == lz4MinMatch+15 {
+ for {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ val := src[s]
+ s++
+ ml += int(val)
+ if val != 255 {
+ if s >= len(src) {
+ if debug {
+ fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
+ }
+ return nil, 0, ErrCorrupt
+ }
+ break
+ }
+ }
+ }
+ if debug {
+ fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
+ }
+ length := ml
+ // d += emitCopyNoRepeat(dst[d:], int(offset), ml)
+ for length > 0 {
+ if d >= dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = 63<<2 | tagCopy2
+ length -= 64
+ d += 3
+ continue
+ }
+ if length >= 12 || offset >= 2048 || length < 4 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[d+2] = uint8(offset >> 8)
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(length-1)<<2 | tagCopy2
+ d += 3
+ break
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[d+1] = uint8(offset)
+ dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ d += 2
+ break
+ }
+ uncompressed += ml
+ if d > dLimit {
+ return nil, 0, ErrDstTooSmall
+ }
+ }
+
+ return dst[:d], uncompressed, nil
+}
+
+// emitRepeat16 writes a repeat chunk and returns the number of bytes written.
+// Length must be at least 4 and < 1<<24
+func emitRepeat16(dst []byte, offset uint16, length int) int {
+ // Repeat offset, make length cheaper
+ length -= 4
+ if length <= 4 {
+ dst[0] = uint8(length)<<2 | tagCopy1
+ dst[1] = 0
+ return 2
+ }
+ if length < 8 && offset < 2048 {
+ // Encode WITH offset
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
+ return 2
+ }
+ if length < (1<<8)+4 {
+ length -= 4
+ dst[2] = uint8(length)
+ dst[1] = 0
+ dst[0] = 5<<2 | tagCopy1
+ return 3
+ }
+ if length < (1<<16)+(1<<8) {
+ length -= 1 << 8
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 6<<2 | tagCopy1
+ return 4
+ }
+ const maxRepeat = (1 << 24) - 1
+ length -= 1 << 16
+ left := 0
+ if length > maxRepeat {
+ left = length - maxRepeat + 4
+ length = maxRepeat - 4
+ }
+ dst[4] = uint8(length >> 16)
+ dst[3] = uint8(length >> 8)
+ dst[2] = uint8(length >> 0)
+ dst[1] = 0
+ dst[0] = 7<<2 | tagCopy1
+ if left > 0 {
+ return 5 + emitRepeat16(dst[5:], offset, left)
+ }
+ return 5
+}
+
+// emitCopy16 writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint16
+// 4 <= length && length <= math.MaxUint32
+func emitCopy16(dst []byte, offset uint16, length int) int {
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return off + emitRepeat16(dst[off:], offset, length)
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = uint8(length-1)<<2 | tagCopy2
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ return 2
+}
+
+// emitLiteralGo writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+// dst is long enough to hold the encoded bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
+func emitLiteralGo(dst, lit []byte) int {
+ if len(lit) == 0 {
+ return 0
+ }
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[1] = uint8(n)
+ dst[0] = 60<<2 | tagLiteral
+ i = 2
+ case n < 1<<16:
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 61<<2 | tagLiteral
+ i = 3
+ case n < 1<<24:
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 62<<2 | tagLiteral
+ i = 4
+ default:
+ dst[4] = uint8(n >> 24)
+ dst[3] = uint8(n >> 16)
+ dst[2] = uint8(n >> 8)
+ dst[1] = uint8(n)
+ dst[0] = 63<<2 | tagLiteral
+ i = 5
+ }
+ return i + copy(dst[i:], lit)
+}
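A hedged usage sketch for the file above: per the doc comments, `dst` must already have capacity for the converted block, so a caller can size a buffer heuristically and grow it on ErrDstTooSmall. The sizing below is an assumption for illustration, not a documented bound:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

// convertLZ4 converts one raw LZ4 block into an S2 block, retrying with a
// larger buffer when the converter reports the destination is too small.
func convertLZ4(lz4Block []byte) ([]byte, int, error) {
	var conv s2.LZ4Converter
	dst := make([]byte, 0, 2*len(lz4Block)+16) // heuristic starting capacity
	for {
		out, uncompressed, err := conv.ConvertBlock(dst, lz4Block)
		if err == s2.ErrDstTooSmall {
			dst = make([]byte, 0, 2*cap(dst))
			continue
		}
		return out, uncompressed, err
	}
}

func main() {
	// A tiny hand-built LZ4 block: token 0x40 = 4 literals, final sequence.
	lz4Block := []byte{0x40, 'a', 'b', 'c', 'd'}
	out, uncompressed, err := convertLZ4(lz4Block)
	fmt.Println(len(out), uncompressed, err) // 5 4 <nil>
}
```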
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 66a95c18ef..ca0951452e 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -32,14 +32,38 @@ func (d *dict) ID() uint32 {
return d.id
}
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
if d == nil {
return 0
}
return len(d.content)
}
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+ if d == nil {
+ return nil
+ }
+ return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+ if d == nil {
+ return [3]int{}
+ }
+ return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+ if d == nil {
+ return nil
+ }
+ return d.litEnc
+}
+
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@@ -64,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
var err error
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("loading literal table: %w", err)
}
d.litEnc.Reuse = huff0.ReusePolicyMust
@@ -122,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
return &d, nil
}
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+ ID() uint32
+ ContentSize() int
+ Content() []byte
+ Offsets() [3]int
+ LitEncoder() *huff0.Scratch
+}, error) {
+ initPredefined()
+ d, err := loadDict(b)
+ return d, err
+}
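A hedged sketch of the new inspection entry point; the dictionary file name is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Assumes my.dict holds a serialized zstd dictionary,
	// e.g. one produced by `zstd --train`.
	b, err := os.ReadFile("my.dict")
	if err != nil {
		panic(err)
	}
	d, err := zstd.InspectDictionary(b)
	if err != nil {
		panic(err)
	}
	fmt.Printf("id: %d, content: %d bytes, offsets: %v\n",
		d.ID(), d.ContentSize(), d.Offsets())
}
```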
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index bfb2e146c3..e008b99298 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
if singleBlock {
e.lowMem = true
}
- e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
e.lowMem = low
}
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index ea7df3dd84..37b5167d27 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -16,10 +16,23 @@ Package home: https://github.com/klauspost/cpuid
## installing
-`go get -u github.com/klauspost/cpuid/v2` using modules.
-
+`go get -u github.com/klauspost/cpuid/v2` using modules.
Drop `v2` for others.
+To install the binary:
+
+`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest`
+
+Or download binaries from the release page: https://github.com/klauspost/cpuid/releases
+
+### Homebrew
+
+For macOS/Linux users, you can install via [brew](https://brew.sh/):
+
+```sh
+$ brew install cpuid
+```
+
## example
```Go
@@ -77,10 +90,14 @@ We have Streaming SIMD 2 Extensions
The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features.
A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler.
+To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc.
+This can be used with `cpuid.CPU.HasAll(f)` to quickly test whether all features are supported, as shown below.
+
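+A minimal sketch (the exact feature set is illustrative):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	// Combine once, then test cheaply many times.
+	f := cpuid.CombineFeatures(cpuid.CMOV, cpuid.CMPXCHG8, cpuid.X87, cpuid.FXSR, cpuid.MMX, cpuid.SYSCALL, cpuid.SSE, cpuid.SSE2)
+	fmt.Println("baseline supported:", cpuid.CPU.HasAll(f))
+}
+```
+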
Note that for some cpu/os combinations some features will not be detected.
`amd64` has rather good support and should work reliably on all platforms.
-Note that hypervisors may not pass through all CPU features.
+Note that hypervisors may not pass all CPU features through to the guest OS,
+so even if your host supports a feature it may not be visible on guests.
## arm64 feature detection
@@ -253,6 +270,223 @@ Exit Code 0
Exit Code 1
```
+
+## Available flags
+
+### x86 & amd64
+
+| Feature Flag | Description |
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) |
+| AESNI | Advanced Encryption Standard New Instructions |
+| AMD3DNOW | AMD 3DNOW |
+| AMD3DNOWEXT | AMD 3DNowExt |
+| AMXBF16 | Tile computational operations on BFLOAT16 numbers |
+| AMXINT8 | Tile computational operations on 8-bit integers |
+| AMXFP16 | Tile computational operations on FP16 numbers |
+| AMXTILE | Tile architecture |
+| AVX | AVX functions |
+| AVX2 | AVX2 functions |
+| AVX512BF16 | AVX-512 BFLOAT16 Instructions |
+| AVX512BITALG | AVX-512 Bit Algorithms |
+| AVX512BW | AVX-512 Byte and Word Instructions |
+| AVX512CD | AVX-512 Conflict Detection Instructions |
+| AVX512DQ | AVX-512 Doubleword and Quadword Instructions |
+| AVX512ER | AVX-512 Exponential and Reciprocal Instructions |
+| AVX512F | AVX-512 Foundation |
+| AVX512FP16 | AVX-512 FP16 Instructions |
+| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions |
+| AVX512PF | AVX-512 Prefetch Instructions |
+| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions |
+| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 |
+| AVX512VL | AVX-512 Vector Length Extensions |
+| AVX512VNNI | AVX-512 Vector Neural Network Instructions |
+| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q |
+| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword |
+| AVXIFMA | AVX-IFMA instructions |
+| AVXNECONVERT | AVX-NE-CONVERT instructions |
+| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one |
+| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions |
+| AVXVNNIINT8 | AVX-VNNI-INT8 instructions |
+| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 |
+| BMI1 | Bit Manipulation Instruction Set 1 |
+| BMI2 | Bit Manipulation Instruction Set 2 |
+| CETIBT | Intel CET Indirect Branch Tracking |
+| CETSS | Intel CET Shadow Stack |
+| CLDEMOTE | Cache Line Demote |
+| CLMUL | Carry-less Multiplication |
+| CLZERO | CLZERO instruction supported |
+| CMOV | i686 CMOV |
+| CMPCCXADD | CMPCCXADD instructions |
+| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB |
+| CMPXCHG8 | CMPXCHG8 instruction |
+| CPBOOST | Core Performance Boost |
+| CPPC | AMD: Collaborative Processor Performance Control |
+| CX16 | CMPXCHG16B Instruction |
+| EFER_LMSLE_UNS | AMD: Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ |
+| ENQCMD | Enqueue Command |
+| ERMS | Enhanced REP MOVSB/STOSB |
+| F16C | Half-precision floating-point conversion |
+| FLUSH_L1D | Flush L1D cache |
+| FMA3 | Intel FMA 3. Does not imply AVX. |
+| FMA4 | Bulldozer FMA4 functions |
+| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide |
+| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide |
+| FSRM | Fast Short Rep Mov |
+| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 |
+| FXSROPT | FXSAVE/FXRSTOR optimizations |
+| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. |
+| HLE | Hardware Lock Elision |
+| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR |
+| HTT | Hyperthreading (enabled) |
+| HWA | Hardware assert supported. Indicates support for MSRC001_10 |
+| HYBRID_CPU | This part has CPUs of more than one type. |
+| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors |
+| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) |
+| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR |
+| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) |
+| IBRS | AMD: Indirect Branch Restricted Speculation |
+| IBRS_PREFERRED | AMD: IBRS is preferred over software solution |
+| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection |
+| IBS | Instruction Based Sampling (AMD) |
+| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) |
+| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) |
+| IBSFFV | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) |
+| IBSOPSAM | Instruction Based Sampling Feature (AMD) |
+| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) |
+| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) |
+| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported |
+| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported |
+| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse |
+| IBS_PREVENTHOST | Disallowing IBS use by the host supported |
+| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 |
+| IDPRED_CTRL | IPRED_DIS |
+| INT_WBINVD | WBINVD/WBNOINVD are interruptible. |
+| INVLPGB | INVLPGB and TLBSYNC instructions supported |
+| LAHF | LAHF/SAHF in long mode |
+| LAM | If set, CPU supports Linear Address Masking |
+| LBRVIRT | LBR virtualization |
+| LZCNT | LZCNT instruction |
+| MCAOVERFLOW | MCA overflow recovery support. |
+| MCDT_NO | Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it. |
+| MCOMMIT | MCOMMIT instruction supported |
+| MD_CLEAR | VERW clears CPU buffers |
+| MMX | standard MMX |
+| MMXEXT | SSE integer functions or AMD MMX ext |
+| MOVBE | MOVBE instruction (big-endian) |
+| MOVDIR64B | Move 64 Bytes as Direct Store |
+| MOVDIRI | Move Doubleword as Direct Store |
+| MOVSB_ZL | Fast Zero-Length MOVSB |
+| MPX | Intel MPX (Memory Protection Extensions) |
+| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD |
+| MSRIRC | Instruction Retired Counter MSR available |
+| MSRLIST | Read/Write List of Model Specific Registers |
+| MSR_PAGEFLUSH | Page Flush MSR available |
+| NRIPS | Indicates support for NRIP save on VMEXIT |
+| NX | NX (No-Execute) bit |
+| OSXSAVE | XSAVE enabled by OS |
+| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption |
+| POPCNT | POPCNT instruction |
+| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled |
+| PREFETCHI | PREFETCHIT0/1 instructions |
+| PSFD | Predictive Store Forward Disable |
+| RDPRU | RDPRU instruction supported |
+| RDRAND | RDRAND instruction is available |
+| RDSEED | RDSEED instruction is available |
+| RDTSCP | RDTSCP Instruction |
+| RRSBA_CTRL | Restricted RSB Alternate |
+| RTM | Restricted Transactional Memory |
+| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. |
+| SERIALIZE | Serialize Instruction Execution |
+| SEV | AMD Secure Encrypted Virtualization supported |
+| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host |
+| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported |
+| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests |
+| SEV_ES | AMD SEV Encrypted State supported |
+| SEV_RESTRICTED | AMD SEV Restricted Injection supported |
+| SEV_SNP | AMD SEV Secure Nested Paging supported |
+| SGX | Software Guard Extensions |
+| SGXLC | Software Guard Extensions Launch Control |
+| SHA | Intel SHA Extensions |
+| SME | AMD Secure Memory Encryption supported |
+| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced |
+| SPEC_CTRL_SSBD | Speculative Store Bypass Disable |
+| SRBDS_CTRL | SRBDS mitigation MSR available |
+| SSE | SSE functions |
+| SSE2 | P4 SSE functions |
+| SSE3 | Prescott SSE3 functions |
+| SSE4 | Penryn SSE4.1 functions |
+| SSE42 | Nehalem SSE4.2 functions |
+| SSE4A | AMD Barcelona microarchitecture SSE4a instructions |
+| SSSE3 | Conroe SSSE3 functions |
+| STIBP | Single Thread Indirect Branch Predictors |
+| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On |
+| STOSB_SHORT | Fast short STOSB |
+| SUCCOR | Software uncorrectable error containment and recovery capability. |
+| SVM | AMD Secure Virtual Machine |
+| SVMDA | Indicates support for the SVM decode assists. |
+| SVMFBASID | SVM: Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCB TLB_Control |
+| SVML | AMD SVM lock. Indicates support for SVM-Lock. |
+| SVMNP | AMD SVM nested paging |
+| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter |
+| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold |
+| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
+| SYSEE | SYSENTER and SYSEXIT instructions |
+| TBM | AMD Trailing Bit Manipulation |
+| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
+| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
+| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |
+| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 |
+| TSXLDTRK | Intel TSX Suspend Load Address Tracking |
+| VAES | Vector AES. AVX(512) versions requires additional checks. |
+| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. |
+| VMPL | AMD VM Permission Levels supported |
+| VMSA_REGPROT | AMD VMSA Register Protection supported |
+| VMX | Virtual Machine Extensions |
+| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. |
+| VTE | AMD Virtual Transparent Encryption supported |
+| WAITPKG | TPAUSE, UMONITOR, UMWAIT |
+| WBNOINVD | Write Back and Do Not Invalidate Cache |
+| WRMSRNS | Non-Serializing Write to Model Specific Register |
+| X87 | FPU |
+| XGETBV1 | Supports XGETBV with ECX = 1 |
+| XOP | Bulldozer XOP functions |
+| XSAVE | XSAVE, XRESTOR, XSETBV, XGETBV |
+| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. |
+| XSAVEOPT | XSAVEOPT available |
+| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS |
+
+### arm64
+
+| Feature Flag | Description |
+|--------------|------------------------------------------------------------------|
+| AESARM | AES instructions |
+| ARMCPUID | Some CPU ID registers readable at user-level |
+| ASIMD | Advanced SIMD |
+| ASIMDDP | SIMD Dot Product |
+| ASIMDHP | Advanced SIMD half-precision floating point |
+| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) |
+| ATOMICS | Large System Extensions (LSE) |
+| CRC32 | CRC32/CRC32C instructions |
+| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
+| EVTSTRM | Generic timer |
+| FCMA | Floating point complex number addition and multiplication |
+| FP | Single-precision and double-precision floating point |
+| FPHP | Half-precision floating point |
+| GPA | Generic Pointer Authentication |
+| JSCVT | Javascript-style double->int convert (FJCVTZS) |
+| LRCPC | Weaker release consistency (LDAPR, etc) |
+| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
+| SHA1 | SHA-1 instructions (SHA1C, etc) |
+| SHA2 | SHA-2 instructions (SHA256H, etc) |
+| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) |
+| SHA512 | SHA512 instructions |
+| SM3 | SM3 instructions |
+| SM4 | SM4 instructions |
+| SVE | Scalable Vector Extension |
+
# license
This code is published under an MIT license. See LICENSE file for more information.
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index 701f2385b6..89a861d4f7 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -73,6 +73,7 @@ const (
AMD3DNOW // AMD 3DNOW
AMD3DNOWEXT // AMD 3DNowExt
AMXBF16 // Tile computational operations on BFLOAT16 numbers
+ AMXFP16 // Tile computational operations on FP16 numbers
AMXINT8 // Tile computational operations on 8-bit integers
AMXTILE // Tile architecture
AVX // AVX functions
@@ -93,8 +94,12 @@ const (
AVX512VNNI // AVX-512 Vector Neural Network Instructions
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+ AVXIFMA // AVX-IFMA instructions
+ AVXNECONVERT // AVX-NE-CONVERT instructions
AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one
AVXVNNI // AVX (VEX encoded) VNNI neural network instructions
+ AVXVNNIINT8 // AVX-VNNI-INT8 instructions
+ BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598
BMI1 // Bit Manipulation Instruction Set 1
BMI2 // Bit Manipulation Instruction Set 2
CETIBT // Intel CET Indirect Branch Tracking
@@ -103,15 +108,22 @@ const (
CLMUL // Carry-less Multiplication
CLZERO // CLZERO instruction supported
CMOV // i686 CMOV
+ CMPCCXADD // CMPCCXADD instructions
CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB
CMPXCHG8 // CMPXCHG8 instruction
CPBOOST // Core Performance Boost
+ CPPC // AMD: Collaborative Processor Performance Control
CX16 // CMPXCHG16B Instruction
+ EFER_LMSLE_UNS // AMD: Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
ENQCMD // Enqueue Command
ERMS // Enhanced REP MOVSB/STOSB
F16C // Half-precision floating-point conversion
+ FLUSH_L1D // Flush L1D cache
FMA3 // Intel FMA 3. Does not imply AVX.
FMA4 // Bulldozer FMA4 functions
+ FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
+ FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
+ FSRM // Fast Short Rep Mov
FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
FXSROPT // FXSAVE/FXRSTOR optimizations
GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
@@ -119,8 +131,14 @@ const (
HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
HTT // Hyperthreading (enabled)
HWA // Hardware assert supported. Indicates support for MSRC001_10
+ HYBRID_CPU // This part has CPUs of more than one type.
HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
+ IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel)
+ IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR
IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+ IBRS // AMD: Indirect Branch Restricted Speculation
+ IBRS_PREFERRED // AMD: IBRS is preferred over software solution
+ IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection
IBS // Instruction Based Sampling (AMD)
IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
@@ -130,7 +148,12 @@ const (
IBSOPSAM // Instruction Based Sampling Feature (AMD)
IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+ IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported
+ IBS_OPDATA4 // AMD: IBS op data 4 MSR supported
+ IBS_OPFUSE // AMD: Indicates support for IbsOpFuse
IBS_PREVENTHOST // Disallowing IBS use by the host supported
+ IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4
+ IDPRED_CTRL // IPRED_DIS
INT_WBINVD // WBINVD/WBNOINVD are interruptible.
INVLPGB // NVLPGB and TLBSYNC instruction supported
LAHF // LAHF/SAHF in long mode
@@ -138,28 +161,35 @@ const (
LBRVIRT // LBR virtualization
LZCNT // LZCNT instruction
MCAOVERFLOW // MCA overflow recovery support.
+ MCDT_NO // Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it.
MCOMMIT // MCOMMIT instruction supported
+ MD_CLEAR // VERW clears CPU buffers
MMX // standard MMX
MMXEXT // SSE integer functions or AMD MMX ext
MOVBE // MOVBE instruction (big-endian)
MOVDIR64B // Move 64 Bytes as Direct Store
MOVDIRI // Move Doubleword as Direct Store
MOVSB_ZL // Fast Zero-Length MOVSB
+ MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD
MPX // Intel MPX (Memory Protection Extensions)
MSRIRC // Instruction Retired Counter MSR available
+ MSRLIST // Read/Write List of Model Specific Registers
MSR_PAGEFLUSH // Page Flush MSR available
NRIPS // Indicates support for NRIP save on VMEXIT
NX // NX (No-Execute) bit
OSXSAVE // XSAVE enabled by OS
PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption
POPCNT // POPCNT instruction
+ PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled
+ PREFETCHI // PREFETCHIT0/1 instructions
+ PSFD // Predictive Store Forward Disable
RDPRU // RDPRU instruction supported
RDRAND // RDRAND instruction is available
RDSEED // RDSEED instruction is available
RDTSCP // RDTSCP Instruction
+ RRSBA_CTRL // Restricted RSB Alternate
RTM // Restricted Transactional Memory
RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
- SCE // SYSENTER and SYSEXIT instructions
SERIALIZE // Serialize Instruction Execution
SEV // AMD Secure Encrypted Virtualization supported
SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host
@@ -173,6 +203,8 @@ const (
SHA // Intel SHA Extensions
SME // AMD Secure Memory Encryption supported
SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced
+ SPEC_CTRL_SSBD // Speculative Store Bypass Disable
+ SRBDS_CTRL // SRBDS mitigation MSR available
SSE // SSE functions
SSE2 // P4 SSE functions
SSE3 // Prescott SSE3 functions
@@ -181,6 +213,7 @@ const (
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
SSSE3 // Conroe SSSE3 functions
STIBP // Single Thread Indirect Branch Predictors
+ STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On
STOSB_SHORT // Fast short STOSB
SUCCOR // Software uncorrectable error containment and recovery capability.
SVM // AMD Secure Virtual Machine
@@ -190,8 +223,12 @@ const (
SVMNP // AMD SVM nested paging
SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter
SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold
+ SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
+ SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
+ TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
+ TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
TSXLDTRK // Intel TSX Suspend Load Address Tracking
VAES // Vector AES. AVX(512) versions requires additional checks.
@@ -203,6 +240,7 @@ const (
VTE // AMD Virtual Transparent Encryption supported
WAITPKG // TPAUSE, UMONITOR, UMWAIT
WBNOINVD // Write Back and Do Not Invalidate Cache
+ WRMSRNS // Non-Serializing Write to Model Specific Register
X87 // FPU
XGETBV1 // Supports XGETBV with ECX = 1
XOP // Bulldozer XOP functions
@@ -253,6 +291,7 @@ type CPUInfo struct {
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
Family int // CPU family number
Model int // CPU model number
+ Stepping int // CPU stepping info
CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
BoostFreq int64 // Max clock speed, if known, 0 otherwise
@@ -355,30 +394,61 @@ func (c CPUInfo) Supports(ids ...FeatureID) bool {
// Has allows for checking a single feature.
// Should be inlined by the compiler.
-func (c CPUInfo) Has(id FeatureID) bool {
+func (c *CPUInfo) Has(id FeatureID) bool {
return c.featureSet.inSet(id)
}
+// AnyOf returns whether the CPU supports one or more of the requested features.
+func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
+ for _, id := range ids {
+ if c.featureSet.inSet(id) {
+ return true
+ }
+ }
+ return false
+}
+
+// Features contains several features combined for a fast check using
+// CPUInfo.HasAll.
+type Features *flagSet
+
+// CombineFeatures combines several features into one set for a close-to-constant-time lookup.
+func CombineFeatures(ids ...FeatureID) Features {
+ var v flagSet
+ for _, id := range ids {
+ v.set(id)
+ }
+ return &v
+}
+
+func (c *CPUInfo) HasAll(f Features) bool {
+ return c.featureSet.hasSetP(f)
+}
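// A minimal usage sketch, assuming the package-level cpuid.CPU variable and
// illustrative feature choices: combine once, then HasAll costs only a few
// AND/compare operations per 64-feature word.
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

var fastPath = cpuid.CombineFeatures(cpuid.AVX2, cpuid.BMI2)

func main() {
	fmt.Println("fast path supported:", cpuid.CPU.HasAll(fastPath))
	fmt.Println("any AVX flavour:", cpuid.CPU.AnyOf(cpuid.AVX, cpuid.AVX2, cpuid.AVX512F))
}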
+
// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
-var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
-var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
-var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
-var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
+var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
+var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
// X64Level returns the microarchitecture level detected on the CPU.
// If features are lacking or the CPU is not in x64 mode, 0 is returned.
// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
func (c CPUInfo) X64Level() int {
- if c.featureSet.hasSet(level4Features) {
+ if !c.featureSet.hasOneOf(oneOfLevel) {
+ return 0
+ }
+ if c.featureSet.hasSetP(level4Features) {
return 4
}
- if c.featureSet.hasSet(level3Features) {
+ if c.featureSet.hasSetP(level3Features) {
return 3
}
- if c.featureSet.hasSet(level2Features) {
+ if c.featureSet.hasSetP(level2Features) {
return 2
}
- if c.featureSet.hasSet(level1Features) {
+ if c.featureSet.hasSetP(level1Features) {
return 1
}
return 0
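// A minimal gating sketch, assuming the package-level cpuid.CPU variable;
// level 3 corresponds to GOAMD64=v3 (AVX2, BMI2, FMA3, etc.).
package main

import (
	"fmt"
	"os"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	if level := cpuid.CPU.X64Level(); level < 3 {
		fmt.Fprintf(os.Stderr, "need x86-64-v3, detected level %d\n", level)
		os.Exit(1)
	}
}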
@@ -542,7 +612,7 @@ const flagMask = flagBits - 1
// flagSet contains detected cpu features and characteristics in an array of flags
type flagSet [(lastID + flagMask) / flagBits]flags
-func (s flagSet) inSet(feat FeatureID) bool {
+func (s *flagSet) inSet(feat FeatureID) bool {
return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
}
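// The indexing above is plain bit-set arithmetic: with flagBits = 64,
// flagBitsLog2 = 6 and flagMask = 63, a FeatureID selects word id>>6 (id/64)
// and bit id&63 (id%64). For example, id 130 tests s[2] & (1 << 2):
//
//	word := 130 >> 6 // 2
//	bit := 130 & 63  // 2
//	set := s[word]&(1<<bit) != 0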
@@ -572,7 +642,17 @@ func (s *flagSet) or(other flagSet) {
}
// hasSet returns whether all features are present.
-func (s flagSet) hasSet(other flagSet) bool {
+func (s *flagSet) hasSet(other flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != v {
+ return false
+ }
+ }
+ return true
+}
+
+// hasSetP returns whether all features in other are present.
+func (s *flagSet) hasSetP(other *flagSet) bool {
for i, v := range other[:] {
if s[i]&v != v {
return false
@@ -581,8 +661,18 @@ func (s flagSet) hasSet(other flagSet) bool {
return true
}
+// hasOneOf returns whether one or more features are present.
+func (s *flagSet) hasOneOf(other *flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != 0 {
+ return true
+ }
+ }
+ return false
+}
+
// nEnabled will return the number of enabled flags.
-func (s flagSet) nEnabled() (n int) {
+func (s *flagSet) nEnabled() (n int) {
for _, v := range s[:] {
n += bits.OnesCount64(uint64(v))
}
@@ -677,7 +767,7 @@ func threadsPerCore() int {
if vend == AMD {
// Workaround for AMD returning 0, assume 2 if >= Zen 2
// It will be more correct than not.
- fam, _ := familyModel()
+ fam, _, _ := familyModel()
_, _, _, d := cpuid(1)
if (d&(1<<28)) != 0 && fam >= 23 {
return 2
@@ -715,14 +805,27 @@ func logicalCores() int {
}
}
-func familyModel() (int, int) {
+func familyModel() (family, model, stepping int) {
if maxFunctionID() < 0x1 {
- return 0, 0
+ return 0, 0, 0
}
eax, _, _, _ := cpuid(1)
- family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
- model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
- return int(family), int(model)
+ // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0].
+ family = int((eax >> 8) & 0xf)
+ extFam := family == 0x6 // Intel is 0x6, needs extended model.
+ if family == 0xf {
+ // Add ExtFamily
+ family += int((eax >> 20) & 0xff)
+ extFam = true
+ }
+ // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0].
+ model = int((eax >> 4) & 0xf)
+ if extFam {
+ // Add ExtModel
+ model += int((eax >> 12) & 0xf0)
+ }
+ stepping = int(eax & 0xf)
+ return family, model, stepping
}
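// A worked example of the decode above, using an assumed raw value
// EAX = 0xA20F12 (AMD Zen 3 "Vermeer" B2): BaseFamily is 0xF, so
// ExtendedFamily 0x0A is added; ExtendedModel 0x2 shifts in above
// BaseModel 0x1; the low nibble is the stepping.
package main

import "fmt"

func main() {
	eax := uint32(0xA20F12)
	family := int((eax >> 8) & 0xf)
	extFam := family == 0x6 // Intel needs the extended model.
	if family == 0xf {
		family += int((eax >> 20) & 0xff) // 15 + 10 = 25
		extFam = true
	}
	model := int((eax >> 4) & 0xf)
	if extFam {
		model += int((eax >> 12) & 0xf0) // 1 + 32 = 33
	}
	fmt.Println(family, model, int(eax&0xf)) // 25 33 2
}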
func physicalCores() int {
@@ -857,7 +960,7 @@ func (c *CPUInfo) cacheSize() {
c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
- if maxExtendedFunction() < 0x8000001D {
+ if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) {
return
}
@@ -974,14 +1077,13 @@ func support() flagSet {
if mfi < 0x1 {
return fs
}
- family, model := familyModel()
+ family, model, _ := familyModel()
_, _, c, d := cpuid(1)
fs.setIf((d&(1<<0)) != 0, X87)
fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
- fs.setIf((d&(1<<11)) != 0, SCE)
+ fs.setIf((d&(1<<11)) != 0, SYSEE)
fs.setIf((d&(1<<15)) != 0, CMOV)
- fs.setIf((d&(1<<22)) != 0, MMXEXT)
fs.setIf((d&(1<<23)) != 0, MMX)
fs.setIf((d&(1<<24)) != 0, FXSR)
fs.setIf((d&(1<<25)) != 0, FXSROPT)
@@ -989,9 +1091,9 @@ func support() flagSet {
fs.setIf((d&(1<<26)) != 0, SSE2)
fs.setIf((c&1) != 0, SSE3)
fs.setIf((c&(1<<5)) != 0, VMX)
- fs.setIf((c&0x00000200) != 0, SSSE3)
- fs.setIf((c&0x00080000) != 0, SSE4)
- fs.setIf((c&0x00100000) != 0, SSE42)
+ fs.setIf((c&(1<<9)) != 0, SSSE3)
+ fs.setIf((c&(1<<19)) != 0, SSE4)
+ fs.setIf((c&(1<<20)) != 0, SSE42)
fs.setIf((c&(1<<25)) != 0, AESNI)
fs.setIf((c&(1<<1)) != 0, CLMUL)
fs.setIf(c&(1<<22) != 0, MOVBE)
@@ -1068,21 +1170,36 @@ func support() flagSet {
fs.setIf(ecx&(1<<30) != 0, SGXLC)
// CPUID.(EAX=7, ECX=0).EDX
+ fs.setIf(edx&(1<<4) != 0, FSRM)
+ fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL)
+ fs.setIf(edx&(1<<10) != 0, MD_CLEAR)
fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
fs.setIf(edx&(1<<14) != 0, SERIALIZE)
+ fs.setIf(edx&(1<<15) != 0, HYBRID_CPU)
fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
fs.setIf(edx&(1<<18) != 0, PCONFIG)
fs.setIf(edx&(1<<20) != 0, CETIBT)
fs.setIf(edx&(1<<26) != 0, IBPB)
fs.setIf(edx&(1<<27) != 0, STIBP)
+ fs.setIf(edx&(1<<28) != 0, FLUSH_L1D)
+ fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP)
+ fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
+ fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
+
+ // CPUID.(EAX=7, ECX=1).EDX
+ // Note: edx still holds the subleaf-0 value here; read subleaf 1 explicitly.
+ _, _, _, edx1 := cpuidex(7, 1)
+ fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
+ fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
+ fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
- // CPUID.(EAX=7, ECX=1)
+ // CPUID.(EAX=7, ECX=1).EAX
eax1, _, _, _ := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
+ fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT)
fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT)
fs.setIf(eax1&(1<<22) != 0, HRESET)
+ fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
// Only detect AVX-512 features if XGETBV is supported
@@ -1120,9 +1237,22 @@ func support() flagSet {
fs.setIf(edx&(1<<25) != 0, AMXINT8)
// eax1 = CPUID.(EAX=7, ECX=1).EAX
fs.setIf(eax1&(1<<5) != 0, AVX512BF16)
+ fs.setIf(eax1&(1<<19) != 0, WRMSRNS)
+ fs.setIf(eax1&(1<<21) != 0, AMXFP16)
+ fs.setIf(eax1&(1<<27) != 0, MSRLIST)
}
}
+
+ // CPUID.(EAX=7, ECX=2)
+ _, _, _, edx = cpuidex(7, 2)
+ fs.setIf(edx&(1<<0) != 0, PSFD)
+ fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL)
+ fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL)
+ fs.setIf(edx&(1<<4) != 0, BHI_CTRL)
+ fs.setIf(edx&(1<<5) != 0, MCDT_NO)
+
}
+
// Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
// EAX
// Bit 00: XSAVEOPT is available.
@@ -1156,20 +1286,24 @@ func support() flagSet {
fs.setIf((c&(1<<2)) != 0, SVM)
fs.setIf((c&(1<<6)) != 0, SSE4A)
fs.setIf((c&(1<<10)) != 0, IBS)
+ fs.setIf((c&(1<<22)) != 0, TOPEXT)
// EDX
- fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
- fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
- fs.setIf((d&(1<<23)) != 0, MMX)
- fs.setIf((d&(1<<22)) != 0, MMXEXT)
+ fs.setIf(d&(1<<11) != 0, SYSCALL)
fs.setIf(d&(1<<20) != 0, NX)
+ fs.setIf(d&(1<<22) != 0, MMXEXT)
+ fs.setIf(d&(1<<23) != 0, MMX)
+ fs.setIf(d&(1<<24) != 0, FXSR)
+ fs.setIf(d&(1<<25) != 0, FXSROPT)
fs.setIf(d&(1<<27) != 0, RDTSCP)
+ fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT)
+ fs.setIf(d&(1<<31) != 0, AMD3DNOW)
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
* used unless the OS has AVX support. */
if fs.inSet(AVX) {
- fs.setIf((c&0x00000800) != 0, XOP)
- fs.setIf((c&0x00010000) != 0, FMA4)
+ fs.setIf((c&(1<<11)) != 0, XOP)
+ fs.setIf((c&(1<<16)) != 0, FMA4)
}
}
@@ -1183,9 +1317,21 @@ func support() flagSet {
if maxExtendedFunction() >= 0x80000008 {
_, b, _, _ := cpuid(0x80000008)
+ fs.setIf(b&(1<<28) != 0, PSFD)
+ fs.setIf(b&(1<<27) != 0, CPPC)
+ fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD)
+ fs.setIf(b&(1<<23) != 0, PPIN)
+ fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED)
+ fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS)
+ fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP)
+ fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED)
+ fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON)
+ fs.setIf(b&(1<<15) != 0, STIBP)
+ fs.setIf(b&(1<<14) != 0, IBRS)
+ fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
+ fs.setIf(b&(1<<12) != 0, IBPB)
fs.setIf((b&(1<<9)) != 0, WBNOINVD)
fs.setIf((b&(1<<8)) != 0, MCOMMIT)
- fs.setIf((b&(1<<13)) != 0, INT_WBINVD)
fs.setIf((b&(1<<4)) != 0, RDPRU)
fs.setIf((b&(1<<3)) != 0, INVLPGB)
fs.setIf((b&(1<<1)) != 0, MSRIRC)
@@ -1206,6 +1352,13 @@ func support() flagSet {
fs.setIf((edx>>12)&1 == 1, SVMPFT)
}
+ if maxExtendedFunction() >= 0x8000001a {
+ eax, _, _, _ := cpuid(0x8000001a)
+ fs.setIf((eax>>0)&1 == 1, FP128)
+ fs.setIf((eax>>1)&1 == 1, MOVU)
+ fs.setIf((eax>>2)&1 == 1, FP256)
+ }
+
if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) {
eax, _, _, _ := cpuid(0x8000001b)
fs.setIf((eax>>0)&1 == 1, IBSFFV)
@@ -1216,6 +1369,10 @@ func support() flagSet {
fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT)
fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT)
fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK)
+ fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE)
+ fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX)
+ fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1.
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4)
}
if maxExtendedFunction() >= 0x8000001f && vend == AMD {
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
index 35678d8a3e..c946824ec0 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -24,7 +24,7 @@ func addInfo(c *CPUInfo, safe bool) {
c.maxExFunc = maxExtendedFunction()
c.BrandName = brandName()
c.CacheLine = cacheLine()
- c.Family, c.Model = familyModel()
+ c.Family, c.Model, c.Stepping = familyModel()
c.featureSet = support()
c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC))
c.ThreadsPerCore = threadsPerCore()
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
index a9b3e36c70..2a27f44d38 100644
--- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -13,174 +13,212 @@ func _() {
_ = x[AMD3DNOW-3]
_ = x[AMD3DNOWEXT-4]
_ = x[AMXBF16-5]
- _ = x[AMXINT8-6]
- _ = x[AMXTILE-7]
- _ = x[AVX-8]
- _ = x[AVX2-9]
- _ = x[AVX512BF16-10]
- _ = x[AVX512BITALG-11]
- _ = x[AVX512BW-12]
- _ = x[AVX512CD-13]
- _ = x[AVX512DQ-14]
- _ = x[AVX512ER-15]
- _ = x[AVX512F-16]
- _ = x[AVX512FP16-17]
- _ = x[AVX512IFMA-18]
- _ = x[AVX512PF-19]
- _ = x[AVX512VBMI-20]
- _ = x[AVX512VBMI2-21]
- _ = x[AVX512VL-22]
- _ = x[AVX512VNNI-23]
- _ = x[AVX512VP2INTERSECT-24]
- _ = x[AVX512VPOPCNTDQ-25]
- _ = x[AVXSLOW-26]
- _ = x[AVXVNNI-27]
- _ = x[BMI1-28]
- _ = x[BMI2-29]
- _ = x[CETIBT-30]
- _ = x[CETSS-31]
- _ = x[CLDEMOTE-32]
- _ = x[CLMUL-33]
- _ = x[CLZERO-34]
- _ = x[CMOV-35]
- _ = x[CMPSB_SCADBS_SHORT-36]
- _ = x[CMPXCHG8-37]
- _ = x[CPBOOST-38]
- _ = x[CX16-39]
- _ = x[ENQCMD-40]
- _ = x[ERMS-41]
- _ = x[F16C-42]
- _ = x[FMA3-43]
- _ = x[FMA4-44]
- _ = x[FXSR-45]
- _ = x[FXSROPT-46]
- _ = x[GFNI-47]
- _ = x[HLE-48]
- _ = x[HRESET-49]
- _ = x[HTT-50]
- _ = x[HWA-51]
- _ = x[HYPERVISOR-52]
- _ = x[IBPB-53]
- _ = x[IBS-54]
- _ = x[IBSBRNTRGT-55]
- _ = x[IBSFETCHSAM-56]
- _ = x[IBSFFV-57]
- _ = x[IBSOPCNT-58]
- _ = x[IBSOPCNTEXT-59]
- _ = x[IBSOPSAM-60]
- _ = x[IBSRDWROPCNT-61]
- _ = x[IBSRIPINVALIDCHK-62]
- _ = x[IBS_PREVENTHOST-63]
- _ = x[INT_WBINVD-64]
- _ = x[INVLPGB-65]
- _ = x[LAHF-66]
- _ = x[LAM-67]
- _ = x[LBRVIRT-68]
- _ = x[LZCNT-69]
- _ = x[MCAOVERFLOW-70]
- _ = x[MCOMMIT-71]
- _ = x[MMX-72]
- _ = x[MMXEXT-73]
- _ = x[MOVBE-74]
- _ = x[MOVDIR64B-75]
- _ = x[MOVDIRI-76]
- _ = x[MOVSB_ZL-77]
- _ = x[MPX-78]
- _ = x[MSRIRC-79]
- _ = x[MSR_PAGEFLUSH-80]
- _ = x[NRIPS-81]
- _ = x[NX-82]
- _ = x[OSXSAVE-83]
- _ = x[PCONFIG-84]
- _ = x[POPCNT-85]
- _ = x[RDPRU-86]
- _ = x[RDRAND-87]
- _ = x[RDSEED-88]
- _ = x[RDTSCP-89]
- _ = x[RTM-90]
- _ = x[RTM_ALWAYS_ABORT-91]
- _ = x[SCE-92]
- _ = x[SERIALIZE-93]
- _ = x[SEV-94]
- _ = x[SEV_64BIT-95]
- _ = x[SEV_ALTERNATIVE-96]
- _ = x[SEV_DEBUGSWAP-97]
- _ = x[SEV_ES-98]
- _ = x[SEV_RESTRICTED-99]
- _ = x[SEV_SNP-100]
- _ = x[SGX-101]
- _ = x[SGXLC-102]
- _ = x[SHA-103]
- _ = x[SME-104]
- _ = x[SME_COHERENT-105]
- _ = x[SSE-106]
- _ = x[SSE2-107]
- _ = x[SSE3-108]
- _ = x[SSE4-109]
- _ = x[SSE42-110]
- _ = x[SSE4A-111]
- _ = x[SSSE3-112]
- _ = x[STIBP-113]
- _ = x[STOSB_SHORT-114]
- _ = x[SUCCOR-115]
- _ = x[SVM-116]
- _ = x[SVMDA-117]
- _ = x[SVMFBASID-118]
- _ = x[SVML-119]
- _ = x[SVMNP-120]
- _ = x[SVMPF-121]
- _ = x[SVMPFT-122]
- _ = x[TBM-123]
- _ = x[TME-124]
- _ = x[TSCRATEMSR-125]
- _ = x[TSXLDTRK-126]
- _ = x[VAES-127]
- _ = x[VMCBCLEAN-128]
- _ = x[VMPL-129]
- _ = x[VMSA_REGPROT-130]
- _ = x[VMX-131]
- _ = x[VPCLMULQDQ-132]
- _ = x[VTE-133]
- _ = x[WAITPKG-134]
- _ = x[WBNOINVD-135]
- _ = x[X87-136]
- _ = x[XGETBV1-137]
- _ = x[XOP-138]
- _ = x[XSAVE-139]
- _ = x[XSAVEC-140]
- _ = x[XSAVEOPT-141]
- _ = x[XSAVES-142]
- _ = x[AESARM-143]
- _ = x[ARMCPUID-144]
- _ = x[ASIMD-145]
- _ = x[ASIMDDP-146]
- _ = x[ASIMDHP-147]
- _ = x[ASIMDRDM-148]
- _ = x[ATOMICS-149]
- _ = x[CRC32-150]
- _ = x[DCPOP-151]
- _ = x[EVTSTRM-152]
- _ = x[FCMA-153]
- _ = x[FP-154]
- _ = x[FPHP-155]
- _ = x[GPA-156]
- _ = x[JSCVT-157]
- _ = x[LRCPC-158]
- _ = x[PMULL-159]
- _ = x[SHA1-160]
- _ = x[SHA2-161]
- _ = x[SHA3-162]
- _ = x[SHA512-163]
- _ = x[SM3-164]
- _ = x[SM4-165]
- _ = x[SVE-166]
- _ = x[lastID-167]
+ _ = x[AMXFP16-6]
+ _ = x[AMXINT8-7]
+ _ = x[AMXTILE-8]
+ _ = x[AVX-9]
+ _ = x[AVX2-10]
+ _ = x[AVX512BF16-11]
+ _ = x[AVX512BITALG-12]
+ _ = x[AVX512BW-13]
+ _ = x[AVX512CD-14]
+ _ = x[AVX512DQ-15]
+ _ = x[AVX512ER-16]
+ _ = x[AVX512F-17]
+ _ = x[AVX512FP16-18]
+ _ = x[AVX512IFMA-19]
+ _ = x[AVX512PF-20]
+ _ = x[AVX512VBMI-21]
+ _ = x[AVX512VBMI2-22]
+ _ = x[AVX512VL-23]
+ _ = x[AVX512VNNI-24]
+ _ = x[AVX512VP2INTERSECT-25]
+ _ = x[AVX512VPOPCNTDQ-26]
+ _ = x[AVXIFMA-27]
+ _ = x[AVXNECONVERT-28]
+ _ = x[AVXSLOW-29]
+ _ = x[AVXVNNI-30]
+ _ = x[AVXVNNIINT8-31]
+ _ = x[BHI_CTRL-32]
+ _ = x[BMI1-33]
+ _ = x[BMI2-34]
+ _ = x[CETIBT-35]
+ _ = x[CETSS-36]
+ _ = x[CLDEMOTE-37]
+ _ = x[CLMUL-38]
+ _ = x[CLZERO-39]
+ _ = x[CMOV-40]
+ _ = x[CMPCCXADD-41]
+ _ = x[CMPSB_SCADBS_SHORT-42]
+ _ = x[CMPXCHG8-43]
+ _ = x[CPBOOST-44]
+ _ = x[CPPC-45]
+ _ = x[CX16-46]
+ _ = x[EFER_LMSLE_UNS-47]
+ _ = x[ENQCMD-48]
+ _ = x[ERMS-49]
+ _ = x[F16C-50]
+ _ = x[FLUSH_L1D-51]
+ _ = x[FMA3-52]
+ _ = x[FMA4-53]
+ _ = x[FP128-54]
+ _ = x[FP256-55]
+ _ = x[FSRM-56]
+ _ = x[FXSR-57]
+ _ = x[FXSROPT-58]
+ _ = x[GFNI-59]
+ _ = x[HLE-60]
+ _ = x[HRESET-61]
+ _ = x[HTT-62]
+ _ = x[HWA-63]
+ _ = x[HYBRID_CPU-64]
+ _ = x[HYPERVISOR-65]
+ _ = x[IA32_ARCH_CAP-66]
+ _ = x[IA32_CORE_CAP-67]
+ _ = x[IBPB-68]
+ _ = x[IBRS-69]
+ _ = x[IBRS_PREFERRED-70]
+ _ = x[IBRS_PROVIDES_SMP-71]
+ _ = x[IBS-72]
+ _ = x[IBSBRNTRGT-73]
+ _ = x[IBSFETCHSAM-74]
+ _ = x[IBSFFV-75]
+ _ = x[IBSOPCNT-76]
+ _ = x[IBSOPCNTEXT-77]
+ _ = x[IBSOPSAM-78]
+ _ = x[IBSRDWROPCNT-79]
+ _ = x[IBSRIPINVALIDCHK-80]
+ _ = x[IBS_FETCH_CTLX-81]
+ _ = x[IBS_OPDATA4-82]
+ _ = x[IBS_OPFUSE-83]
+ _ = x[IBS_PREVENTHOST-84]
+ _ = x[IBS_ZEN4-85]
+ _ = x[IDPRED_CTRL-86]
+ _ = x[INT_WBINVD-87]
+ _ = x[INVLPGB-88]
+ _ = x[LAHF-89]
+ _ = x[LAM-90]
+ _ = x[LBRVIRT-91]
+ _ = x[LZCNT-92]
+ _ = x[MCAOVERFLOW-93]
+ _ = x[MCDT_NO-94]
+ _ = x[MCOMMIT-95]
+ _ = x[MD_CLEAR-96]
+ _ = x[MMX-97]
+ _ = x[MMXEXT-98]
+ _ = x[MOVBE-99]
+ _ = x[MOVDIR64B-100]
+ _ = x[MOVDIRI-101]
+ _ = x[MOVSB_ZL-102]
+ _ = x[MOVU-103]
+ _ = x[MPX-104]
+ _ = x[MSRIRC-105]
+ _ = x[MSRLIST-106]
+ _ = x[MSR_PAGEFLUSH-107]
+ _ = x[NRIPS-108]
+ _ = x[NX-109]
+ _ = x[OSXSAVE-110]
+ _ = x[PCONFIG-111]
+ _ = x[POPCNT-112]
+ _ = x[PPIN-113]
+ _ = x[PREFETCHI-114]
+ _ = x[PSFD-115]
+ _ = x[RDPRU-116]
+ _ = x[RDRAND-117]
+ _ = x[RDSEED-118]
+ _ = x[RDTSCP-119]
+ _ = x[RRSBA_CTRL-120]
+ _ = x[RTM-121]
+ _ = x[RTM_ALWAYS_ABORT-122]
+ _ = x[SERIALIZE-123]
+ _ = x[SEV-124]
+ _ = x[SEV_64BIT-125]
+ _ = x[SEV_ALTERNATIVE-126]
+ _ = x[SEV_DEBUGSWAP-127]
+ _ = x[SEV_ES-128]
+ _ = x[SEV_RESTRICTED-129]
+ _ = x[SEV_SNP-130]
+ _ = x[SGX-131]
+ _ = x[SGXLC-132]
+ _ = x[SHA-133]
+ _ = x[SME-134]
+ _ = x[SME_COHERENT-135]
+ _ = x[SPEC_CTRL_SSBD-136]
+ _ = x[SRBDS_CTRL-137]
+ _ = x[SSE-138]
+ _ = x[SSE2-139]
+ _ = x[SSE3-140]
+ _ = x[SSE4-141]
+ _ = x[SSE42-142]
+ _ = x[SSE4A-143]
+ _ = x[SSSE3-144]
+ _ = x[STIBP-145]
+ _ = x[STIBP_ALWAYSON-146]
+ _ = x[STOSB_SHORT-147]
+ _ = x[SUCCOR-148]
+ _ = x[SVM-149]
+ _ = x[SVMDA-150]
+ _ = x[SVMFBASID-151]
+ _ = x[SVML-152]
+ _ = x[SVMNP-153]
+ _ = x[SVMPF-154]
+ _ = x[SVMPFT-155]
+ _ = x[SYSCALL-156]
+ _ = x[SYSEE-157]
+ _ = x[TBM-158]
+ _ = x[TLB_FLUSH_NESTED-159]
+ _ = x[TME-160]
+ _ = x[TOPEXT-161]
+ _ = x[TSCRATEMSR-162]
+ _ = x[TSXLDTRK-163]
+ _ = x[VAES-164]
+ _ = x[VMCBCLEAN-165]
+ _ = x[VMPL-166]
+ _ = x[VMSA_REGPROT-167]
+ _ = x[VMX-168]
+ _ = x[VPCLMULQDQ-169]
+ _ = x[VTE-170]
+ _ = x[WAITPKG-171]
+ _ = x[WBNOINVD-172]
+ _ = x[WRMSRNS-173]
+ _ = x[X87-174]
+ _ = x[XGETBV1-175]
+ _ = x[XOP-176]
+ _ = x[XSAVE-177]
+ _ = x[XSAVEC-178]
+ _ = x[XSAVEOPT-179]
+ _ = x[XSAVES-180]
+ _ = x[AESARM-181]
+ _ = x[ARMCPUID-182]
+ _ = x[ASIMD-183]
+ _ = x[ASIMDDP-184]
+ _ = x[ASIMDHP-185]
+ _ = x[ASIMDRDM-186]
+ _ = x[ATOMICS-187]
+ _ = x[CRC32-188]
+ _ = x[DCPOP-189]
+ _ = x[EVTSTRM-190]
+ _ = x[FCMA-191]
+ _ = x[FP-192]
+ _ = x[FPHP-193]
+ _ = x[GPA-194]
+ _ = x[JSCVT-195]
+ _ = x[LRCPC-196]
+ _ = x[PMULL-197]
+ _ = x[SHA1-198]
+ _ = x[SHA2-199]
+ _ = x[SHA3-200]
+ _ = x[SHA512-201]
+ _ = x[SM3-202]
+ _ = x[SM4-203]
+ _ = x[SVE-204]
+ _ = x[lastID-205]
_ = x[firstID-0]
}
-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
index d91d021094..84b1acd215 100644
--- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
@@ -83,7 +83,7 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
c.Model = sysctlGetInt(0, "machdep.cpu.model")
c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize")
c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize")
- c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize")
+ c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize")
c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize")
c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize")
diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore
index 12ecd6ae9b..8ae0384ebc 100644
--- a/vendor/github.com/minio/minio-go/v7/.gitignore
+++ b/vendor/github.com/minio/minio-go/v7/.gitignore
@@ -2,4 +2,5 @@
*.test
validator
golangci-lint
-functional_tests
\ No newline at end of file
+functional_tests
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
index ace6667054..68444aa681 100644
--- a/vendor/github.com/minio/minio-go/v7/Makefile
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -16,9 +16,11 @@ lint:
vet:
@GO111MODULE=on go vet ./...
+ @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
+ ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
test:
- @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+ @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
examples:
@echo "Building s3 examples"
@@ -28,7 +30,7 @@ examples:
functional-test:
@GO111MODULE=on go build -race functional_tests.go
- @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
+ @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
clean:
@echo "Cleaning up all the generated files"
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
index 7e2199732e..3f88d07777 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go
@@ -21,7 +21,7 @@ import (
"bytes"
"context"
"encoding/xml"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
@@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
}
}
- return ioutil.ReadAll(resp.Body)
+ return io.ReadAll(resp.Body)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
index e7edf9c971..dbb5259a81 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -18,7 +18,7 @@ package minio
import (
"context"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strings"
@@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string
}
}
- bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+ bucketPolicyBuf, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
index c80488eee1..d5895dfe04 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go
@@ -22,7 +22,7 @@ import (
"context"
"encoding/json"
"encoding/xml"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"time"
@@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str
if resp.StatusCode != http.StatusOK {
return s, httpRespToErrorResponse(resp, bucketName, "")
}
- respBytes, err := ioutil.ReadAll(resp.Body)
+ respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return s, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
index 1615f8f873..86d74298a6 100644
--- a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go
@@ -22,7 +22,6 @@ import (
"encoding/xml"
"errors"
"io"
- "io/ioutil"
"net/http"
"net/url"
@@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags
return nil, httpRespToErrorResponse(resp, bucketName, "")
}
- defer io.Copy(ioutil.Discard, resp.Body)
+ defer io.Copy(io.Discard, resp.Body)
return tags.ParseBucketXML(resp.Body)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
index 835c8bd8ad..1ba68c79ee 100644
--- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go
@@ -21,7 +21,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -516,7 +515,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
return UploadInfo{}, err
}
if dst.Progress != nil {
- io.CopyN(ioutil.Discard, dst.Progress, end-start+1)
+ io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
@@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
- completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
+ completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
index 1c0ad2be4c..0c95d91ec7 100644
--- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go
@@ -20,7 +20,6 @@ package minio
import (
"context"
"io"
- "io/ioutil"
"net/http"
)
@@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
// Update the progress properly after successful copy.
if dst.Progress != nil {
- io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
+ io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
}
cpObjRes := copyObjectResult{}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index 1f47938777..e1a34e003d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -21,6 +21,8 @@ import (
"encoding/xml"
"io"
"net/http"
+ "net/url"
+ "strings"
"time"
)
@@ -43,14 +45,14 @@ type StringMap map[string]string
// if m is nil it can be initialized, which is often the case if m is
// nested in another xml structure. This is also why the first thing done
// on the first line is initialize it.
-func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
*m = StringMap{}
- type Item struct {
- Key string
- Value string
- }
for {
- var e Item
+ // Format is <key>value</key>
+ var e struct {
+ XMLName xml.Name
+ Value string `xml:",chardata"`
+ }
err := d.Decode(&e)
if err == io.EOF {
break
@@ -58,11 +60,63 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if err != nil {
return err
}
- (*m)[e.Key] = e.Value
+ (*m)[e.XMLName.Local] = e.Value
}
return nil
}
+// URLMap represents map with custom UnmarshalXML
+type URLMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this function is on the pointer of Map is important, so that
+// if m is nil it can be initialized, which is often the case if m is
+// nested in another xml structure. This is also why the first thing done
+// on the first line is to initialize it.
+func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
+ *m = URLMap{}
+ var tgs string
+ if err := d.DecodeElement(&tgs, &se); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ for tgs != "" {
+ var key string
+ key, tgs, _ = stringsCut(tgs, "&")
+ if key == "" {
+ continue
+ }
+ key, value, _ := stringsCut(key, "=")
+ key, err := url.QueryUnescape(key)
+ if err != nil {
+ return err
+ }
+
+ value, err = url.QueryUnescape(value)
+ if err != nil {
+ return err
+ }
+ (*m)[key] = value
+ }
+ return nil
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, stringsCut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
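// A minimal decoding sketch for the URLMap above: the wire form is the
// URL-encoded x-amz-tagging string wrapped in a single element (tag names
// and values here are made up for illustration).
package main

import (
	"encoding/xml"
	"fmt"

	minio "github.com/minio/minio-go/v7"
)

func main() {
	in := []byte(`<UserTags>team=loki&amp;env=dev%2Fus</UserTags>`)
	var m minio.URLMap
	if err := xml.Unmarshal(in, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[env:dev/us team:loki]
}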
// Owner name.
type Owner struct {
XMLName xml.Name `xml:"Owner" json:"owner"`
@@ -121,10 +175,12 @@ type ObjectInfo struct {
Metadata http.Header `json:"metadata" xml:"-"`
// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
+ // Only returned by MinIO servers.
UserMetadata StringMap `json:"userMetadata,omitempty"`
// x-amz-tagging values in their k/v values.
- UserTags map[string]string `json:"userTags"`
+ // Only returned by MinIO servers.
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
// x-amz-tagging-count value
UserTagCount int
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
index 33ca394588..4ec0c87c20 100644
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -22,7 +22,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"net/http"
)
@@ -108,7 +107,7 @@ const (
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB)
const maxBodyLength = 1 << 20
- body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil {
return nil, err
}
@@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
}
}
-// errInvalidBucketName - Invalid bucket name response.
-func errInvalidBucketName(message string) error {
- return ErrorResponse{
- StatusCode: http.StatusBadRequest,
- Code: "InvalidBucketName",
- Message: message,
- RequestID: "minio",
- }
-}
-
-// errInvalidObjectName - Invalid object name response.
-func errInvalidObjectName(message string) error {
- return ErrorResponse{
- StatusCode: http.StatusNotFound,
- Code: "NoSuchKey",
- Message: message,
- RequestID: "minio",
- }
-}
-
// errInvalidArgument - Invalid argument response.
func errInvalidArgument(message string) error {
return ErrorResponse{
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
index b17f3146b8..e31e4cf929 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -23,8 +23,6 @@ import (
"fmt"
"io"
"net/http"
- "net/url"
- "strconv"
"sync"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -654,19 +652,11 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o
return nil, ObjectInfo{}, nil, err
}
- urlValues := make(url.Values)
- if opts.VersionID != "" {
- urlValues.Set("versionId", opts.VersionID)
- }
- if opts.PartNumber > 0 {
- urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber))
- }
-
// Execute GET on objectName.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
objectName: objectName,
- queryValues: urlValues,
+ queryValues: opts.toQueryValues(),
customHeader: opts.Header(),
contentSHA256Hex: emptySHA256Hex,
})
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
index 0082c1fa76..bb86a59941 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-options.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -20,6 +20,8 @@ package minio
import (
"fmt"
"net/http"
+ "net/url"
+ "strconv"
"time"
"github.com/minio/minio-go/v7/pkg/encrypt"
@@ -36,6 +38,7 @@ type AdvancedGetOptions struct {
// during GET requests.
type GetObjectOptions struct {
headers map[string]string
+ reqParams url.Values
ServerSideEncryption encrypt.ServerSide
VersionID string
PartNumber int
@@ -83,6 +86,34 @@ func (o *GetObjectOptions) Set(key, value string) {
o.headers[http.CanonicalHeaderKey(key)] = value
}
+// SetReqParam - set request query string parameter
+// supported key: see supportedQueryValues.
+// If an unsupported key is passed in, it will be ignored and nothing will be done.
+func (o *GetObjectOptions) SetReqParam(key, value string) {
+ if !isStandardQueryValue(key) {
+ // do nothing
+ return
+ }
+ if o.reqParams == nil {
+ o.reqParams = make(url.Values)
+ }
+ o.reqParams.Set(key, value)
+}
+
+// AddReqParam - add request query string parameter
+// supported key: see supportedQueryValues.
+// If an unsupported key is passed in, it will be ignored and nothing will be done.
+func (o *GetObjectOptions) AddReqParam(key, value string) {
+ if !isStandardQueryValue(key) {
+ // do nothing
+ return
+ }
+ if o.reqParams == nil {
+ o.reqParams = make(url.Values)
+ }
+ o.reqParams.Add(key, value)
+}
+
// SetMatchETag - set match etag.
func (o *GetObjectOptions) SetMatchETag(etag string) error {
if etag == "" {
@@ -149,3 +180,24 @@ func (o *GetObjectOptions) SetRange(start, end int64) error {
}
return nil
}
+
+// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters.
+func (o *GetObjectOptions) toQueryValues() url.Values {
+ urlValues := make(url.Values)
+ if o.VersionID != "" {
+ urlValues.Set("versionId", o.VersionID)
+ }
+ if o.PartNumber > 0 {
+ urlValues.Set("partNumber", strconv.Itoa(o.PartNumber))
+ }
+
+ if o.reqParams != nil {
+ for key, values := range o.reqParams {
+ for _, value := range values {
+ urlValues.Add(key, value)
+ }
+ }
+ }
+
+ return urlValues
+}
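// A minimal sketch, assuming the standard S3 "response-*" overrides (e.g.
// "response-content-type") are among the keys accepted by
// isStandardQueryValue; unsupported keys are silently dropped.
package example

import (
	"context"
	"io"

	minio "github.com/minio/minio-go/v7"
)

func fetchAsJSON(ctx context.Context, c *minio.Client, bucket, object string) (io.ReadCloser, error) {
	opts := minio.GetObjectOptions{}
	opts.SetReqParam("response-content-type", "application/json")
	// toQueryValues merges versionId, partNumber and reqParams into the request.
	return c.GetObject(ctx, bucket, object, opts)
}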
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
index d216afb397..627811cfd0 100644
--- a/vendor/github.com/minio/minio-go/v7/api-list.go
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -402,7 +402,7 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
for {
// Get list of objects a maximum of 1000 per request.
- result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers)
+ result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter)
if err != nil {
sendObjectInfo(ObjectInfo{
Err: err,
@@ -422,6 +422,8 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
IsLatest: version.IsLatest,
VersionID: version.VersionID,
IsDeleteMarker: version.isDeleteMarker,
+ UserTags: version.UserTags,
+ UserMetadata: version.UserMetadata,
}
select {
// Send object version info.
@@ -474,13 +476,13 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
+func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListVersionsResult{}, err
}
// Validate object prefix.
- if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
return ListVersionsResult{}, err
}
// Get resources properly escaped and lined up before
@@ -491,7 +493,7 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
urlValues.Set("versions", "")
// Set object prefix, prefix value to be set to empty is okay.
- urlValues.Set("prefix", prefix)
+ urlValues.Set("prefix", opts.Prefix)
// Set delimiter, delimiter value to be set to empty is okay.
urlValues.Set("delimiter", delimiter)
@@ -502,8 +504,8 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
}
// Set max keys.
- if maxkeys > 0 {
- urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+ if opts.MaxKeys > 0 {
+ urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys))
}
// Set version ID marker
@@ -511,6 +513,10 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
urlValues.Set("version-id-marker", versionIDMarker)
}
+ if opts.WithMetadata {
+ urlValues.Set("metadata", "true")
+ }
+
// Always set encoding-type
urlValues.Set("encoding-type", "url")
@@ -519,7 +525,7 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
- customHeader: headers,
+ customHeader: opts.headers,
})
defer closeResponse(resp)
if err != nil {
@@ -897,6 +903,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
}
// listObjectParts list all object parts recursively.
+//
+//lint:ignore U1000 Keep this around
func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 3c9a13ff20..85d6c70a25 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -26,7 +26,6 @@ import (
"fmt"
"hash/crc32"
"io"
- "io/ioutil"
"net/http"
"net/url"
"sort"
@@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- opts = PutObjectOptions{}
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ }
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
@@ -386,6 +387,12 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
return UploadInfo{}, err
}
+ headers := opts.Header()
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
+ headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
+ }
+
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
@@ -395,7 +402,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
- customHeader: opts.Header(),
+ customHeader: headers,
}
// Execute POST to complete multipart upload for an objectName.
@@ -412,7 +419,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
// Read resp.Body into a []bytes to parse for Error response inside the body
var b []byte
- b, err = ioutil.ReadAll(resp.Body)
+ b, err = io.ReadAll(resp.Body)
if err != nil {
return UploadInfo{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index e3a14c59d8..55b3f38e67 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -28,6 +28,7 @@ import (
"net/url"
"sort"
"strings"
+ "sync"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -44,7 +45,9 @@ import (
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
- if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+ if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+ info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+ } else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
} else {
@@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ }
if withChecksum {
// Add hash of hashes.
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
@@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
- uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
@@ -425,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ }
+ if len(crcBytes) > 0 {
+ // Add hash of hashes.
+ crc.Reset()
+ crc.Write(crcBytes)
+ opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
+// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
+func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if !opts.SendContentMd5 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ }
+
+ // Cancel all when an error occurs.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Initiates a new multipart request
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Aborts the multipart upload if the function returns
+ // any error, since we do not resume we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ nBuffers := int64(opts.NumThreads)
+ bufs := make(chan []byte, nBuffers)
+ all := make([]byte, nBuffers*partSize)
+ for i := int64(0); i < nBuffers; i++ {
+ bufs <- all[i*partSize : i*partSize+partSize]
+ }
+
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ errCh := make(chan error, opts.NumThreads)
+
+ reader = newHook(reader, opts.Progress)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ var buf []byte
+ select {
+ case buf = <-bufs:
+ case err = <-errCh:
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, err
+ }
+
+ if int64(len(buf)) != partSize {
+ return UploadInfo{}, fmt.Errorf("read buffer size %d does not match the expected partSize %d", len(buf), partSize)
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ // Done
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, rerr
+ }
+
+ // Calculate md5sum.
+ customHeader := make(http.Header)
+ if !opts.SendContentMd5 {
+ // Add CRC32C instead.
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ wg.Add(1)
+ go func(partNumber int) {
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ if opts.SendContentMd5 {
+ md5Hash := c.md5Hasher()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ md5Hash.Close()
+ }
+
+ defer wg.Done()
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: bytes.NewReader(buf[:length]),
+ partNumber: partNumber,
+ md5Base64: md5Base64,
+ size: int64(length),
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ customHeader: customHeader,
+ }
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ errCh <- uerr
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ mu.Lock()
+ partsInfo[partNumber] = objPart
+ mu.Unlock()
+
+ // Send buffer back so it can be reused.
+ bufs <- buf
+ }(partNumber)
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+ }
+ wg.Wait()
+
+ // Collect any error
+ select {
+ case err = <-errCh:
+ return UploadInfo{}, err
+ default:
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ ChecksumCRC32: part.ChecksumCRC32,
+ ChecksumCRC32C: part.ChecksumCRC32C,
+ ChecksumSHA1: part.ChecksumSHA1,
+ ChecksumSHA256: part.ChecksumSHA256,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
opts = PutObjectOptions{}
if len(crcBytes) > 0 {
// Add hash of hashes.
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 5735bee5eb..b29df17d49 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -87,7 +87,34 @@ type PutObjectOptions struct {
SendContentMd5 bool
DisableContentSha256 bool
DisableMultipart bool
- Internal AdvancedPutOptions
+
+ // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
+ // fill them serially and upload them in parallel.
+ // This can be used for faster uploads on non-seekable or slow-to-seek input.
+ ConcurrentStreamParts bool
+ Internal AdvancedPutOptions
+
+ customHeaders http.Header
+}
+
+// SetMatchETag makes the PUT fail unless the object's existing ETag matches
+// the one given (If-Match). This is a MinIO specific extension to support
+// optimistic locking semantics.
+func (opts *PutObjectOptions) SetMatchETag(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ opts.customHeaders.Set("If-Match", "\""+etag+"\"")
+}
+
+// SetMatchETagExcept makes the PUT fail if the object's existing ETag matches
+// the one given (If-None-Match). This is a MinIO specific extension to
+// support optimistic locking semantics.
+func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
+ if opts.customHeaders == nil {
+ opts.customHeaders = http.Header{}
+ }
+ opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
}
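// A minimal read-modify-write sketch using the extension above; bucket,
// object and payload are placeholders.
package example

import (
	"bytes"
	"context"

	minio "github.com/minio/minio-go/v7"
)

func replaceIfUnchanged(ctx context.Context, c *minio.Client, bucket, object string, data []byte) error {
	st, err := c.StatObject(ctx, bucket, object, minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	opts := minio.PutObjectOptions{}
	opts.SetMatchETag(st.ETag) // the PUT fails if the object changed meanwhile
	_, err = c.PutObject(ctx, bucket, object, bytes.NewReader(data), int64(len(data)), opts)
	return err
}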
// getNumThreads - gets the number of threads to be used in the multipart
@@ -182,6 +209,12 @@ func (opts PutObjectOptions) Header() (header http.Header) {
header.Set("x-amz-meta-"+k, v)
}
}
+
+ // set any other additional custom headers.
+ for k, v := range opts.customHeaders {
+ header[k] = v
+ }
+
return
}
@@ -272,6 +305,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if opts.DisableMultipart {
return UploadInfo{}, errors.New("no length provided and multipart disabled")
}
+ if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+ return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+ }
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
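Taken together, the additions above are user-visible API: ConcurrentStreamParts opts a stream of unknown length into the parallel upload path (only when NumThreads > 1, per the guard in putObjectCommon), and SetMatchETag/SetMatchETagExcept ride along as custom headers. A hedged usage sketch; the endpoint, credentials, bucket, object and ETag value are placeholders:

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	opts := minio.PutObjectOptions{
		ConcurrentStreamParts: true, // parallel uploads of serially filled buffers
		NumThreads:            4,    // the parallel path requires NumThreads > 1
		PartSize:              16 << 20,
	}
	opts.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e") // fail unless the ETag matches
	if _, err = client.PutObject(context.Background(), "bucket", "object",
		os.Stdin, -1, opts); err != nil { // -1: unknown length, stream path
		log.Fatal(err)
	}
}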
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index b7502e2d92..849471e33b 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -24,7 +24,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"os"
"strings"
"sync"
@@ -60,6 +59,7 @@ type SnowballObject struct {
Size int64
// Modtime to apply to the object.
+ // If ModTime is the zero value, the current time will be used.
ModTime time.Time
// Content of the object.
@@ -107,7 +107,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
}
} else {
- f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
+ f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
if err != nil {
return err
}
@@ -173,6 +173,10 @@ objectLoop:
ModTime: obj.ModTime,
Format: tar.FormatPAX,
}
+ if header.ModTime.IsZero() {
+ header.ModTime = time.Now().UTC()
+ }
+
if err := t.WriteHeader(&header); err != nil {
closeObj()
return err
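With this change a caller can leave SnowballObject.ModTime unset and still get a valid tar header. A minimal sketch (endpoint, credentials and names are placeholders):

package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	objs := make(chan minio.SnowballObject, 1)
	objs <- minio.SnowballObject{
		Key:     "hello.txt",
		Size:    5,
		Content: strings.NewReader("hello"),
		// ModTime deliberately left zero; the tar header now gets time.Now().UTC().
	}
	close(objs)
	if err := client.PutObjectsSnowball(context.Background(), "bucket",
		minio.SnowballOptions{Compress: true}, objs); err != nil {
		log.Fatal(err)
	}
}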
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index c0ff31a5bd..ca6b80a7df 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -235,7 +235,7 @@ func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
// processRemoveMultiObjectsResponse - parse the remove multi objects web service
// and return the success/failure result status for each object
-func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, resultCh chan<- RemoveObjectResult) {
+func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) {
// Parse multi delete XML response
rmResult := &deleteMultiObjectsResult{}
err := xmlDecoder(body, rmResult)
@@ -459,7 +459,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
}
// Process multiobjects remove xml response
- processRemoveMultiObjectsResponse(resp.Body, batch, resultCh)
+ processRemoveMultiObjectsResponse(resp.Body, resultCh)
closeResponse(resp)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 827395ee12..6e784be4ca 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -85,6 +85,14 @@ type Version struct {
StorageClass string
VersionID string `xml:"VersionId"`
+ // UserMetadata contains the x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, keeping the first value of each.
+ // Only returned by MinIO servers.
+ UserMetadata StringMap `json:"userMetadata,omitempty"`
+
+ // UserTags contains the x-amz-tagging header values as key/value pairs.
+ // Only returned by MinIO servers.
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+
isDeleteMarker bool
}
@@ -110,7 +118,7 @@ type ListVersionsResult struct {
// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom
// code will unmarshal <Version> and <DeleteMarker> tags and save them in Versions field to
// preserve the lexical order of the listing.
-func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
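The new Version fields surface through ObjectInfo when listing with versions, so MinIO-attached metadata and tags can be read straight off the listing instead of issuing a StatObject per version. A sketch, with placeholders for endpoint, credentials and bucket; note both fields are only populated by MinIO servers:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	for obj := range client.ListObjects(context.Background(), "bucket", minio.ListObjectsOptions{
		WithVersions: true,
		WithMetadata: true,
		Recursive:    true,
	}) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		fmt.Println(obj.Key, obj.VersionID, obj.UserMetadata, obj.UserTags)
	}
}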
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
index 5d47d7ec56..628d967ff4 100644
--- a/vendor/github.com/minio/minio-go/v7/api-select.go
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -41,8 +41,8 @@ type CSVFileHeaderInfo string
// Constants for file header info.
const (
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
- CSVFileHeaderInfoIgnore = "IGNORE"
- CSVFileHeaderInfoUse = "USE"
+ CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
+ CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
)
// SelectCompressionType - is the parameter for what type of compression is
@@ -52,15 +52,15 @@ type SelectCompressionType string
// Constants for compression types under select API.
const (
SelectCompressionNONE SelectCompressionType = "NONE"
- SelectCompressionGZIP = "GZIP"
- SelectCompressionBZIP = "BZIP2"
+ SelectCompressionGZIP SelectCompressionType = "GZIP"
+ SelectCompressionBZIP SelectCompressionType = "BZIP2"
// Non-standard compression schemes, supported by MinIO hosts:
- SelectCompressionZSTD = "ZSTD" // Zstandard compression.
- SelectCompressionLZ4 = "LZ4" // LZ4 Stream
- SelectCompressionS2 = "S2" // S2 Stream
- SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
+ SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
+ SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
+ SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
+ SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
)
// CSVQuoteFields - is the parameter for how CSV fields are quoted.
@@ -69,7 +69,7 @@ type CSVQuoteFields string
// Constants for csv quote styles.
const (
CSVQuoteFieldsAlways CSVQuoteFields = "Always"
- CSVQuoteFieldsAsNeeded = "AsNeeded"
+ CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
)
// QueryExpressionType - is of what syntax the expression is, this should only
@@ -87,7 +87,7 @@ type JSONType string
// Constants for JSONTypes.
const (
JSONDocumentType JSONType = "DOCUMENT"
- JSONLinesType = "LINES"
+ JSONLinesType JSONType = "LINES"
)
// ParquetInputOptions parquet input specific options
@@ -378,8 +378,8 @@ type SelectObjectType string
// Constants for input data types.
const (
SelectObjectTypeCSV SelectObjectType = "CSV"
- SelectObjectTypeJSON = "JSON"
- SelectObjectTypeParquet = "Parquet"
+ SelectObjectTypeJSON SelectObjectType = "JSON"
+ SelectObjectTypeParquet SelectObjectType = "Parquet"
)
// preludeInfo is used for keeping track of necessary information from the
@@ -416,7 +416,7 @@ type messageType string
const (
errorMsg messageType = "error"
- commonMsg = "event"
+ commonMsg messageType = "event"
)
// eventType represents the type of event.
@@ -425,9 +425,9 @@ type eventType string
// list of event-types returned by Select API.
const (
endEvent eventType = "End"
- recordsEvent = "Records"
- progressEvent = "Progress"
- statsEvent = "Stats"
+ recordsEvent eventType = "Records"
+ progressEvent eventType = "Progress"
+ statsEvent eventType = "Stats"
)
// contentType represents content type of event.
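The api-select.go hunks above (and the notification.go hunk further below) all fix the same Go pitfall: inside a parenthesized const block, a declared type applies only to the line it is written on, so every subsequent `name = "value"` entry silently becomes an untyped string constant rather than the named type. A minimal illustration, with our own names:

package main

import "fmt"

type eventType string

const (
	endEvent     eventType = "End"
	recordsEvent           = "Records" // untyped string constant, NOT eventType
)

func main() {
	fmt.Printf("%T\n", endEvent)     // main.eventType
	fmt.Printf("%T\n", recordsEvent) // string (default type of an untyped constant)
}

The mismatch is invisible in simple assignments, but shows up wherever the dynamic type matters, such as type switches, map keys, or reflection.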
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index a93a6c6207..49f716bc39 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -25,7 +25,6 @@ import (
"fmt"
"hash/crc32"
"io"
- "io/ioutil"
"math/rand"
"net"
"net/http"
@@ -107,6 +106,12 @@ type Options struct {
Region string
BucketLookup BucketLookupType
+ // Allows setting a custom region lookup based on URL pattern;
+ // not all URL patterns are covered by this library, so if you
+ // have custom endpoints with many regions you can use this
+ // function to perform region lookups appropriately.
+ CustomRegionViaURL func(u url.URL) string
+
// TrailingHeaders indicates server support of trailing headers.
// Only supported for v4 signatures.
TrailingHeaders bool
@@ -119,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.45"
+ libraryVersion = "v7.0.52"
)
// User Agent should always follow the below style.
@@ -235,7 +240,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
// Sets custom region, if region is empty bucket location cache is used automatically.
if opts.Region == "" {
- opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+ if opts.CustomRegionViaURL != nil {
+ opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
+ } else {
+ opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+ }
}
clnt.region = opts.Region
@@ -635,7 +644,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
// Read the body to be saved later.
- errBodyBytes, err := ioutil.ReadAll(res.Body)
+ errBodyBytes, err := io.ReadAll(res.Body)
// res.Body should be closed
closeResponse(res)
if err != nil {
@@ -644,14 +653,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes)
- res.Body = ioutil.NopCloser(errBodySeeker)
+ res.Body = io.NopCloser(errBodySeeker)
// For errors verify if its retryable otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
// Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point.
- res.Body = ioutil.NopCloser(errBodySeeker)
+ res.Body = io.NopCloser(errBodySeeker)
// Bucket region if set in error response and the error
// code dictates invalid region, we can retry the request
@@ -814,7 +823,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
if metadata.contentLength == 0 {
req.Body = nil
} else {
- req.Body = ioutil.NopCloser(metadata.contentBody)
+ req.Body = io.NopCloser(metadata.contentBody)
}
// Set incoming content-length.
@@ -846,7 +855,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
// Additionally, we also look if the initialized client is secure,
// if yes then we don't need to perform streaming signature.
req = signer.StreamingSignV4(req, accessKeyID,
- secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
+ secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
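CustomRegionViaURL takes effect in privateNew only when Options.Region is empty, replacing the built-in s3utils.GetRegionFromURL lookup. A hedged sketch for a private deployment; the endpoint, credentials and host naming scheme are placeholders:

package main

import (
	"log"
	"net/url"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("s3.eu-west-3.internal.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
		// Derive the region from the second host label,
		// e.g. "s3.eu-west-3.internal.example.com" -> "eu-west-3".
		CustomRegionViaURL: func(u url.URL) string {
			if parts := strings.Split(u.Hostname(), "."); len(parts) > 1 {
				return parts[1]
			}
			return "us-east-1"
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}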
diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
index cafb4568b6..9df0a31057 100644
--- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go
@@ -190,12 +190,11 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
}
}
- isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName)
+ isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName)
var urlStr string
- // only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint
- if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
+ if isVirtualStyle {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
} else {
targetURL.Path = path.Join(bucketName, "") + "/"
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
index 207a387020..e186b97377 100644
--- a/vendor/github.com/minio/minio-go/v7/core.go
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -86,19 +86,30 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke
return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
}
+// PutObjectPartOptions contains options for PutObjectPart API
+type PutObjectPartOptions struct {
+ Md5Base64, Sha256Hex string
+ SSE encrypt.ServerSide
+ CustomHeader, Trailer http.Header
+}
+
// PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int,
+ data io.Reader, size int64, opts PutObjectPartOptions,
+) (ObjectPart, error) {
p := uploadPartParams{
bucketName: bucket,
objectName: object,
uploadID: uploadID,
reader: data,
partNumber: partID,
- md5Base64: md5Base64,
- sha256Hex: sha256Hex,
+ md5Base64: opts.Md5Base64,
+ sha256Hex: opts.Sha256Hex,
size: size,
- sse: sse,
+ sse: opts.SSE,
streamSha256: true,
+ customHeader: opts.CustomHeader,
+ trailer: opts.Trailer,
}
return c.uploadPart(ctx, p)
}
@@ -109,11 +120,11 @@ func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID stri
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
-func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) {
+func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) {
res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
}, opts)
- return res.ETag, err
+ return res, err
}
// AbortMultipartUpload - Abort an incomplete upload.
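Both Core changes are breaking for callers: checksums, SSE, custom headers and trailers move into PutObjectPartOptions, and CompleteMultipartUpload now returns the full UploadInfo instead of only the ETag string. A migration sketch using only the signatures shown in this hunk:

package example

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

// uploadOnePart uploads a single part and completes the multipart upload.
func uploadOnePart(ctx context.Context, core *minio.Core, bucket, object, uploadID string,
	r io.Reader, size int64, sse encrypt.ServerSide,
) (minio.UploadInfo, error) {
	part, err := core.PutObjectPart(ctx, bucket, object, uploadID, 1, r, size,
		minio.PutObjectPartOptions{SSE: sse}, // Md5Base64, Sha256Hex, Trailer left empty
	)
	if err != nil {
		return minio.UploadInfo{}, err
	}
	// Previously this returned just the ETag string; now the caller also
	// gets VersionID, checksums and the rest of UploadInfo.
	return core.CompleteMultipartUpload(ctx, bucket, object, uploadID,
		[]minio.CompletePart{{PartNumber: part.PartNumber, ETag: part.ETag}},
		minio.PutObjectOptions{})
}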
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index e86e142e55..3328523960 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -31,7 +31,6 @@ import (
"hash"
"hash/crc32"
"io"
- "io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
@@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser {
if _, ok := dataFileCRC32[fileName]; !ok {
dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
}
- return ioutil.NopCloser(newRandomReader(size, size))
+ return io.NopCloser(newRandomReader(size, size))
}
reader, _ := os.Open(getMintDataDirFilePath(fileName))
if _, ok := dataFileCRC32[fileName]; !ok {
@@ -989,7 +988,7 @@ func testGetObjectWithVersioning() {
for _, testFile := range testFiles {
r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
@@ -1131,7 +1130,7 @@ func testPutObjectWithVersioning() {
var errs [n]error
for i := 0; i < n; i++ {
r := newRandomReader(int64((1<<20)*i+i), int64(i))
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
@@ -1271,7 +1270,7 @@ func testCopyObjectWithVersioning() {
testFiles := []string{"datafile-1-b", "datafile-10-kB"}
for _, testFile := range testFiles {
r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
@@ -1304,7 +1303,7 @@ func testCopyObjectWithVersioning() {
return
}
- oldestContent, err := ioutil.ReadAll(reader)
+ oldestContent, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
return
@@ -1338,7 +1337,7 @@ func testCopyObjectWithVersioning() {
}
defer readerCopy.Close()
- newestContent, err := ioutil.ReadAll(readerCopy)
+ newestContent, err := io.ReadAll(readerCopy)
if err != nil {
logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
return
@@ -1408,7 +1407,7 @@ func testConcurrentCopyObjectWithVersioning() {
testFiles := []string{"datafile-10-kB"}
for _, testFile := range testFiles {
r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
@@ -1441,7 +1440,7 @@ func testConcurrentCopyObjectWithVersioning() {
return
}
- oldestContent, err := ioutil.ReadAll(reader)
+ oldestContent, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
return
@@ -1491,7 +1490,7 @@ func testConcurrentCopyObjectWithVersioning() {
}
defer readerCopy.Close()
- newestContent, err := ioutil.ReadAll(readerCopy)
+ newestContent, err := io.ReadAll(readerCopy)
if err != nil {
logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
return
@@ -1571,7 +1570,7 @@ func testComposeObjectWithVersioning() {
for _, testFile := range testFiles {
r := getDataReader(testFile)
- buf, err := ioutil.ReadAll(r)
+ buf, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err)
return
@@ -1633,7 +1632,7 @@ func testComposeObjectWithVersioning() {
}
defer readerCopy.Close()
- copyContentBytes, err := ioutil.ReadAll(readerCopy)
+ copyContentBytes, err := io.ReadAll(readerCopy)
if err != nil {
logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
return
@@ -1733,12 +1732,39 @@ func testRemoveObjectWithVersioning() {
logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
return
}
-
- err = c.RemoveBucket(context.Background(), bucketName)
+ // test delete marker version id is non-null
+ _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
if err != nil {
- logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
+ // create delete marker
+ err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+ return
+ }
+ objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+ idx := 0
+ for info := range objectsInfo {
+ if info.Err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+ return
+ }
+ if idx == 0 {
+ if !info.IsDeleteMarker {
+ logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
+ return
+ }
+ if info.VersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
+ return
+ }
+ }
+ idx++
+ }
+
+ defer cleanupBucket(bucketName, c)
successLogger(testName, function, args, startTime).Info()
}
@@ -2027,7 +2053,7 @@ func testPutObjectWithChecksums() {
}
// Enable tracing, write to stderr.
- //c.TraceOn(os.Stderr)
+ // c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
@@ -2461,7 +2487,7 @@ func testGetObjectSeekEnd() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -2982,7 +3008,7 @@ func testFPutObjectMultipart() {
fileName := getMintDataDirFilePath("datafile-129-MB")
if fileName == "" {
// Make a temp file with minPartSize bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return
@@ -3091,7 +3117,7 @@ func testFPutObject() {
fName := getMintDataDirFilePath("datafile-129-MB")
if fName == "" {
// Make a temp file with minPartSize bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return
@@ -3257,7 +3283,7 @@ func testFPutObjectContext() {
fName := getMintDataDirFilePath("datafile-1-MB")
if fName == "" {
// Make a temp file with 1 MiB bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return
@@ -3357,7 +3383,7 @@ func testFPutObjectContextV2() {
fName := getMintDataDirFilePath("datafile-1-MB")
if fName == "" {
// Make a temp file with 1 MiB bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
if err != nil {
logError(testName, function, args, startTime, "", "Temp file creation failed", err)
return
@@ -3621,7 +3647,7 @@ func testGetObjectS3Zip() {
logError(testName, function, args, startTime, "", "file.Open failed", err)
return
}
- want, err := ioutil.ReadAll(zfr)
+ want, err := io.ReadAll(zfr)
if err != nil {
logError(testName, function, args, startTime, "", "fzip file read failed", err)
return
@@ -3638,7 +3664,7 @@ func testGetObjectS3Zip() {
}
return
}
- got, err := ioutil.ReadAll(r)
+ got, err := io.ReadAll(r)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -3722,7 +3748,7 @@ func testGetObjectReadSeekFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -3885,7 +3911,7 @@ func testGetObjectReadAtFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -4062,7 +4088,7 @@ func testGetObjectReadAtWhenEOFWasReached() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -4181,7 +4207,7 @@ func testPresignedPostPolicy() {
metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -4245,7 +4271,7 @@ func testPresignedPostPolicy() {
filePath := getMintDataDirFilePath("datafile-33-kB")
if filePath == "" {
// Make a temp file with 33 KB data.
- file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+ file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return
@@ -4588,7 +4614,7 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -4770,7 +4796,7 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -4944,7 +4970,7 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -5127,7 +5153,7 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -6138,7 +6164,7 @@ func testFunctional() {
return
}
- newReadBytes, err := ioutil.ReadAll(newReader)
+ newReadBytes, err := io.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -6269,7 +6295,7 @@ func testFunctional() {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
return
}
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ newPresignedBytes, err := io.ReadAll(resp.Body)
if err != nil {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
return
@@ -6312,7 +6338,7 @@ func testFunctional() {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
return
}
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ newPresignedBytes, err = io.ReadAll(resp.Body)
if err != nil {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
return
@@ -6372,7 +6398,7 @@ func testFunctional() {
return
}
- newReadBytes, err = ioutil.ReadAll(newReader)
+ newReadBytes, err = io.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
return
@@ -6428,7 +6454,7 @@ func testFunctional() {
return
}
- newReadBytes, err = ioutil.ReadAll(newReader)
+ newReadBytes, err = io.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
return
@@ -6652,7 +6678,7 @@ func testPutObjectUploadSeekedObject() {
}
args["fileToUpload"] = fileName
} else {
- tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+ tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile create failed", err)
return
@@ -6916,7 +6942,7 @@ func testFPutObjectV2() {
defer cleanupBucket(bucketName, c)
// Make a temp file with 11*1024*1024 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return
@@ -7145,7 +7171,7 @@ func testGetObjectReadSeekFunctionalV2() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -7299,7 +7325,7 @@ func testGetObjectReadAtFunctionalV2() {
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -7837,7 +7863,7 @@ func testEncryptedEmptyObject() {
}
defer reader.Close()
- decBytes, err := ioutil.ReadAll(reader)
+ decBytes, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
return
@@ -7915,7 +7941,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc,
return
}
- decBytes, err := ioutil.ReadAll(reader)
+ decBytes, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -7955,7 +7981,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc,
return
}
- decBytes, err = ioutil.ReadAll(reader)
+ decBytes, err = io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -7994,7 +8020,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc,
}
defer reader.Close()
- decBytes, err = ioutil.ReadAll(reader)
+ decBytes, err = io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -8388,14 +8414,20 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() {
var completeParts []minio.CompletePart
- part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption)
+ part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1,
+ bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024,
+ minio.PutObjectPartOptions{SSE: srcencryption},
+ )
if err != nil {
logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
return
}
completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag})
- part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption)
+ part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2,
+ bytes.NewReader(buf[5*1024*1024:]), 1024*1024,
+ minio.PutObjectPartOptions{SSE: srcencryption},
+ )
if err != nil {
logError(testName, function, args, startTime, "", "PutObjectPart call failed", err)
return
@@ -11040,7 +11072,7 @@ func testFunctionalV2() {
return
}
- newReadBytes, err := ioutil.ReadAll(newReader)
+ newReadBytes, err := io.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -11146,7 +11178,7 @@ func testFunctionalV2() {
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
return
}
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ newPresignedBytes, err := io.ReadAll(resp.Body)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -11185,7 +11217,7 @@ func testFunctionalV2() {
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
return
}
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ newPresignedBytes, err = io.ReadAll(resp.Body)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err)
return
@@ -11239,7 +11271,7 @@ func testFunctionalV2() {
return
}
- newReadBytes, err = ioutil.ReadAll(newReader)
+ newReadBytes, err = io.ReadAll(newReader)
if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
return
@@ -11553,7 +11585,7 @@ func testGetObjectRanges() {
}
for _, test := range tests {
wantRC := getDataReader("datafile-129-MB")
- io.CopyN(ioutil.Discard, wantRC, test.start)
+ io.CopyN(io.Discard, wantRC, test.start)
want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
opts := minio.GetObjectOptions{}
opts.SetRange(test.start, test.end)
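Every ioutil call site in this file, and in the credentials, signer, transport and utils files below, follows the same mechanical migration, since io/ioutil has been deprecated since Go 1.16 and its functions are thin aliases of their replacements:

    ioutil.ReadAll(r)       -> io.ReadAll(r)
    ioutil.NopCloser(r)     -> io.NopCloser(r)
    ioutil.Discard          -> io.Discard
    ioutil.ReadFile(name)   -> os.ReadFile(name)
    ioutil.TempFile(dir, p) -> os.CreateTemp(dir, p)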
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
index e964b52173..1c73d1008b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -24,7 +24,6 @@ import (
"encoding/xml"
"errors"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) {
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleResponse{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
index f4b027a414..07a9c2f092 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -22,7 +22,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
)
// ErrorResponse - Is the typed error returned.
@@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error {
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB)
const maxBodyLength = 1 << 20
- body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
index 56437edb26..e4c0f6717e 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go
@@ -18,7 +18,6 @@
package credentials
import (
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -114,6 +113,7 @@ type hostConfig struct {
type config struct {
Version string `json:"version"`
Hosts map[string]hostConfig `json:"hosts"`
+ Aliases map[string]hostConfig `json:"aliases"`
}
// loadAlias loads from the file pointed to by the shared credentials filename for alias.
@@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) {
cfg := &config{}
json := jsoniter.ConfigCompatibleWithStandardLibrary
- configBytes, err := ioutil.ReadFile(filename)
+ configBytes, err := os.ReadFile(filename)
if err != nil {
return hostConfig{}, err
}
if err = json.Unmarshal(configBytes, cfg); err != nil {
return hostConfig{}, err
}
+
+ if cfg.Version == "10" {
+ return cfg.Aliases[alias], nil
+ }
+
return cfg.Hosts[alias], nil
}
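loadAlias is internal, so for illustration here is the selection logic it now implements, with the struct shapes mirrored from this hunk; the JSON field names and sample values are our assumption, not taken from this diff:

package main

import (
	"encoding/json"
	"fmt"
)

type hostConfig struct {
	URL       string `json:"url"`
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	API       string `json:"api"`
}

type config struct {
	Version string                `json:"version"`
	Hosts   map[string]hostConfig `json:"hosts"`
	Aliases map[string]hostConfig `json:"aliases"`
}

func main() {
	// Newer mc client configs (version "10") store entries under "aliases"
	// instead of "hosts"; the loader now handles both shapes.
	raw := []byte(`{"version":"10","aliases":{"play":{"url":"https://play.min.io","accessKey":"ACCESS","secretKey":"SECRET","api":"S3v4"}}}`)
	var cfg config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	host := cfg.Hosts["play"]
	if cfg.Version == "10" {
		host = cfg.Aliases["play"]
	}
	fmt.Println(host.URL)
}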
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
index 14369cf10a..e641639c95 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -22,7 +22,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/url"
@@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) {
Client: m.Client,
STSEndpoint: endpoint,
GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
- token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+ token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
if err != nil {
return nil, err
}
@@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
return "", err
}
defer resp.Body.Close()
- data, err := ioutil.ReadAll(resp.Body)
+ data, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
index 34598bd8e7..9e92c1e0fd 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -22,7 +22,7 @@ import (
"encoding/xml"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strings"
@@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
index 25b45ecb09..ec5f3f0971 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -21,7 +21,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strings"
@@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
if err != nil {
return value, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
index c7ac4db3b7..dee0a8cbb0 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -21,7 +21,6 @@ import (
"encoding/xml"
"errors"
"io"
- "io/ioutil"
"net"
"net/http"
"net/url"
@@ -141,6 +140,9 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
if err != nil {
return Value{}, err
}
+ if req.Form == nil {
+ req.Form = url.Values{}
+ }
req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
resp, err := i.Client.Do(req)
@@ -152,7 +154,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
}
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
if err != nil {
return Value{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 50f5f1ce65..2e2af50b46 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -22,7 +22,7 @@ import (
"encoding/xml"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strconv"
@@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
- buf, err := ioutil.ReadAll(resp.Body)
+ buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
index 163fa62b42..a7081c5968 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go
@@ -28,27 +28,27 @@ import (
)
const (
- // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
- sseGenericHeader = "X-Amz-Server-Side-Encryption"
-
- // sseKmsKeyID is the AWS SSE-KMS key id.
- sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
- // sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
- sseEncryptionContext = sseGenericHeader + "-Context"
-
- // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
- sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
- // sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
- sseCustomerKey = sseGenericHeader + "-Customer-Key"
- // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
- sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
-
- // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
- sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
- // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
- sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
- // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
- sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+ // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
+ SseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // SseKmsKeyID is the AWS SSE-KMS key id.
+ SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
+ // SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ SseEncryptionContext = SseGenericHeader + "-Context"
+
+ // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
+ // SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ SseCustomerKey = SseGenericHeader + "-Customer-Key"
+ // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
+
+ // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
)
// PBKDF creates a SSE-C key from the provided password and salt.
@@ -157,9 +157,9 @@ func (s ssec) Type() Type { return SSEC }
func (s ssec) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
- h.Set(sseCustomerAlgorithm, "AES256")
- h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
- h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+ h.Set(SseCustomerAlgorithm, "AES256")
+ h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type ssecCopy [32]byte
@@ -168,16 +168,16 @@ func (s ssecCopy) Type() Type { return SSEC }
func (s ssecCopy) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
- h.Set(sseCopyCustomerAlgorithm, "AES256")
- h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
- h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+ h.Set(SseCopyCustomerAlgorithm, "AES256")
+ h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type s3 struct{}
func (s s3) Type() Type { return S3 }
-func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
+func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
type kms struct {
key string
@@ -188,11 +188,11 @@ type kms struct {
func (s kms) Type() Type { return KMS }
func (s kms) Marshal(h http.Header) {
- h.Set(sseGenericHeader, "aws:kms")
+ h.Set(SseGenericHeader, "aws:kms")
if s.key != "" {
- h.Set(sseKmsKeyID, s.key)
+ h.Set(SseKmsKeyID, s.key)
}
if s.hasContext {
- h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
}
}
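Exporting these header names lets callers inspect or log the SSE headers the library sets, instead of duplicating the string literals. A small sketch using only identifiers visible in this hunk plus encrypt.NewSSE:

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	h := http.Header{}
	encrypt.NewSSE().Marshal(h) // request SSE-S3
	// The header name is now exported, so no hard-coded string is needed.
	fmt.Println(encrypt.SseGenericHeader, "=", h.Get(encrypt.SseGenericHeader)) // AES256
}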
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
index 931ca5bc28..fd034fdc80 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go
@@ -34,19 +34,19 @@ type EventType string
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
- ObjectCreatedPut = "s3:ObjectCreated:Put"
- ObjectCreatedPost = "s3:ObjectCreated:Post"
- ObjectCreatedCopy = "s3:ObjectCreated:Copy"
- ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
- ObjectAccessedGet = "s3:ObjectAccessed:Get"
- ObjectAccessedHead = "s3:ObjectAccessed:Head"
- ObjectAccessedAll = "s3:ObjectAccessed:*"
- ObjectRemovedAll = "s3:ObjectRemoved:*"
- ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
- ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
- ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
- BucketCreatedAll = "s3:BucketCreated:*"
- BucketRemovedAll = "s3:BucketRemoved:*"
+ ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
+ ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
+ ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
+ ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
+ ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
+ ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
+ BucketCreatedAll EventType = "s3:BucketCreated:*"
+ BucketRemovedAll EventType = "s3:BucketRemoved:*"
)
// FilterRule - child of S3Key, a tag in the notification xml which
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
index 97abf8df8f..645fe18cbe 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go
@@ -700,6 +700,10 @@ type TargetMetrics struct {
PendingCount uint64 `json:"pendingReplicationCount"`
// Total number of failed operations including metadata updates
FailedCount uint64 `json:"failedReplicationCount"`
+ // Bandwidth limit in bytes/sec for this target
+ BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
+ // Current bandwidth used in bytes/sec for this target
+ CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}
// Metrics represents inline replication metrics for a bucket.
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
index 79c1294665..51a04b06d3 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -339,12 +339,12 @@ func EncodePath(pathName string) string {
encodedPathname.WriteRune(s)
continue
default:
- len := utf8.RuneLen(s)
- if len < 0 {
+ l := utf8.RuneLen(s)
+ if l < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
- u := make([]byte, len)
+ u := make([]byte, l)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
index 5838b9de99..77540e2d82 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go
@@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"net/http"
"strconv"
"strings"
@@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64,
prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
if req.Body == nil {
- req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
stReader := &StreamingUSReader{
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
index 49e999b01a..1c2f1dc9d1 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go
@@ -22,11 +22,12 @@ import (
"encoding/hex"
"fmt"
"io"
- "io/ioutil"
"net/http"
"strconv"
"strings"
"time"
+
+ md5simd "github.com/minio/md5-simd"
)
// Reference for constants used below -
@@ -91,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
// buildChunkStringToSign - returns the string to sign given the chunk checksum
// and previous signature.
-func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
stringToSignParts := []string{
streamingPayloadHdr,
t.Format(iso8601DateFormat),
getScope(region, t, ServiceTypeS3),
previousSig,
emptySHA256,
- hex.EncodeToString(sum256(chunkData)),
+ chunkChecksum,
}
return strings.Join(stringToSignParts, "\n")
@@ -106,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
// buildTrailerChunkStringToSign - returns the string to sign given the chunk checksum
// and previous signature.
-func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
stringToSignParts := []string{
streamingTrailerHdr,
t.Format(iso8601DateFormat),
getScope(region, t, ServiceTypeS3),
previousSig,
- hex.EncodeToString(sum256(chunkData)),
+ chunkChecksum,
}
return strings.Join(stringToSignParts, "\n")
@@ -149,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
}
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
-func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
previousSignature, secretAccessKey string,
) string {
chunkStringToSign := buildChunkStringToSign(reqTime, region,
- previousSignature, chunkData)
+ previousSignature, chunkCheckSum)
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
return getSignature(signingKey, chunkStringToSign)
}
// buildTrailerChunkSignature - returns trailer chunk signature for a given chunk checksum and previous signature.
-func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
previousSignature, secretAccessKey string,
) string {
chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
- previousSignature, chunkData)
+ previousSignature, chunkChecksum)
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
return getSignature(signingKey, chunkStringToSign)
}
@@ -203,12 +204,17 @@ type StreamingReader struct {
totalChunks int
lastChunkSize int
trailer http.Header
+ sh256 md5simd.Hasher
}
// signChunk - signs a chunk read from s.baseReader of chunkLen size.
func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
// Compute chunk signature for next header
- signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf[:chunkLen])
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+ signature := buildChunkSignature(chunkChecksum, s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
@@ -240,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) {
s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
}
+ s.sh256.Reset()
+ s.sh256.Write(s.chunkBuf)
+ chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
// Compute chunk signature
- signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
+ signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
@@ -274,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
- region string, dataLen int64, reqTime time.Time,
+ region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
if req.Body == nil {
- req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
stReader := &StreamingReader{
@@ -295,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
chunkNum: 1,
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
lastChunkSize: int(dataLen % payloadChunkSize),
+ sh256: sh256,
}
if len(req.Trailer) > 0 {
stReader.trailer = req.Trailer
@@ -385,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
+ if s.sh256 != nil {
+ s.sh256.Close()
+ s.sh256 = nil
+ }
return s.baseReadCloser.Close()
}
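Threading an md5simd.Hasher through StreamingSignV4 lets the client reuse its pooled, SIMD-accelerated SHA-256 state instead of allocating one per chunk; the hasher is released in Close. md5simd.Hasher is hash.Hash plus Close, so a stdlib-backed adapter suffices for direct callers. A hedged sketch; the adapter type and all credential values are ours:

package main

import (
	"crypto/sha256"
	"hash"
	"net/http"
	"strings"
	"time"

	md5simd "github.com/minio/md5-simd"
	"github.com/minio/minio-go/v7/pkg/signer"
)

// nopCloseHasher adapts a stdlib hash.Hash to md5simd.Hasher.
type nopCloseHasher struct{ hash.Hash }

func (nopCloseHasher) Close() {}

func main() {
	body := strings.NewReader("payload")
	req, _ := http.NewRequest(http.MethodPut, "https://s3.amazonaws.com/bucket/object", body)
	var h md5simd.Hasher = nopCloseHasher{sha256.New()}
	req = signer.StreamingSignV4(req, "ACCESS", "SECRET", "",
		"us-east-1", int64(body.Len()), time.Now().UTC(), h)
	_ = req
}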
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
index 4b3df19127..31b340dcf7 100644
--- a/vendor/github.com/minio/minio-go/v7/post-policy.go
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -25,7 +25,7 @@ import (
)
// expirationDateFormat date format for expiration key in json policy.
-const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
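This small-looking layout change fixes real policy failures: in Go time layouts a trailing run of 9s drops trailing zeros (and the decimal point itself) from fractional seconds, so expirations landing on a whole second were serialized without milliseconds, while 0s always print exactly three digits as the post-policy format expects. For example:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 3, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2023-03-01T12:00:00Z (millis dropped)
	fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2023-03-01T12:00:00.000Z
}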
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
index 589c0e5498..0a26edd5a7 100644
--- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -34,6 +34,7 @@ var awsS3EndpointMap = map[string]string{
"eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com",
"ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com",
"ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com",
+ "ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com",
"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
@@ -48,6 +49,7 @@ var awsS3EndpointMap = map[string]string{
"cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn",
"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
+ "ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
index a88477b736..1bff664628 100644
--- a/vendor/github.com/minio/minio-go/v7/transport.go
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -23,7 +23,6 @@ package minio
import (
"crypto/tls"
"crypto/x509"
- "io/ioutil"
"net"
"net/http"
"os"
@@ -73,7 +72,7 @@ var DefaultTransport = func(secure bool) (*http.Transport, error) {
}
if f := os.Getenv("SSL_CERT_FILE"); f != "" {
rootCAs := mustGetSystemCertPool()
- data, err := ioutil.ReadFile(f)
+ data, err := os.ReadFile(f)
if err == nil {
rootCAs.AppendCertsFromPEM(data)
}
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
index a8a45b1a8c..9389a7fafb 100644
--- a/vendor/github.com/minio/minio-go/v7/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -28,7 +28,6 @@ import (
"fmt"
"hash"
"io"
- "io/ioutil"
"math/rand"
"net"
"net/http"
@@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) {
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -512,6 +511,23 @@ func isAmzHeader(headerKey string) bool {
return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
}
+// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
+var supportedQueryValues = map[string]bool{
+ "partNumber": true,
+ "versionId": true,
+ "response-cache-control": true,
+ "response-content-disposition": true,
+ "response-content-encoding": true,
+ "response-content-language": true,
+ "response-content-type": true,
+ "response-expires": true,
+}
+
+// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
+func isStandardQueryValue(qsKey string) bool {
+ return supportedQueryValues[qsKey]
+}
+
var (
md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4a11ea8b07..5741119b8b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -522,8 +522,8 @@ github.com/docker/go-units
github.com/drone/envsubst
github.com/drone/envsubst/parse
github.com/drone/envsubst/path
-# github.com/dustin/go-humanize v1.0.0
-## explicit
+# github.com/dustin/go-humanize v1.0.1
+## explicit; go 1.16
github.com/dustin/go-humanize
# github.com/eapache/go-resiliency v1.3.0
## explicit; go 1.13
@@ -993,8 +993,8 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
-# github.com/klauspost/compress v1.15.15
-## explicit; go 1.17
+# github.com/klauspost/compress v1.16.0
+## explicit; go 1.18
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -1005,7 +1005,7 @@ github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/s2
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/cpuid/v2 v2.1.0
+# github.com/klauspost/cpuid/v2 v2.2.4
## explicit; go 1.15
github.com/klauspost/cpuid/v2
# github.com/klauspost/pgzip v1.2.5
@@ -1045,7 +1045,7 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.45
+# github.com/minio/minio-go/v7 v7.0.52
## explicit; go 1.17
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/credentials