Diffstat (limited to 'vendor/golang.org')
-rw-r--r--  vendor/golang.org/x/exp/AUTHORS                       3
-rw-r--r--  vendor/golang.org/x/exp/CONTRIBUTORS                  3
-rw-r--r--  vendor/golang.org/x/exp/LICENSE                      27
-rw-r--r--  vendor/golang.org/x/exp/PATENTS                      22
-rw-r--r--  vendor/golang.org/x/exp/constraints/constraints.go   50
-rw-r--r--  vendor/golang.org/x/exp/slices/slices.go             218
-rw-r--r--  vendor/golang.org/x/exp/slices/sort.go               127
-rw-r--r--  vendor/golang.org/x/exp/slices/zsortfunc.go          479
-rw-r--r--  vendor/golang.org/x/exp/slices/zsortordered.go       481
9 files changed, 1410 insertions(+), 0 deletions(-)
diff --git a/vendor/golang.org/x/exp/AUTHORS b/vendor/golang.org/x/exp/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/exp/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/exp/CONTRIBUTORS b/vendor/golang.org/x/exp/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/exp/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000..2c033dff4
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
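
Usage sketch (editor's note, not part of the vendored patch): how these
constraints are typically consumed through type parameters. The Min helper
below is hypothetical and not an exported symbol of this package.

    package main

    import (
        "fmt"

        "golang.org/x/exp/constraints"
    )

    // Min returns the smaller of two values of any ordered type.
    func Min[T constraints.Ordered](a, b T) T {
        if a < b {
            return a
        }
        return b
    }

    func main() {
        fmt.Println(Min(2, 7))        // 2
        fmt.Println(Min("go", "exp")) // exp
    }
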
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000..8a237c5d6
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, and SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i, vs := range s {
+ if v == vs {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i, v := range s {
+ if f(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ return append(s[:i], s[j:]...)
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) == 0 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if v != last {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) == 0 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if !eq(v, last) {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. Grow may modify elements of the
+// slice between the length and the capacity. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ return append(s, make(S, n)...)[:len(s)]
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
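
Usage sketch (editor's note, not part of the vendored patch): typical calls
into the exported helpers above, assuming the package is imported as
golang.org/x/exp/slices.

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []int{3, 1, 1, 2}
        fmt.Println(slices.Contains(s, 2)) // true
        fmt.Println(slices.Index(s, 1))    // 1

        s = slices.Insert(s, 1, 9) // [3 9 1 1 2]
        s = slices.Delete(s, 1, 2) // [3 1 1 2]
        s = slices.Compact(s)      // [3 1 2]; collapses the consecutive 1s

        c := slices.Clone(s)
        fmt.Println(slices.Equal(s, c)) // true
    }
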
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000..c22e74bd1
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,127 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // search returns the leftmost position where f returns true, or len(x) if f
+ // returns false for all x. This is the insertion position for target in x,
+ // and could point to an element that's either == target or not.
+ pos := search(len(x), func(i int) bool { return x[i] >= target })
+ if pos >= len(x) || x[pos] != target {
+ return pos, false
+ } else {
+ return pos, true
+ }
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) {
+ pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 })
+ if pos >= len(x) || cmp(x[pos], target) != 0 {
+ return pos, false
+ } else {
+ return pos, true
+ }
+}
+
+func search(n int, f func(int) bool) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !f(h) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
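
Usage sketch (editor's note, not part of the vendored patch): sorting and
binary search with the helpers above. The NaN-aware less function mirrors the
suggestion in the Sort doc comment.

    package main

    import (
        "fmt"
        "math"

        "golang.org/x/exp/slices"
    )

    func main() {
        ints := []int{4, 2, 9}
        slices.Sort(ints)                         // [2 4 9]
        fmt.Println(slices.BinarySearch(ints, 4)) // 1 true

        // With this ordering, NaNs sort to the front and the remaining
        // values still come out in ascending order.
        floats := []float64{1, math.NaN(), 0}
        slices.SortFunc(floats, func(a, b float64) bool {
            return a < b || (math.IsNaN(a) && !math.IsNaN(b))
        })
        fmt.Println(floats) // [NaN 0 1]
    }
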
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 000000000..2a632476c
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, assume M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
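
Usage sketch (editor's note, not part of the vendored patch): SortStableFunc
is the entry point into the stableLessFunc/symMergeLessFunc code above; the
less function treats entries with the same key as equal, and those entries
keep their original relative order.

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    type entry struct {
        key string
        seq int
    }

    func main() {
        es := []entry{{"b", 0}, {"a", 1}, {"b", 2}, {"a", 3}}
        slices.SortStableFunc(es, func(x, y entry) bool { return x.key < y.key })
        fmt.Println(es) // [{a 1} {a 3} {b 0} {b 2}]; equal keys keep input order
    }
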
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000..efaa1c8b7
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it in j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, assume M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
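
Usage sketch (editor's note, not part of the vendored patch): the *Func
variants take an explicit comparison, which is useful when the ordering is
not the natural one; here strings are ordered case-insensitively.

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/exp/slices"
    )

    func main() {
        cmp := func(x, y string) int {
            return strings.Compare(strings.ToLower(x), strings.ToLower(y))
        }

        words := []string{"Go", "exp", "Slices"}
        slices.SortFunc(words, func(x, y string) bool { return cmp(x, y) < 0 })
        fmt.Println(words) // [exp Go Slices]

        i, ok := slices.BinarySearchFunc(words, "go", cmp)
        fmt.Println(i, ok) // 1 true
    }
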