Diffstat (limited to 'vendor/github.com/golang')
-rw-r--r--  vendor/github.com/golang/geo/LICENSE | 202
-rw-r--r--  vendor/github.com/golang/geo/r1/doc.go | 20
-rw-r--r--  vendor/github.com/golang/geo/r1/interval.go | 177
-rw-r--r--  vendor/github.com/golang/geo/r2/doc.go | 20
-rw-r--r--  vendor/github.com/golang/geo/r2/rect.go | 255
-rw-r--r--  vendor/github.com/golang/geo/r3/doc.go | 20
-rw-r--r--  vendor/github.com/golang/geo/r3/precisevector.go | 198
-rw-r--r--  vendor/github.com/golang/geo/r3/vector.go | 183
-rw-r--r--  vendor/github.com/golang/geo/s1/angle.go | 120
-rw-r--r--  vendor/github.com/golang/geo/s1/chordangle.go | 250
-rw-r--r--  vendor/github.com/golang/geo/s1/doc.go | 20
-rw-r--r--  vendor/github.com/golang/geo/s1/interval.go | 462
-rw-r--r--  vendor/github.com/golang/geo/s2/bits_go18.go | 53
-rw-r--r--  vendor/github.com/golang/geo/s2/bits_go19.go | 39
-rw-r--r--  vendor/github.com/golang/geo/s2/cap.go | 519
-rw-r--r--  vendor/github.com/golang/geo/s2/cell.go | 698
-rw-r--r--  vendor/github.com/golang/geo/s2/cellid.go | 942
-rw-r--r--  vendor/github.com/golang/geo/s2/cellunion.go | 590
-rw-r--r--  vendor/github.com/golang/geo/s2/centroids.go | 133
-rw-r--r--  vendor/github.com/golang/geo/s2/contains_point_query.go | 190
-rw-r--r--  vendor/github.com/golang/geo/s2/contains_vertex_query.go | 63
-rw-r--r--  vendor/github.com/golang/geo/s2/convex_hull_query.go | 239
-rw-r--r--  vendor/github.com/golang/geo/s2/crossing_edge_query.go | 409
-rw-r--r--  vendor/github.com/golang/geo/s2/distance_target.go | 149
-rw-r--r--  vendor/github.com/golang/geo/s2/doc.go | 29
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_clipping.go | 672
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_crosser.go | 227
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_crossings.go | 396
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_distances.go | 408
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_query.go | 512
-rw-r--r--  vendor/github.com/golang/geo/s2/edge_tessellator.go | 167
-rw-r--r--  vendor/github.com/golang/geo/s2/encode.go | 237
-rw-r--r--  vendor/github.com/golang/geo/s2/interleave.go | 143
-rw-r--r--  vendor/github.com/golang/geo/s2/latlng.go | 101
-rw-r--r--  vendor/github.com/golang/geo/s2/lexicon.go | 175
-rw-r--r--  vendor/github.com/golang/geo/s2/loop.go | 1816
-rw-r--r--  vendor/github.com/golang/geo/s2/matrix3x3.go | 127
-rw-r--r--  vendor/github.com/golang/geo/s2/max_distance_targets.go | 306
-rw-r--r--  vendor/github.com/golang/geo/s2/metric.go | 164
-rw-r--r--  vendor/github.com/golang/geo/s2/min_distance_targets.go | 362
-rw-r--r--  vendor/github.com/golang/geo/s2/nthderivative.go | 88
-rw-r--r--  vendor/github.com/golang/geo/s2/paddedcell.go | 252
-rw-r--r--  vendor/github.com/golang/geo/s2/point.go | 258
-rw-r--r--  vendor/github.com/golang/geo/s2/point_measures.go | 149
-rw-r--r--  vendor/github.com/golang/geo/s2/point_vector.go | 42
-rw-r--r--  vendor/github.com/golang/geo/s2/pointcompression.go | 319
-rw-r--r--  vendor/github.com/golang/geo/s2/polygon.go | 1212
-rw-r--r--  vendor/github.com/golang/geo/s2/polyline.go | 589
-rw-r--r--  vendor/github.com/golang/geo/s2/polyline_measures.go | 53
-rw-r--r--  vendor/github.com/golang/geo/s2/predicates.go | 701
-rw-r--r--  vendor/github.com/golang/geo/s2/projections.go | 203
-rw-r--r--  vendor/github.com/golang/geo/s2/query_options.go | 196
-rw-r--r--  vendor/github.com/golang/geo/s2/rect.go | 710
-rw-r--r--  vendor/github.com/golang/geo/s2/rect_bounder.go | 352
-rw-r--r--  vendor/github.com/golang/geo/s2/region.go | 71
-rw-r--r--  vendor/github.com/golang/geo/s2/regioncoverer.go | 477
-rw-r--r--  vendor/github.com/golang/geo/s2/shape.go | 263
-rw-r--r--  vendor/github.com/golang/geo/s2/shapeindex.go | 1507
-rw-r--r--  vendor/github.com/golang/geo/s2/shapeutil.go | 228
-rw-r--r--  vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go | 72
-rw-r--r--  vendor/github.com/golang/geo/s2/stuv.go | 427
-rw-r--r--  vendor/github.com/golang/geo/s2/util.go | 125
-rw-r--r--  vendor/github.com/golang/geo/s2/wedge_relations.go | 97
63 files changed, 20184 insertions, 0 deletions
diff --git a/vendor/github.com/golang/geo/LICENSE b/vendor/github.com/golang/geo/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/golang/geo/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/r1/doc.go b/vendor/github.com/golang/geo/r1/doc.go
new file mode 100644
index 000000000..c6b65c0e0
--- /dev/null
+++ b/vendor/github.com/golang/geo/r1/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package r1 implements types and functions for working with geometry in ℝ¹.
+
+See ../s2 for a more detailed overview.
+*/
+package r1
diff --git a/vendor/github.com/golang/geo/r1/interval.go b/vendor/github.com/golang/geo/r1/interval.go
new file mode 100644
index 000000000..48ea51982
--- /dev/null
+++ b/vendor/github.com/golang/geo/r1/interval.go
@@ -0,0 +1,177 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package r1
+
+import (
+ "fmt"
+ "math"
+)
+
+// Interval represents a closed interval on ℝ.
+// Zero-length intervals (where Lo == Hi) represent single points.
+// If Lo > Hi then the interval is empty.
+type Interval struct {
+ Lo, Hi float64
+}
+
+// EmptyInterval returns an empty interval.
+func EmptyInterval() Interval { return Interval{1, 0} }
+
+// IntervalFromPoint returns an interval representing a single point.
+func IntervalFromPoint(p float64) Interval { return Interval{p, p} }
+
+// IsEmpty reports whether the interval is empty.
+func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }
+
+// Equal returns true iff the interval contains the same points as oi.
+func (i Interval) Equal(oi Interval) bool {
+ return i == oi || i.IsEmpty() && oi.IsEmpty()
+}
+
+// Center returns the midpoint of the interval.
+// It is undefined for empty intervals.
+func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }
+
+// Length returns the length of the interval.
+// The length of an empty interval is negative.
+func (i Interval) Length() float64 { return i.Hi - i.Lo }
+
+// Contains returns true iff the interval contains p.
+func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }
+
+// ContainsInterval returns true iff the interval contains oi.
+func (i Interval) ContainsInterval(oi Interval) bool {
+ if oi.IsEmpty() {
+ return true
+ }
+ return i.Lo <= oi.Lo && oi.Hi <= i.Hi
+}
+
+// InteriorContains returns true iff the interval strictly contains p.
+func (i Interval) InteriorContains(p float64) bool {
+ return i.Lo < p && p < i.Hi
+}
+
+// InteriorContainsInterval returns true iff the interval strictly contains oi.
+func (i Interval) InteriorContainsInterval(oi Interval) bool {
+ if oi.IsEmpty() {
+ return true
+ }
+ return i.Lo < oi.Lo && oi.Hi < i.Hi
+}
+
+// Intersects returns true iff the interval contains any points in common with oi.
+func (i Interval) Intersects(oi Interval) bool {
+ if i.Lo <= oi.Lo {
+ return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty
+ }
+ return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty
+}
+
+// InteriorIntersects returns true iff the interior of the interval intersects any point of oi, including oi's boundary.
+func (i Interval) InteriorIntersects(oi Interval) bool {
+ return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi
+}
+
+// Intersection returns the interval containing all points common to i and j.
+func (i Interval) Intersection(j Interval) Interval {
+ // Empty intervals do not need to be special-cased.
+ return Interval{
+ Lo: math.Max(i.Lo, j.Lo),
+ Hi: math.Min(i.Hi, j.Hi),
+ }
+}
+
+// AddPoint returns the interval expanded so that it contains the given point.
+func (i Interval) AddPoint(p float64) Interval {
+ if i.IsEmpty() {
+ return Interval{p, p}
+ }
+ if p < i.Lo {
+ return Interval{p, i.Hi}
+ }
+ if p > i.Hi {
+ return Interval{i.Lo, p}
+ }
+ return i
+}
+
+// ClampPoint returns the closest point in the interval to the given point p.
+// The interval must be non-empty.
+func (i Interval) ClampPoint(p float64) float64 {
+ return math.Max(i.Lo, math.Min(i.Hi, p))
+}
+
+// Expanded returns an interval that has been expanded on each side by margin.
+// If margin is negative, then the function shrinks the interval on
+// each side by margin instead. The resulting interval may be empty. Any
+// expansion of an empty interval remains empty.
+func (i Interval) Expanded(margin float64) Interval {
+ if i.IsEmpty() {
+ return i
+ }
+ return Interval{i.Lo - margin, i.Hi + margin}
+}
+
+// Union returns the smallest interval that contains this interval and the given interval.
+func (i Interval) Union(other Interval) Interval {
+ if i.IsEmpty() {
+ return other
+ }
+ if other.IsEmpty() {
+ return i
+ }
+ return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)}
+}
+
+func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
+
+const (
+ // epsilon is a small number that represents a reasonable level of noise between two
+ // values that can be considered to be equal.
+ epsilon = 1e-15
+ // dblEpsilon is a smaller number for values that require more precision.
+ // This is the C++ DBL_EPSILON equivalent.
+ dblEpsilon = 2.220446049250313e-16
+)
+
+// ApproxEqual reports whether the interval can be transformed into the
+// given interval by moving each endpoint a small distance.
+// The empty interval is considered to be positioned arbitrarily on the
+// real line, so any interval with a small enough length will match
+// the empty interval.
+func (i Interval) ApproxEqual(other Interval) bool {
+ if i.IsEmpty() {
+ return other.Length() <= 2*epsilon
+ }
+ if other.IsEmpty() {
+ return i.Length() <= 2*epsilon
+ }
+ return math.Abs(other.Lo-i.Lo) <= epsilon &&
+ math.Abs(other.Hi-i.Hi) <= epsilon
+}
+
+// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two
+// intervals x and y, this distance is defined as
+// h(x, y) = max_{p in x} min_{q in y} d(p, q).
+func (i Interval) DirectedHausdorffDistance(other Interval) float64 {
+ if i.IsEmpty() {
+ return 0
+ }
+ if other.IsEmpty() {
+ return math.Inf(1)
+ }
+ return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo))
+}
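
A brief usage sketch (not part of the vendored diff) for the r1.Interval API added above; it assumes the vendored import path github.com/golang/geo/r1 resolves from the consuming module:

package main

import (
	"fmt"

	"github.com/golang/geo/r1"
)

func main() {
	// Build an interval incrementally; AddPoint grows it by the minimum amount.
	i := r1.EmptyInterval()
	i = i.AddPoint(2)
	i = i.AddPoint(5) // now [2, 5]

	j := r1.Interval{Lo: 4, Hi: 9}

	fmt.Println(i.Intersects(j))   // true: the intervals share [4, 5]
	fmt.Println(i.Intersection(j)) // [4.0000000, 5.0000000]
	fmt.Println(i.Union(j))        // [2.0000000, 9.0000000]
	fmt.Println(i.ClampPoint(7))   // 5: the closest point of [2, 5] to 7

	// h(i, j) = max over p in i of the distance from p to j; here the
	// point 2 in i is distance 2 from j = [4, 9].
	fmt.Println(i.DirectedHausdorffDistance(j)) // 2
}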
diff --git a/vendor/github.com/golang/geo/r2/doc.go b/vendor/github.com/golang/geo/r2/doc.go
new file mode 100644
index 000000000..05b155543
--- /dev/null
+++ b/vendor/github.com/golang/geo/r2/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package r2 implements types and functions for working with geometry in ℝ².
+
+See package s2 for a more detailed overview.
+*/
+package r2
diff --git a/vendor/github.com/golang/geo/r2/rect.go b/vendor/github.com/golang/geo/r2/rect.go
new file mode 100644
index 000000000..495545bba
--- /dev/null
+++ b/vendor/github.com/golang/geo/r2/rect.go
@@ -0,0 +1,255 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package r2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r1"
+)
+
+// Point represents a point in ℝ².
+type Point struct {
+ X, Y float64
+}
+
+// Add returns the sum of p and op.
+func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} }
+
+// Sub returns the difference of p and op.
+func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} }
+
+// Mul returns the scalar product of p and m.
+func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} }
+
+// Ortho returns a counterclockwise orthogonal point with the same norm.
+func (p Point) Ortho() Point { return Point{-p.Y, p.X} }
+
+// Dot returns the dot product between p and op.
+func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y }
+
+// Cross returns the cross product of p and op.
+func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X }
+
+// Norm returns the vector's norm.
+func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) }
+
+// Normalize returns a unit point in the same direction as p.
+func (p Point) Normalize() Point {
+ if p.X == 0 && p.Y == 0 {
+ return p
+ }
+ return p.Mul(1 / p.Norm())
+}
+
+func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) }
+
+// Rect represents a closed axis-aligned rectangle in the (x,y) plane.
+type Rect struct {
+ X, Y r1.Interval
+}
+
+// RectFromPoints constructs a rect that contains the given points.
+func RectFromPoints(pts ...Point) Rect {
+ // Because the default value of an r1.Interval is {0, 0}, we seed the
+ // rectangle with the first point rather than with a zero Rect. Otherwise,
+ // passing in only Point{0.2, 0.3} would produce the incorrect starting
+ // Rect of {0, 0.2}, {0, 0.3} instead of the correct {0.2, 0.2}, {0.3, 0.3}.
+ if len(pts) == 0 {
+ return Rect{}
+ }
+
+ r := Rect{
+ X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X},
+ Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y},
+ }
+
+ for _, p := range pts[1:] {
+ r = r.AddPoint(p)
+ }
+ return r
+}
+
+// RectFromCenterSize constructs a rectangle with the given center and size.
+// Both dimensions of size must be non-negative.
+func RectFromCenterSize(center, size Point) Rect {
+ return Rect{
+ r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2},
+ r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2},
+ }
+}
+
+// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test
+// for empty rectangles, since they have more than one representation. A Rect{}
+// is not the same as the EmptyRect.
+func EmptyRect() Rect {
+ return Rect{r1.EmptyInterval(), r1.EmptyInterval()}
+}
+
+// IsValid reports whether the rectangle is valid.
+// This requires the width to be empty iff the height is empty.
+func (r Rect) IsValid() bool {
+ return r.X.IsEmpty() == r.Y.IsEmpty()
+}
+
+// IsEmpty reports whether the rectangle is empty.
+func (r Rect) IsEmpty() bool {
+ return r.X.IsEmpty()
+}
+
+// Vertices returns all four vertices of the rectangle. Vertices are returned in
+// CCW direction starting with the lower left corner.
+func (r Rect) Vertices() [4]Point {
+ return [4]Point{
+ {r.X.Lo, r.Y.Lo},
+ {r.X.Hi, r.Y.Lo},
+ {r.X.Hi, r.Y.Hi},
+ {r.X.Lo, r.Y.Hi},
+ }
+}
+
+// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and
+// direction j along the Y-axis (0=down, 1=up).
+func (r Rect) VertexIJ(i, j int) Point {
+ x := r.X.Lo
+ if i == 1 {
+ x = r.X.Hi
+ }
+ y := r.Y.Lo
+ if j == 1 {
+ y = r.Y.Hi
+ }
+ return Point{x, y}
+}
+
+// Lo returns the low corner of the rect.
+func (r Rect) Lo() Point {
+ return Point{r.X.Lo, r.Y.Lo}
+}
+
+// Hi returns the high corner of the rect.
+func (r Rect) Hi() Point {
+ return Point{r.X.Hi, r.Y.Hi}
+}
+
+// Center returns the center of the rectangle in (x,y)-space.
+func (r Rect) Center() Point {
+ return Point{r.X.Center(), r.Y.Center()}
+}
+
+// Size returns the width and height of this rectangle in (x,y)-space. Empty
+// rectangles have a negative width and height.
+func (r Rect) Size() Point {
+ return Point{r.X.Length(), r.Y.Length()}
+}
+
+// ContainsPoint reports whether the rectangle contains the given point.
+// Rectangles are closed regions, i.e. they contain their boundary.
+func (r Rect) ContainsPoint(p Point) bool {
+ return r.X.Contains(p.X) && r.Y.Contains(p.Y)
+}
+
+// InteriorContainsPoint returns true iff the given point is contained in the interior
+// of the region (i.e. the region excluding its boundary).
+func (r Rect) InteriorContainsPoint(p Point) bool {
+ return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y)
+}
+
+// Contains reports whether the rectangle contains the given rectangle.
+func (r Rect) Contains(other Rect) bool {
+ return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y)
+}
+
+// InteriorContains reports whether the interior of this rectangle contains all of the
+// points of the given other rectangle (including its boundary).
+func (r Rect) InteriorContains(other Rect) bool {
+ return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y)
+}
+
+// Intersects reports whether this rectangle and the other rectangle have any points in common.
+func (r Rect) Intersects(other Rect) bool {
+ return r.X.Intersects(other.X) && r.Y.Intersects(other.Y)
+}
+
+// InteriorIntersects reports whether the interior of this rectangle intersects
+// any point (including the boundary) of the given other rectangle.
+func (r Rect) InteriorIntersects(other Rect) bool {
+ return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y)
+}
+
+// AddPoint expands the rectangle to include the given point. The rectangle is
+// expanded by the minimum amount possible.
+func (r Rect) AddPoint(p Point) Rect {
+ return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)}
+}
+
+// AddRect expands the rectangle to include the given rectangle. This is the
+// same as replacing the rectangle by the union of the two rectangles, but
+// is more efficient.
+func (r Rect) AddRect(other Rect) Rect {
+ return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
+}
+
+// ClampPoint returns the closest point in the rectangle to the given point.
+// The rectangle must be non-empty.
+func (r Rect) ClampPoint(p Point) Point {
+ return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)}
+}
+
+// Expanded returns a rectangle that has been expanded in the x-direction
+// by margin.X, and in y-direction by margin.Y. If either margin is negative,
+// the rectangle is shrunk on the corresponding sides instead. The resulting
+// rectangle may be empty. Any expansion of an empty rectangle remains empty.
+func (r Rect) Expanded(margin Point) Rect {
+ xx := r.X.Expanded(margin.X)
+ yy := r.Y.Expanded(margin.Y)
+ if xx.IsEmpty() || yy.IsEmpty() {
+ return EmptyRect()
+ }
+ return Rect{xx, yy}
+}
+
+// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides.
+func (r Rect) ExpandedByMargin(margin float64) Rect {
+ return r.Expanded(Point{margin, margin})
+}
+
+// Union returns the smallest rectangle containing the union of this rectangle and
+// the given rectangle.
+func (r Rect) Union(other Rect) Rect {
+ return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
+}
+
+// Intersection returns the smallest rectangle containing the intersection of this
+// rectangle and the given rectangle.
+func (r Rect) Intersection(other Rect) Rect {
+ xx := r.X.Intersection(other.X)
+ yy := r.Y.Intersection(other.Y)
+ if xx.IsEmpty() || yy.IsEmpty() {
+ return EmptyRect()
+ }
+
+ return Rect{xx, yy}
+}
+
+// ApproxEqual returns true if the x- and y-intervals of the two rectangles are
+// the same up to the given tolerance.
+func (r Rect) ApproxEqual(r2 Rect) bool {
+ return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
+}
+
+func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) }
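
A short sketch (again not part of the diff) of the r2.Rect API above, under the same vendored-path assumption (github.com/golang/geo/r2):

package main

import (
	"fmt"

	"github.com/golang/geo/r2"
)

func main() {
	// RectFromPoints seeds the bound with the first point, then grows it.
	r := r2.RectFromPoints(
		r2.Point{X: 0.2, Y: 0.3},
		r2.Point{X: 1.5, Y: 0.1},
		r2.Point{X: 0.9, Y: 2.0},
	) // X: [0.2, 1.5], Y: [0.1, 2.0]

	fmt.Println(r.ContainsPoint(r2.Point{X: 1.0, Y: 1.0})) // true
	fmt.Println(r.Center())                                // (0.850000000000, 1.050000000000)

	// Grow by 0.5 on all four sides; a negative margin shrinks instead.
	fmt.Println(r.ExpandedByMargin(0.5).Contains(r)) // true

	// Intersecting disjoint rectangles yields the canonical empty rect.
	far := r2.RectFromCenterSize(r2.Point{X: 10, Y: 10}, r2.Point{X: 1, Y: 1})
	fmt.Println(r.Intersection(far).IsEmpty()) // true
}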
diff --git a/vendor/github.com/golang/geo/r3/doc.go b/vendor/github.com/golang/geo/r3/doc.go
new file mode 100644
index 000000000..1eb4710c8
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package r3 implements types and functions for working with geometry in ℝ³.
+
+See ../s2 for a more detailed overview.
+*/
+package r3
diff --git a/vendor/github.com/golang/geo/r3/precisevector.go b/vendor/github.com/golang/geo/r3/precisevector.go
new file mode 100644
index 000000000..b13393dbc
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/precisevector.go
@@ -0,0 +1,198 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package r3
+
+import (
+ "fmt"
+ "math/big"
+)
+
+const (
+ // prec is the number of bits of precision to use for the Float values.
+ // To keep things simple, we use the maximum allowable precision on big
+ // values. This allows us to handle all values we expect in the s2 library.
+ prec = big.MaxPrec
+)
+
+// define some commonly referenced values.
+var (
+ precise0 = precInt(0)
+ precise1 = precInt(1)
+)
+
+// precStr wraps the conversion from a string into a big.Float. To guarantee
+// that the result is exact, it should only be used on values that are
+// integer multiples of integer powers of 2.
+func precStr(s string) *big.Float {
+ // Explicitly ignoring the bool return for this usage.
+ f, _ := new(big.Float).SetPrec(prec).SetString(s)
+ return f
+}
+
+func precInt(i int64) *big.Float {
+ return new(big.Float).SetPrec(prec).SetInt64(i)
+}
+
+func precFloat(f float64) *big.Float {
+ return new(big.Float).SetPrec(prec).SetFloat64(f)
+}
+
+func precAdd(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Add(a, b)
+}
+
+func precSub(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Sub(a, b)
+}
+
+func precMul(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Mul(a, b)
+}
+
+// PreciseVector represents a point in ℝ³ using high-precision values.
+// Note that this is NOT a complete implementation because there are some
+// operations that Vector supports that are not feasible with arbitrary precision
+// math. (e.g., methods that need division like Normalize, or methods needing a
+// square root operation such as Norm)
+type PreciseVector struct {
+ X, Y, Z *big.Float
+}
+
+// PreciseVectorFromVector creates a high precision vector from the given Vector.
+func PreciseVectorFromVector(v Vector) PreciseVector {
+ return NewPreciseVector(v.X, v.Y, v.Z)
+}
+
+// NewPreciseVector creates a high precision vector from the given floating point values.
+func NewPreciseVector(x, y, z float64) PreciseVector {
+ return PreciseVector{
+ X: precFloat(x),
+ Y: precFloat(y),
+ Z: precFloat(z),
+ }
+}
+
+// Vector returns this precise vector converted to a Vector.
+func (v PreciseVector) Vector() Vector {
+ // The accuracy flag is ignored on these conversions back to float64.
+ x, _ := v.X.Float64()
+ y, _ := v.Y.Float64()
+ z, _ := v.Z.Float64()
+ return Vector{x, y, z}.Normalize()
+}
+
+// Equal reports whether v and ov are equal.
+func (v PreciseVector) Equal(ov PreciseVector) bool {
+ return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
+}
+
+func (v PreciseVector) String() string {
+ return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z)
+}
+
+// Norm2 returns the square of the norm.
+func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) }
+
+// IsUnit reports whether this vector is of unit length.
+func (v PreciseVector) IsUnit() bool {
+ return v.Norm2().Cmp(precise1) == 0
+}
+
+// Abs returns the vector with nonnegative components.
+func (v PreciseVector) Abs() PreciseVector {
+ return PreciseVector{
+ X: new(big.Float).Abs(v.X),
+ Y: new(big.Float).Abs(v.Y),
+ Z: new(big.Float).Abs(v.Z),
+ }
+}
+
+// Add returns the standard vector sum of v and ov.
+func (v PreciseVector) Add(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precAdd(v.X, ov.X),
+ Y: precAdd(v.Y, ov.Y),
+ Z: precAdd(v.Z, ov.Z),
+ }
+}
+
+// Sub returns the standard vector difference of v and ov.
+func (v PreciseVector) Sub(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precSub(v.X, ov.X),
+ Y: precSub(v.Y, ov.Y),
+ Z: precSub(v.Z, ov.Z),
+ }
+}
+
+// Mul returns the standard scalar product of v and f.
+func (v PreciseVector) Mul(f *big.Float) PreciseVector {
+ return PreciseVector{
+ X: precMul(v.X, f),
+ Y: precMul(v.Y, f),
+ Z: precMul(v.Z, f),
+ }
+}
+
+// MulByFloat64 returns the standard scalar product of v and f.
+func (v PreciseVector) MulByFloat64(f float64) PreciseVector {
+ return v.Mul(precFloat(f))
+}
+
+// Dot returns the standard dot product of v and ov.
+func (v PreciseVector) Dot(ov PreciseVector) *big.Float {
+ return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z)))
+}
+
+// Cross returns the standard cross product of v and ov.
+func (v PreciseVector) Cross(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)),
+ Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)),
+ Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)),
+ }
+}
+
+// LargestComponent returns the axis that represents the largest component in this vector.
+func (v PreciseVector) LargestComponent() Axis {
+ t := v.Abs()
+
+ if t.X.Cmp(t.Y) > 0 {
+ if t.X.Cmp(t.Z) > 0 {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y.Cmp(t.Z) > 0 {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// SmallestComponent returns the axis that represents the smallest component in this vector.
+func (v PreciseVector) SmallestComponent() Axis {
+ t := v.Abs()
+
+ if t.X.Cmp(t.Y) < 0 {
+ if t.X.Cmp(t.Z) < 0 {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y.Cmp(t.Z) < 0 {
+ return YAxis
+ }
+ return ZAxis
+}
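
A hedged sketch of why PreciseVector exists: cross products of nearly parallel float64 vectors cancel catastrophically, while big.Float arithmetic at big.MaxPrec does not round for inputs like these. Assumes the vendored import path github.com/golang/geo/r3:

package main

import (
	"fmt"

	"github.com/golang/geo/r3"
)

func main() {
	// Two nearly parallel vectors: in float64 their cross product loses
	// most of its significant bits, the case these exact predicates target.
	a := r3.NewPreciseVector(1, 1e-30, 0)
	b := r3.NewPreciseVector(1, 2e-30, 0)

	// Every float64 input is exactly representable as a big.Float, and for
	// these inputs the products and differences in Cross are exact, so the
	// sign below is reliable.
	cross := a.Cross(b)
	fmt.Println(cross.Z.Sign() > 0)                   // true: a turns CCW into b
	fmt.Println(cross.LargestComponent() == r3.ZAxis) // true
}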
diff --git a/vendor/github.com/golang/geo/r3/vector.go b/vendor/github.com/golang/geo/r3/vector.go
new file mode 100644
index 000000000..ccda622f4
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/vector.go
@@ -0,0 +1,183 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package r3
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// Vector represents a point in ℝ³.
+type Vector struct {
+ X, Y, Z float64
+}
+
+// ApproxEqual reports whether v and ov are equal within a small epsilon.
+func (v Vector) ApproxEqual(ov Vector) bool {
+ const epsilon = 1e-16
+ return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon
+}
+
+func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) }
+
+// Norm returns the vector's norm.
+func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) }
+
+// Norm2 returns the square of the norm.
+func (v Vector) Norm2() float64 { return v.Dot(v) }
+
+// Normalize returns a unit vector in the same direction as v.
+func (v Vector) Normalize() Vector {
+ n2 := v.Norm2()
+ if n2 == 0 {
+ return Vector{0, 0, 0}
+ }
+ return v.Mul(1 / math.Sqrt(n2))
+}
+
+// IsUnit returns whether this vector is of approximately unit length.
+func (v Vector) IsUnit() bool {
+ const epsilon = 5e-14
+ return math.Abs(v.Norm2()-1) <= epsilon
+}
+
+// Abs returns the vector with nonnegative components.
+func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} }
+
+// Add returns the standard vector sum of v and ov.
+func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} }
+
+// Sub returns the standard vector difference of v and ov.
+func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} }
+
+// Mul returns the standard scalar product of v and m.
+func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} }
+
+// Dot returns the standard dot product of v and ov.
+func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
+
+// Cross returns the standard cross product of v and ov.
+func (v Vector) Cross(ov Vector) Vector {
+ return Vector{
+ v.Y*ov.Z - v.Z*ov.Y,
+ v.Z*ov.X - v.X*ov.Z,
+ v.X*ov.Y - v.Y*ov.X,
+ }
+}
+
+// Distance returns the Euclidean distance between v and ov.
+func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() }
+
+// Angle returns the angle between v and ov.
+func (v Vector) Angle(ov Vector) s1.Angle {
+ return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian
+}
+
+// Axis enumerates the 3 axes of ℝ³.
+type Axis int
+
+// The three axes of ℝ³.
+const (
+ XAxis Axis = iota
+ YAxis
+ ZAxis
+)
+
+// Ortho returns a unit vector that is orthogonal to v.
+// Ortho(-v) = -Ortho(v) for all v.
+func (v Vector) Ortho() Vector {
+ ov := Vector{0.012, 0.0053, 0.00457}
+ switch v.LargestComponent() {
+ case XAxis:
+ ov.Z = 1
+ case YAxis:
+ ov.X = 1
+ default:
+ ov.Y = 1
+ }
+ return v.Cross(ov).Normalize()
+}
+
+// LargestComponent returns the axis that represents the largest component in this vector.
+func (v Vector) LargestComponent() Axis {
+ t := v.Abs()
+
+ if t.X > t.Y {
+ if t.X > t.Z {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y > t.Z {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// SmallestComponent returns the axis that represents the smallest component in this vector.
+func (v Vector) SmallestComponent() Axis {
+ t := v.Abs()
+
+ if t.X < t.Y {
+ if t.X < t.Z {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y < t.Z {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// Cmp compares v and ov lexicographically and returns:
+//
+// -1 if v < ov
+// 0 if v == ov
+// +1 if v > ov
+//
+// This method is based on C++'s std::lexicographical_compare. Two entities
+// are compared element by element with the given operator. The first mismatch
+// defines which is less (or greater) than the other. If both have equivalent
+// values they are lexicographically equal.
+func (v Vector) Cmp(ov Vector) int {
+ if v.X < ov.X {
+ return -1
+ }
+ if v.X > ov.X {
+ return 1
+ }
+
+ // First elements were the same, try the next.
+ if v.Y < ov.Y {
+ return -1
+ }
+ if v.Y > ov.Y {
+ return 1
+ }
+
+ // Second elements were the same return the final compare.
+ if v.Z < ov.Z {
+ return -1
+ }
+ if v.Z > ov.Z {
+ return 1
+ }
+
+ // Both are equal
+ return 0
+}
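
A small sketch of the Vector operations above (same vendored-path assumption, github.com/golang/geo/r3):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/r3"
)

func main() {
	x := r3.Vector{X: 1, Y: 0, Z: 0}
	y := r3.Vector{X: 0, Y: 1, Z: 0}

	// Angle is computed as atan2(|v×w|, v·w), which stays accurate for
	// nearly parallel and nearly antiparallel vectors, unlike acos(v·w).
	fmt.Println(x.Angle(y).Radians() == math.Pi/2) // true

	// Ortho crosses v with a helper vector chosen based on v's largest
	// component, then normalizes: a unit vector perpendicular to v.
	v := r3.Vector{X: 3, Y: -4, Z: 12}
	o := v.Ortho()
	fmt.Println(o.IsUnit())                 // true
	fmt.Println(math.Abs(v.Dot(o)) < 1e-14) // true
}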
diff --git a/vendor/github.com/golang/geo/s1/angle.go b/vendor/github.com/golang/geo/s1/angle.go
new file mode 100644
index 000000000..747b23dea
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/angle.go
@@ -0,0 +1,120 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s1
+
+import (
+ "math"
+ "strconv"
+)
+
+// Angle represents a 1D angle. The internal representation is a double precision
+// value in radians, so conversion to and from radians is exact.
+// Conversions between E5, E6, E7, and Degrees are not always
+// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000).
+//
+// The following conversions between degrees and radians are exact:
+//
+// Degree*180 == Radian*math.Pi
+// Degree*(180/n) == Radian*(math.Pi/n) for n == 1..8
+//
+// These identities hold when the arguments are scaled up or down by any power
+// of 2. Some similar identities are also true, for example,
+//
+// Degree*60 == Radian*(math.Pi/3)
+//
+// But be aware that this type of identity does not hold in general. For example,
+//
+// Degree*3 != Radian*(math.Pi/60)
+//
+// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees()
+// does not always equal x. For example,
+//
+// (Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8
+//
+// but
+//
+// (60*Degree).Degrees() != 60
+//
+// When testing for equality, you should allow for numerical errors (ApproxEqual)
+// or convert to discrete E5/E6/E7 values first.
+type Angle float64
+
+// Angle units.
+const (
+ Radian Angle = 1
+ Degree = (math.Pi / 180) * Radian
+
+ E5 = 1e-5 * Degree
+ E6 = 1e-6 * Degree
+ E7 = 1e-7 * Degree
+)
+
+// Radians returns the angle in radians.
+func (a Angle) Radians() float64 { return float64(a) }
+
+// Degrees returns the angle in degrees.
+func (a Angle) Degrees() float64 { return float64(a / Degree) }
+
+// round returns the value rounded to nearest as an int32.
+// This does not match C++ exactly for the case of x.5.
+func round(val float64) int32 {
+ if val < 0 {
+ return int32(val - 0.5)
+ }
+ return int32(val + 0.5)
+}
+
+// InfAngle returns an angle larger than any finite angle.
+func InfAngle() Angle {
+ return Angle(math.Inf(1))
+}
+
+// isInf reports whether this Angle is infinite.
+func (a Angle) isInf() bool {
+ return math.IsInf(float64(a), 0)
+}
+
+// E5 returns the angle in hundred thousandths of degrees.
+func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) }
+
+// E6 returns the angle in millionths of degrees.
+func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) }
+
+// E7 returns the angle in ten millionths of degrees.
+func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
+
+// Abs returns the absolute value of the angle.
+func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
+
+// Normalized returns an equivalent angle in (-π, π].
+func (a Angle) Normalized() Angle {
+ rad := math.Remainder(float64(a), 2*math.Pi)
+ if rad <= -math.Pi {
+ rad = math.Pi
+ }
+ return Angle(rad)
+}
+
+func (a Angle) String() string {
+ return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f"
+}
+
+// ApproxEqual reports whether the two angles are the same up to a small tolerance.
+func (a Angle) ApproxEqual(other Angle) bool {
+ return math.Abs(float64(a)-float64(other)) <= epsilon
+}
+
+// BUG(dsymonds): The major differences from the C++ version are:
+// - no unsigned E5/E6/E7 methods
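
A sketch exercising the conversions documented above (assuming the vendored path github.com/golang/geo/s1):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	// Angle is a float64 in radians, so the unit constants make
	// construction a single multiplication.
	a := 180 * s1.Degree
	fmt.Println(a.Radians() == math.Pi) // true: documented as exact above

	// E7 rounds to ten-millionths of a degree.
	b := s1.Angle(37.7749) * s1.Degree
	fmt.Println(b.E7()) // 377749000

	// Both representations of the point (-1, 0) normalize to +π.
	fmt.Println(s1.Angle(-math.Pi).Normalized().Radians() == math.Pi) // true
}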
diff --git a/vendor/github.com/golang/geo/s1/chordangle.go b/vendor/github.com/golang/geo/s1/chordangle.go
new file mode 100644
index 000000000..406c69ef1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/chordangle.go
@@ -0,0 +1,250 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s1
+
+import (
+ "math"
+)
+
+// ChordAngle represents the angle subtended by a chord (i.e., the straight
+// line segment connecting two points on the sphere). Its representation
+// makes it very efficient for computing and comparing distances, but unlike
+// Angle it is only capable of representing angles between 0 and π radians.
+// Generally, ChordAngle should only be used in loops where many angles need
+// to be calculated and compared. Otherwise it is simpler to use Angle.
+//
+// ChordAngle loses some accuracy as the angle approaches π radians.
+// Specifically, the representation of (π - x) radians has an error of about
+// (1e-15 / x), with a maximum error of about 2e-8 radians (about 13cm on the
+// Earth's surface). For comparison, for angles up to π/2 radians (10000km)
+// the worst-case representation error is about 2e-16 radians (1 nanometer),
+// which is about the same as Angle.
+//
+// ChordAngles are represented by the squared chord length, which can
+// range from 0 to 4. Positive infinity represents an infinite squared length.
+type ChordAngle float64
+
+const (
+ // NegativeChordAngle represents a chord angle smaller than the zero angle.
+ // The only valid operations on a NegativeChordAngle are comparisons,
+ // Angle conversions, and Successor/Predecessor.
+ NegativeChordAngle = ChordAngle(-1)
+
+ // RightChordAngle represents a chord angle of 90 degrees (a "right angle").
+ RightChordAngle = ChordAngle(2)
+
+ // StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
+ // This is the maximum finite chord angle.
+ StraightChordAngle = ChordAngle(4)
+
+ // maxLength2 is the square of the maximum length allowed in a ChordAngle.
+ maxLength2 = 4.0
+)
+
+// ChordAngleFromAngle returns a ChordAngle from the given Angle.
+func ChordAngleFromAngle(a Angle) ChordAngle {
+ if a < 0 {
+ return NegativeChordAngle
+ }
+ if a.isInf() {
+ return InfChordAngle()
+ }
+ l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians()))
+ return ChordAngle(l * l)
+}
+
+// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
+// Note that the argument is automatically clamped to a maximum of 4 to
+// handle possible roundoff errors. The argument must be non-negative.
+func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
+ if length2 > maxLength2 {
+ return StraightChordAngle
+ }
+ return ChordAngle(length2)
+}
+
+// Expanded returns a new ChordAngle that has been adjusted by the given error
+// bound (which can be positive or negative). Error should be the value
+// returned by either MaxPointError or MaxAngleError. For example:
+// a := ChordAngleFromPoints(x, y)
+// a1 := a.Expanded(a.MaxPointError())
+func (c ChordAngle) Expanded(e float64) ChordAngle {
+ // If the angle is special, don't change it. Otherwise clamp it to the valid range.
+ if c.isSpecial() {
+ return c
+ }
+ return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e)))
+}
+
+// Angle converts this ChordAngle to an Angle.
+func (c ChordAngle) Angle() Angle {
+ if c < 0 {
+ return -1 * Radian
+ }
+ if c.isInf() {
+ return InfAngle()
+ }
+ return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c))))
+}
+
+// InfChordAngle returns a chord angle larger than any finite chord angle.
+// The only valid operations on an InfChordAngle are comparisons, Angle
+// conversions, and Successor/Predecessor.
+func InfChordAngle() ChordAngle {
+ return ChordAngle(math.Inf(1))
+}
+
+// isInf reports whether this ChordAngle is infinite.
+func (c ChordAngle) isInf() bool {
+ return math.IsInf(float64(c), 1)
+}
+
+// isSpecial reports whether this ChordAngle is one of the special cases.
+func (c ChordAngle) isSpecial() bool {
+ return c < 0 || c.isInf()
+}
+
+// isValid reports whether this ChordAngle is valid or not.
+func (c ChordAngle) isValid() bool {
+ return (c >= 0 && c <= maxLength2) || c.isSpecial()
+}
+
+// Successor returns the smallest representable ChordAngle larger than this one.
+// This can be used to convert a "<" comparison to a "<=" comparison.
+//
+// Note the following special cases:
+// NegativeChordAngle.Successor == 0
+// StraightChordAngle.Successor == InfChordAngle
+// InfChordAngle.Successor == InfChordAngle
+func (c ChordAngle) Successor() ChordAngle {
+ if c >= maxLength2 {
+ return InfChordAngle()
+ }
+ if c < 0 {
+ return 0
+ }
+ return ChordAngle(math.Nextafter(float64(c), 10.0))
+}
+
+// Predecessor returns the largest representable ChordAngle less than this one.
+//
+// Note the following special cases:
+// InfChordAngle.Predecessor == StraightChordAngle
+// ChordAngle(0).Predecessor == NegativeChordAngle
+// NegativeChordAngle.Predecessor == NegativeChordAngle
+func (c ChordAngle) Predecessor() ChordAngle {
+ if c <= 0 {
+ return NegativeChordAngle
+ }
+ if c > maxLength2 {
+ return StraightChordAngle
+ }
+
+ return ChordAngle(math.Nextafter(float64(c), -10.0))
+}
+
+// MaxPointError returns the maximum error size for a ChordAngle constructed
+// from 2 Points x and y, assuming that x and y are normalized to within the
+// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to
+// the true distance after the points are projected to lie exactly on the sphere.
+func (c ChordAngle) MaxPointError() float64 {
+ // There is a relative error of (2.5*dblEpsilon) when computing the squared
+ // distance, plus a relative error of 2 * dblEpsilon, plus an absolute error
+ // of (16 * dblEpsilon**2) because the lengths of the input points may differ
+ // from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize).
+ return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
+}
+
+// MaxAngleError returns the maximum error for a ChordAngle constructed
+// as an Angle distance.
+func (c ChordAngle) MaxAngleError() float64 {
+ return dblEpsilon * float64(c)
+}
+
+// Add adds the other ChordAngle to this one and returns the resulting value.
+// This method assumes the ChordAngles are not special.
+func (c ChordAngle) Add(other ChordAngle) ChordAngle {
+ // Note that this method (and Sub) is much more efficient than converting
+ // the ChordAngle to an Angle and adding those and converting back. It
+ // requires only one square root plus a few additions and multiplications.
+
+ // Optimization for the common case where b is an error tolerance
+ // parameter that happens to be set to zero.
+ if other == 0 {
+ return c
+ }
+
+ // Clamp the angle sum to at most 180 degrees.
+ if c+other >= maxLength2 {
+ return StraightChordAngle
+ }
+
+ // Let a and b be the (non-squared) chord lengths, and let c = a+b.
+ // Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc).
+ // Then the formula below can be derived from c = 2 * sin(A+B) and the
+ // relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A)
+ // cos(X) = sqrt(1 - sin^2(X))
+ x := float64(c * (1 - 0.25*other))
+ y := float64(other * (1 - 0.25*c))
+ return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y)))
+}
+
+// Sub subtracts the other ChordAngle from this one and returns the resulting
+// value. This method assumes the ChordAngles are not special.
+func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
+ if other == 0 {
+ return c
+ }
+ if c <= other {
+ return 0
+ }
+ x := float64(c * (1 - 0.25*other))
+ y := float64(other * (1 - 0.25*c))
+ return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y)))
+}
+
+// Sin returns the sine of this chord angle. This method is more efficient
+// than converting to Angle and performing the computation.
+func (c ChordAngle) Sin() float64 {
+ return math.Sqrt(c.Sin2())
+}
+
+// Sin2 returns the square of the sine of this chord angle.
+// It is more efficient than Sin.
+func (c ChordAngle) Sin2() float64 {
+ // Let a be the (non-squared) chord length, and let A be the corresponding
+ // half-angle (a = 2*sin(A)). The formula below can be derived from:
+ // sin(2*A) = 2 * sin(A) * cos(A)
+ // cos^2(A) = 1 - sin^2(A)
+ // This is much faster than converting to an angle and computing its sine.
+ return float64(c * (1 - 0.25*c))
+}
+
+// Cos returns the cosine of this chord angle. This method is more efficient
+// than converting to Angle and performing the computation.
+func (c ChordAngle) Cos() float64 {
+ // cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A)
+ return float64(1 - 0.5*c)
+}
+
+// Tan returns the tangent of this chord angle.
+func (c ChordAngle) Tan() float64 {
+ return c.Sin() / c.Cos()
+}
+
+// TODO(roberts): Differences from C++:
+// Helpers to/from E5/E6/E7
+// Helpers to/from degrees and radians directly.
+// FastUpperBoundFrom(angle Angle)
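
A sketch of the ChordAngle trade-off described above (same vendored s1 path assumption):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	// A ChordAngle stores the squared chord length 4*sin²(θ/2) in [0, 4],
	// so ordering two of them is a single float64 comparison.
	a := s1.ChordAngleFromAngle(60 * s1.Degree)
	b := s1.ChordAngleFromAngle(61 * s1.Degree)
	fmt.Println(a < b) // true

	// The trig helpers work on the squared length directly, with no round
	// trip through Angle: sin²θ = c*(1 - c/4) and cos θ = 1 - c/2.
	fmt.Println(s1.RightChordAngle.Sin(), s1.RightChordAngle.Cos()) // 1 0

	// Converting the maximum finite chord angle back gives exactly π.
	fmt.Println(s1.StraightChordAngle.Angle() == math.Pi*s1.Radian) // true

	// Successor turns a strict "<" bound into a "<=" bound.
	fmt.Println(a < a.Successor()) // true
}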
diff --git a/vendor/github.com/golang/geo/s1/doc.go b/vendor/github.com/golang/geo/s1/doc.go
new file mode 100644
index 000000000..52a2c526d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package s1 implements types and functions for working with geometry in S¹ (circular geometry).
+
+See ../s2 for a more detailed overview.
+*/
+package s1
diff --git a/vendor/github.com/golang/geo/s1/interval.go b/vendor/github.com/golang/geo/s1/interval.go
new file mode 100644
index 000000000..6fea5221f
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/interval.go
@@ -0,0 +1,462 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s1
+
+import (
+ "math"
+ "strconv"
+)
+
+// An Interval represents a closed interval on a unit circle (also known
+// as a 1-dimensional sphere). It is capable of representing the empty
+// interval (containing no points), the full interval (containing all
+// points), and zero-length intervals (containing a single point).
+//
+// Points are represented by the angle they make with the positive x-axis in
+// the range [-π, π]. An interval is represented by its lower and upper
+// bounds (both inclusive, since the interval is closed). The lower bound may
+// be greater than the upper bound, in which case the interval is "inverted"
+// (i.e. it passes through the point (-1, 0)).
+//
+// The point (-1, 0) has two valid representations, π and -π. The
+// normalized representation of this point is π, so that endpoints
+// of normal intervals are in the range (-π, π]. We normalize the latter to
+// the former in IntervalFromEndpoints. However, we take advantage of the point
+// -π to construct two special intervals:
+// The full interval is [-π, π]
+// The empty interval is [π, -π].
+//
+// Treat the exported fields as read-only.
+type Interval struct {
+ Lo, Hi float64
+}
+
+// IntervalFromEndpoints constructs a new interval from endpoints.
+// Both arguments must be in the range [-π,π]. This function allows inverted intervals
+// to be created.
+func IntervalFromEndpoints(lo, hi float64) Interval {
+ i := Interval{lo, hi}
+ if lo == -math.Pi && hi != math.Pi {
+ i.Lo = math.Pi
+ }
+ if hi == -math.Pi && lo != math.Pi {
+ i.Hi = math.Pi
+ }
+ return i
+}
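+
+// For example (a sketch, angles in radians):
+//
+//	a := IntervalFromEndpoints(0, math.Pi/2) // the first quadrant
+//	b := IntervalFromEndpoints(math.Pi/2, 0) // inverted: passes through (-1, 0)
+//	_ = a.Contains(math.Pi / 4)              // true
+//	_ = b.Contains(math.Pi)                  // true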
+
+// IntervalFromPointPair returns the minimal interval containing the two given points.
+// Both arguments must be in [-π,π].
+func IntervalFromPointPair(a, b float64) Interval {
+ if a == -math.Pi {
+ a = math.Pi
+ }
+ if b == -math.Pi {
+ b = math.Pi
+ }
+ if positiveDistance(a, b) <= math.Pi {
+ return Interval{a, b}
+ }
+ return Interval{b, a}
+}
+
+// EmptyInterval returns an empty interval.
+func EmptyInterval() Interval { return Interval{math.Pi, -math.Pi} }
+
+// FullInterval returns a full interval.
+func FullInterval() Interval { return Interval{-math.Pi, math.Pi} }
+
+// IsValid reports whether the interval is valid.
+func (i Interval) IsValid() bool {
+ return (math.Abs(i.Lo) <= math.Pi && math.Abs(i.Hi) <= math.Pi &&
+ !(i.Lo == -math.Pi && i.Hi != math.Pi) &&
+ !(i.Hi == -math.Pi && i.Lo != math.Pi))
+}
+
+// IsFull reports whether the interval is full.
+func (i Interval) IsFull() bool { return i.Lo == -math.Pi && i.Hi == math.Pi }
+
+// IsEmpty reports whether the interval is empty.
+func (i Interval) IsEmpty() bool { return i.Lo == math.Pi && i.Hi == -math.Pi }
+
+// IsInverted reports whether the interval is inverted; that is, whether Lo > Hi.
+func (i Interval) IsInverted() bool { return i.Lo > i.Hi }
+
+// Invert returns the interval with endpoints swapped.
+func (i Interval) Invert() Interval {
+ return Interval{i.Hi, i.Lo}
+}
+
+// Center returns the midpoint of the interval.
+// It is undefined for full and empty intervals.
+func (i Interval) Center() float64 {
+ c := 0.5 * (i.Lo + i.Hi)
+ if !i.IsInverted() {
+ return c
+ }
+ if c <= 0 {
+ return c + math.Pi
+ }
+ return c - math.Pi
+}
+
+// Length returns the length of the interval.
+// The length of an empty interval is negative.
+func (i Interval) Length() float64 {
+ l := i.Hi - i.Lo
+ if l >= 0 {
+ return l
+ }
+ l += 2 * math.Pi
+ if l > 0 {
+ return l
+ }
+ return -1
+}
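+
+// For example, the inverted interval [π/2, 0] has length 0 - π/2 + 2π = 3π/2.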
+
+// Assumes p ∈ (-π,π].
+func (i Interval) fastContains(p float64) bool {
+ if i.IsInverted() {
+ return (p >= i.Lo || p <= i.Hi) && !i.IsEmpty()
+ }
+ return p >= i.Lo && p <= i.Hi
+}
+
+// Contains returns true iff the interval contains p.
+// Assumes p ∈ [-π,π].
+func (i Interval) Contains(p float64) bool {
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ return i.fastContains(p)
+}
+
+// ContainsInterval returns true iff the interval contains oi.
+func (i Interval) ContainsInterval(oi Interval) bool {
+ if i.IsInverted() {
+ if oi.IsInverted() {
+ return oi.Lo >= i.Lo && oi.Hi <= i.Hi
+ }
+ return (oi.Lo >= i.Lo || oi.Hi <= i.Hi) && !i.IsEmpty()
+ }
+ if oi.IsInverted() {
+ return i.IsFull() || oi.IsEmpty()
+ }
+ return oi.Lo >= i.Lo && oi.Hi <= i.Hi
+}
+
+// InteriorContains returns true iff the interior of the interval contains p.
+// Assumes p ∈ [-π,π].
+func (i Interval) InteriorContains(p float64) bool {
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ if i.IsInverted() {
+ return p > i.Lo || p < i.Hi
+ }
+ return (p > i.Lo && p < i.Hi) || i.IsFull()
+}
+
+// InteriorContainsInterval returns true iff the interior of the interval contains oi.
+func (i Interval) InteriorContainsInterval(oi Interval) bool {
+ if i.IsInverted() {
+ if oi.IsInverted() {
+ return (oi.Lo > i.Lo && oi.Hi < i.Hi) || oi.IsEmpty()
+ }
+ return oi.Lo > i.Lo || oi.Hi < i.Hi
+ }
+ if oi.IsInverted() {
+ return i.IsFull() || oi.IsEmpty()
+ }
+ return (oi.Lo > i.Lo && oi.Hi < i.Hi) || i.IsFull()
+}
+
+// Intersects returns true iff the interval contains any points in common with oi.
+func (i Interval) Intersects(oi Interval) bool {
+ if i.IsEmpty() || oi.IsEmpty() {
+ return false
+ }
+ if i.IsInverted() {
+ return oi.IsInverted() || oi.Lo <= i.Hi || oi.Hi >= i.Lo
+ }
+ if oi.IsInverted() {
+ return oi.Lo <= i.Hi || oi.Hi >= i.Lo
+ }
+ return oi.Lo <= i.Hi && oi.Hi >= i.Lo
+}
+
+// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
+func (i Interval) InteriorIntersects(oi Interval) bool {
+ if i.IsEmpty() || oi.IsEmpty() || i.Lo == i.Hi {
+ return false
+ }
+ if i.IsInverted() {
+ return oi.IsInverted() || oi.Lo < i.Hi || oi.Hi > i.Lo
+ }
+ if oi.IsInverted() {
+ return oi.Lo < i.Hi || oi.Hi > i.Lo
+ }
+ return (oi.Lo < i.Hi && oi.Hi > i.Lo) || i.IsFull()
+}
+
+// Compute distance from a to b in [0,2π], in a numerically stable way.
+func positiveDistance(a, b float64) float64 {
+ d := b - a
+ if d >= 0 {
+ return d
+ }
+ return (b + math.Pi) - (a - math.Pi)
+}
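+
+// The regrouped form above is what provides the stability: when a ≈ π and
+// b ≈ -π, computing (b-a)+2π would lose the low-order bits of the small
+// difference to rounding, while (b+π)-(a-π) computes each small term without
+// cancellation error.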
+
+// Union returns the smallest interval that contains both the interval and oi.
+func (i Interval) Union(oi Interval) Interval {
+ if oi.IsEmpty() {
+ return i
+ }
+ if i.fastContains(oi.Lo) {
+ if i.fastContains(oi.Hi) {
+ // Either oi ⊂ i, or i ∪ oi is the full interval.
+ if i.ContainsInterval(oi) {
+ return i
+ }
+ return FullInterval()
+ }
+ return Interval{i.Lo, oi.Hi}
+ }
+ if i.fastContains(oi.Hi) {
+ return Interval{oi.Lo, i.Hi}
+ }
+
+ // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
+ if i.IsEmpty() || oi.fastContains(i.Lo) {
+ return oi
+ }
+
+ // This is the only hard case where we need to find the closest pair of endpoints.
+ if positiveDistance(oi.Hi, i.Lo) < positiveDistance(i.Hi, oi.Lo) {
+ return Interval{oi.Lo, i.Hi}
+ }
+ return Interval{i.Lo, oi.Hi}
+}
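+
+// For example, the union of the disjoint intervals [0, 1] and [2, 3] is
+// [0, 3]: the gap from 1 to 2 is shorter than the gap from 3 back around
+// through ±π to 0.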
+
+// Intersection returns the smallest interval that contains the intersection of the interval and oi.
+func (i Interval) Intersection(oi Interval) Interval {
+ if oi.IsEmpty() {
+ return EmptyInterval()
+ }
+ if i.fastContains(oi.Lo) {
+ if i.fastContains(oi.Hi) {
+		// Either oi ⊂ i, or i and oi intersect twice. Neither is empty.
+ // In the first case we want to return i (which is shorter than oi).
+ // In the second case one of them is inverted, and the smallest interval
+ // that covers the two disjoint pieces is the shorter of i and oi.
+ // We thus want to pick the shorter of i and oi in both cases.
+ if oi.Length() < i.Length() {
+ return oi
+ }
+ return i
+ }
+ return Interval{oi.Lo, i.Hi}
+ }
+ if i.fastContains(oi.Hi) {
+ return Interval{i.Lo, oi.Hi}
+ }
+
+ // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
+ if oi.fastContains(i.Lo) {
+ return i
+ }
+ return EmptyInterval()
+}
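+
+// For example, the intersection of [0, 2] and [1, 3] is [1, 2].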
+
+// AddPoint returns the interval expanded by the minimum amount necessary such
+// that it contains the given point "p" (an angle in the range [-π, π]).
+func (i Interval) AddPoint(p float64) Interval {
+ if math.Abs(p) > math.Pi {
+ return i
+ }
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ if i.fastContains(p) {
+ return i
+ }
+ if i.IsEmpty() {
+ return Interval{p, p}
+ }
+ if positiveDistance(p, i.Lo) < positiveDistance(i.Hi, p) {
+ return Interval{p, i.Hi}
+ }
+ return Interval{i.Lo, p}
+}
+
+// Define the maximum rounding error for arithmetic operations. The mantissa
+// precision can vary across platforms, so we use explicit constants here to
+// stay consistent everywhere. The values come from the C++ implementation.
+var (
+ // epsilon is a small number that represents a reasonable level of noise between two
+ // values that can be considered to be equal.
+ epsilon = 1e-15
+	// dblEpsilon is a smaller number for values that require more precision.
+	// It is a truncated form of 2^-52, the double-precision machine epsilon.
+ dblEpsilon = 2.220446049e-16
+)
+
+// Expanded returns an interval that has been expanded on each side by margin.
+// If margin is negative, then the function shrinks the interval on
+// each side by margin instead. The resulting interval may be empty or
+// full. Any expansion (positive or negative) of a full interval remains
+// full, and any expansion of an empty interval remains empty.
+func (i Interval) Expanded(margin float64) Interval {
+ if margin >= 0 {
+ if i.IsEmpty() {
+ return i
+ }
+ // Check whether this interval will be full after expansion, allowing
+ // for a rounding error when computing each endpoint.
+ if i.Length()+2*margin+2*dblEpsilon >= 2*math.Pi {
+ return FullInterval()
+ }
+ } else {
+ if i.IsFull() {
+ return i
+ }
+		// Check whether this interval will be empty after shrinking, allowing
+		// for a rounding error when computing each endpoint.
+ if i.Length()+2*margin-2*dblEpsilon <= 0 {
+ return EmptyInterval()
+ }
+ }
+ result := IntervalFromEndpoints(
+ math.Remainder(i.Lo-margin, 2*math.Pi),
+ math.Remainder(i.Hi+margin, 2*math.Pi),
+ )
+ if result.Lo <= -math.Pi {
+ result.Lo = math.Pi
+ }
+ return result
+}
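+
+// For example, expanding the singleton [0, 0] by π/2 yields [-π/2, π/2],
+// while shrinking [0, π] by π/4 yields [π/4, 3π/4].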
+
+// ApproxEqual reports whether this interval can be transformed into the given
+// interval by moving each endpoint by at most ε, without the
+// endpoints crossing (which would invert the interval). Empty and full
+// intervals are considered to start at an arbitrary point on the unit circle,
+// so any interval with (length <= 2*ε) matches the empty interval, and
+// any interval with (length >= 2*π - 2*ε) matches the full interval.
+func (i Interval) ApproxEqual(other Interval) bool {
+ // Full and empty intervals require special cases because the endpoints
+ // are considered to be positioned arbitrarily.
+ if i.IsEmpty() {
+ return other.Length() <= 2*epsilon
+ }
+ if other.IsEmpty() {
+ return i.Length() <= 2*epsilon
+ }
+ if i.IsFull() {
+ return other.Length() >= 2*(math.Pi-epsilon)
+ }
+ if other.IsFull() {
+ return i.Length() >= 2*(math.Pi-epsilon)
+ }
+
+ // The purpose of the last test below is to verify that moving the endpoints
+ // does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20].
+ return (math.Abs(math.Remainder(other.Lo-i.Lo, 2*math.Pi)) <= epsilon &&
+ math.Abs(math.Remainder(other.Hi-i.Hi, 2*math.Pi)) <= epsilon &&
+ math.Abs(i.Length()-other.Length()) <= 2*epsilon)
+
+}
+
+func (i Interval) String() string {
+ // like "[%.7f, %.7f]"
+ return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]"
+}
+
+// Complement returns the complement of the interior of the interval. An interval and
+// its complement have the same boundary but do not share any interior
+// values. The complement operator is not a bijection, since the complement
+// of a singleton interval (containing a single value) is the same as the
+// complement of an empty interval.
+func (i Interval) Complement() Interval {
+ if i.Lo == i.Hi {
+ // Singleton. The interval just contains a single point.
+ return FullInterval()
+ }
+ // Handles empty and full.
+ return Interval{i.Hi, i.Lo}
+}
+
+// ComplementCenter returns the midpoint of the complement of the interval. For full and empty
+// intervals, the result is arbitrary. For a singleton interval (containing a
+// single point), the result is its antipodal point on S1.
+func (i Interval) ComplementCenter() float64 {
+ if i.Lo != i.Hi {
+ return i.Complement().Center()
+ }
+ // Singleton. The interval just contains a single point.
+ if i.Hi <= 0 {
+ return i.Hi + math.Pi
+ }
+ return i.Hi - math.Pi
+}
+
+// DirectedHausdorffDistance returns the Hausdorff distance to the given interval.
+// For two intervals i and y, this distance is defined by
+// h(i, y) = max_{p in i} min_{q in y} d(p, q),
+// where d(.,.) is measured along S1.
+func (i Interval) DirectedHausdorffDistance(y Interval) Angle {
+ if y.ContainsInterval(i) {
+ return 0 // This includes the case i is empty.
+ }
+ if y.IsEmpty() {
+		return Angle(math.Pi) // The maximum possible distance on S¹.
+ }
+ yComplementCenter := y.ComplementCenter()
+ if i.Contains(yComplementCenter) {
+ return Angle(positiveDistance(y.Hi, yComplementCenter))
+ }
+
+ // The Hausdorff distance is realized by either two i.Hi endpoints or two
+ // i.Lo endpoints, whichever is farther apart.
+ hiHi := 0.0
+ if IntervalFromEndpoints(y.Hi, yComplementCenter).Contains(i.Hi) {
+ hiHi = positiveDistance(y.Hi, i.Hi)
+ }
+
+ loLo := 0.0
+ if IntervalFromEndpoints(yComplementCenter, y.Lo).Contains(i.Lo) {
+ loLo = positiveDistance(i.Lo, y.Lo)
+ }
+
+ return Angle(math.Max(hiHi, loLo))
+}
+
+// Project returns the closest point in the interval to the given point p.
+// The interval must be non-empty.
+func (i Interval) Project(p float64) float64 {
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ if i.fastContains(p) {
+ return p
+ }
+ // Compute distance from p to each endpoint.
+ dlo := positiveDistance(p, i.Lo)
+ dhi := positiveDistance(i.Hi, p)
+ if dlo < dhi {
+ return i.Lo
+ }
+ return i.Hi
+}
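+
+// For example, projecting p = π onto the interval [0, π/2] returns π/2, the
+// nearer endpoint.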
diff --git a/vendor/github.com/golang/geo/s2/bits_go18.go b/vendor/github.com/golang/geo/s2/bits_go18.go
new file mode 100644
index 000000000..10a674da5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/bits_go18.go
@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package s2
+
+// This file is for the bit manipulation code pre-Go 1.9.
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+ val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
+ shift := []uint64{1, 2, 4, 8, 16, 32}
+ var msbPos uint64
+ for i := 5; i >= 0; i-- {
+ if x&val[i] != 0 {
+ x >>= shift[i]
+ msbPos |= shift[i]
+ }
+ }
+ return int(msbPos)
+}
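+
+// For example, for x = 1<<40 the i=5 pass shifts x right by 32 and sets
+// msbPos to 32, the i=3 pass shifts by another 8 and sets msbPos to 40, and
+// the remaining passes match nothing, so the result is 40.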
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+const digitMask = uint64(1<<64 - 1)
+
+var deBruijn64Lookup = []byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+//
+// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
+// which references (Knuth, volume 4, section 7.3.1).
+func findLSBSetNonZero64(x uint64) int {
+ return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
+}
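+
+// For example, findLSBSetNonZero64(8): x & -x isolates the lowest set bit
+// (8 == 1<<3), multiplying by the de Bruijn constant places a unique 6-bit
+// pattern in the top bits, the shift by 58 extracts that pattern (7 here),
+// and deBruijn64Lookup[7] maps it back to the bit index 3.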
diff --git a/vendor/github.com/golang/geo/s2/bits_go19.go b/vendor/github.com/golang/geo/s2/bits_go19.go
new file mode 100644
index 000000000..9532b377d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/bits_go19.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package s2
+
+// This file is for the bit manipulation code post-Go 1.9.
+
+import "math/bits"
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function returns zero.
+func findMSBSetNonZero64(x uint64) int {
+ if x == 0 {
+ return 0
+ }
+ return 63 - bits.LeadingZeros64(x)
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function returns zero.
+func findLSBSetNonZero64(x uint64) int {
+ if x == 0 {
+ return 0
+ }
+ return bits.TrailingZeros64(x)
+}
diff --git a/vendor/github.com/golang/geo/s2/cap.go b/vendor/github.com/golang/geo/s2/cap.go
new file mode 100644
index 000000000..c4fb2e1e0
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cap.go
@@ -0,0 +1,519 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/s1"
+)
+
+var (
+	// centerPoint is the default center for Caps.
+ centerPoint = PointFromCoords(1.0, 0, 0)
+)
+
+// Cap represents a disc-shaped region defined by a center and radius.
+// Technically this shape is called a "spherical cap" (rather than disc)
+// because it is not planar; the cap represents a portion of the sphere that
+// has been cut off by a plane. The boundary of the cap is the circle defined
+// by the intersection of the sphere and the plane. For containment purposes,
+// the cap is a closed set, i.e. it contains its boundary.
+//
+// For the most part, you can use a spherical cap wherever you would use a
+// disc in planar geometry. The radius of the cap is measured along the
+// surface of the sphere (rather than the straight-line distance through the
+// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius
+// π covers the entire sphere.
+//
+// The center is a point on the surface of the unit sphere. (Hence the need for
+// it to be of unit length.)
+//
+// A cap can also be defined by its center point and height. The height is the
+// distance from the center point to the cutoff plane. There is also support for
+// "empty" and "full" caps, which contain no points and all points respectively.
+//
+// Here are some useful relationships between the cap height (h), the cap
+// radius (r), the maximum chord length from the cap's center (d), and the
+// radius of cap's base (a).
+//
+// h = 1 - cos(r)
+// = 2 * sin^2(r/2)
+// d^2 = 2 * h
+// = a^2 + h^2
+//
+// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap.
+type Cap struct {
+ center Point
+ radius s1.ChordAngle
+}
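+
+// For example, a hemisphere (r = π/2) has height h = 1 - cos(π/2) = 1 and
+// maximum chord length d = sqrt(2*h) = sqrt(2), the chord between two
+// orthogonal unit vectors.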
+
+// CapFromPoint constructs a cap containing a single point.
+func CapFromPoint(p Point) Cap {
+ return CapFromCenterChordAngle(p, 0)
+}
+
+// CapFromCenterAngle constructs a cap with the given center and angle.
+func CapFromCenterAngle(center Point, angle s1.Angle) Cap {
+ return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle))
+}
+
+// CapFromCenterChordAngle constructs a cap where the angle is expressed as an
+// s1.ChordAngle. This constructor is more efficient than using an s1.Angle.
+func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap {
+ return Cap{
+ center: center,
+ radius: radius,
+ }
+}
+
+// CapFromCenterHeight constructs a cap with the given center and height. A
+// negative height yields an empty cap; a height of 2 or more yields a full cap.
+// The center should be unit length.
+func CapFromCenterHeight(center Point, height float64) Cap {
+ return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height))
+}
+
+// CapFromCenterArea constructs a cap with the given center and surface area.
+// Note that the area can also be interpreted as the solid angle subtended by the
+// cap (because the sphere has unit radius). A negative area yields an empty cap;
+// an area of 4*π or more yields a full cap.
+func CapFromCenterArea(center Point, area float64) Cap {
+ return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi))
+}
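+
+// The conversion above follows from area = 2*π*h (see Area) and d^2 = 2*h,
+// which give a squared chord length of area/π.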
+
+// EmptyCap returns a cap that contains no points.
+func EmptyCap() Cap {
+ return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle)
+}
+
+// FullCap returns a cap that contains all points.
+func FullCap() Cap {
+ return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle)
+}
+
+// IsValid reports whether the Cap is considered valid.
+func (c Cap) IsValid() bool {
+ return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle
+}
+
+// IsEmpty reports whether the cap is empty, i.e. it contains no points.
+func (c Cap) IsEmpty() bool {
+ return c.radius < 0
+}
+
+// IsFull reports whether the cap is full, i.e. it contains all points.
+func (c Cap) IsFull() bool {
+ return c.radius == s1.StraightChordAngle
+}
+
+// Center returns the cap's center point.
+func (c Cap) Center() Point {
+ return c.center
+}
+
+// Height returns the height of the cap. This is the distance from the center
+// point to the cutoff plane.
+func (c Cap) Height() float64 {
+ return float64(0.5 * c.radius)
+}
+
+// Radius returns the cap radius as an s1.Angle. (Note that the cap angle
+// is stored internally as a ChordAngle, so this method requires a trigonometric
+// operation and may yield a slightly different result than the value passed
+// to CapFromCenterAngle).
+func (c Cap) Radius() s1.Angle {
+ return c.radius.Angle()
+}
+
+// Area returns the surface area of the Cap on the unit sphere.
+func (c Cap) Area() float64 {
+ return 2.0 * math.Pi * math.Max(0, c.Height())
+}
+
+// Contains reports whether this cap contains the other.
+func (c Cap) Contains(other Cap) bool {
+ // In a set containment sense, every cap contains the empty cap.
+ if c.IsFull() || other.IsEmpty() {
+ return true
+ }
+ return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
+}
+
+// Intersects reports whether this cap intersects the other cap.
+// i.e. whether they have any points in common.
+func (c Cap) Intersects(other Cap) bool {
+ if c.IsEmpty() || other.IsEmpty() {
+ return false
+ }
+
+ return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center)
+}
+
+// InteriorIntersects reports whether this cap's interior intersects the other cap.
+func (c Cap) InteriorIntersects(other Cap) bool {
+ // Make sure this cap has an interior and the other cap is non-empty.
+ if c.radius <= 0 || other.IsEmpty() {
+ return false
+ }
+
+ return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center)
+}
+
+// ContainsPoint reports whether this cap contains the point.
+func (c Cap) ContainsPoint(p Point) bool {
+ return ChordAngleBetweenPoints(c.center, p) <= c.radius
+}
+
+// InteriorContainsPoint reports whether the point is within the interior of this cap.
+func (c Cap) InteriorContainsPoint(p Point) bool {
+ return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius
+}
+
+// Complement returns the complement of the interior of the cap. A cap and its
+// complement have the same boundary but do not share any interior points.
+// The complement operator is not a bijection because the complement of a
+// singleton cap (containing a single point) is the same as the complement
+// of an empty cap.
+func (c Cap) Complement() Cap {
+ if c.IsFull() {
+ return EmptyCap()
+ }
+ if c.IsEmpty() {
+ return FullCap()
+ }
+
+ return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius))
+}
+
+// CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
+func (c Cap) CapBound() Cap {
+ return c
+}
+
+// RectBound returns a bounding latitude-longitude rectangle.
+// The bounds are not guaranteed to be tight.
+func (c Cap) RectBound() Rect {
+ if c.IsEmpty() {
+ return EmptyRect()
+ }
+
+ capAngle := c.Radius().Radians()
+ allLongitudes := false
+ lat := r1.Interval{
+ Lo: latitude(c.center).Radians() - capAngle,
+ Hi: latitude(c.center).Radians() + capAngle,
+ }
+ lng := s1.FullInterval()
+
+ // Check whether cap includes the south pole.
+ if lat.Lo <= -math.Pi/2 {
+ lat.Lo = -math.Pi / 2
+ allLongitudes = true
+ }
+
+ // Check whether cap includes the north pole.
+ if lat.Hi >= math.Pi/2 {
+ lat.Hi = math.Pi / 2
+ allLongitudes = true
+ }
+
+ if !allLongitudes {
+ // Compute the range of longitudes covered by the cap. We use the law
+ // of sines for spherical triangles. Consider the triangle ABC where
+ // A is the north pole, B is the center of the cap, and C is the point
+ // of tangency between the cap boundary and a line of longitude. Then
+ // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
+ // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
+ // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
+ // minus the latitude). This formula also works for negative latitudes.
+ //
+ // The formula for sin(a) follows from the relationship h = 1 - cos(a).
+ sinA := c.radius.Sin()
+ sinC := math.Cos(latitude(c.center).Radians())
+ if sinA <= sinC {
+ angleA := math.Asin(sinA / sinC)
+ lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2)
+ lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2)
+ }
+ }
+ return Rect{lat, lng}
+}
+
+// Equal reports whether this cap is equal to the other cap.
+func (c Cap) Equal(other Cap) bool {
+ return (c.radius == other.radius && c.center == other.center) ||
+ (c.IsEmpty() && other.IsEmpty()) ||
+ (c.IsFull() && other.IsFull())
+}
+
+// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance.
+func (c Cap) ApproxEqual(other Cap) bool {
+ const epsilon = 1e-14
+ r2 := float64(c.radius)
+ otherR2 := float64(other.radius)
+ return c.center.ApproxEqual(other.center) &&
+ math.Abs(r2-otherR2) <= epsilon ||
+ c.IsEmpty() && otherR2 <= epsilon ||
+ other.IsEmpty() && r2 <= epsilon ||
+ c.IsFull() && otherR2 >= 2-epsilon ||
+ other.IsFull() && r2 >= 2-epsilon
+}
+
+// AddPoint increases the cap if necessary to include the given point. If this cap is empty,
+// then the center is set to the point with a zero height. p must be unit-length.
+func (c Cap) AddPoint(p Point) Cap {
+ if c.IsEmpty() {
+ c.center = p
+ c.radius = 0
+ return c
+ }
+
+ // After calling cap.AddPoint(p), cap.Contains(p) must be true. However
+ // we don't need to do anything special to achieve this because Contains()
+ // does exactly the same distance calculation that we do here.
+ if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius {
+ c.radius = newRad
+ }
+ return c
+}
+
+// AddCap increases the cap height if necessary to include the other cap. If this cap is empty,
+// it is set to the other cap.
+func (c Cap) AddCap(other Cap) Cap {
+ if c.IsEmpty() {
+ return other
+ }
+ if other.IsEmpty() {
+ return c
+ }
+
+ // We round up the distance to ensure that the cap is actually contained.
+ // TODO(roberts): Do some error analysis in order to guarantee this.
+ dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius)
+ if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius {
+ c.radius = newRad
+ }
+ return c
+}
+
+// Expanded returns a new cap expanded by the given angle. If the cap is empty,
+// it returns an empty cap.
+func (c Cap) Expanded(distance s1.Angle) Cap {
+ if c.IsEmpty() {
+ return EmptyCap()
+ }
+ return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance)))
+}
+
+func (c Cap) String() string {
+ return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees())
+}
+
+// radiusToHeight converts an s1.Angle into the height of the cap.
+func radiusToHeight(r s1.Angle) float64 {
+ if r.Radians() < 0 {
+ return float64(s1.NegativeChordAngle)
+ }
+ if r.Radians() >= math.Pi {
+ return float64(s1.RightChordAngle)
+ }
+	return float64(0.5 * s1.ChordAngleFromAngle(r))
+}
+
+// ContainsCell reports whether the cap contains the given cell.
+func (c Cap) ContainsCell(cell Cell) bool {
+ // If the cap does not contain all cell vertices, return false.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if !c.ContainsPoint(vertices[k]) {
+ return false
+ }
+ }
+ // Otherwise, return true if the complement of the cap does not intersect the cell.
+ return !c.Complement().intersects(cell, vertices)
+}
+
+// IntersectsCell reports whether the cap intersects the cell.
+func (c Cap) IntersectsCell(cell Cell) bool {
+ // If the cap contains any cell vertex, return true.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if c.ContainsPoint(vertices[k]) {
+ return true
+ }
+ }
+ return c.intersects(cell, vertices)
+}
+
+// intersects reports whether the cap intersects any point of the cell excluding
+// its vertices (which are assumed to already have been checked).
+func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
+ // If the cap is a hemisphere or larger, the cell and the complement of the cap
+ // are both convex. Therefore since no vertex of the cell is contained, no other
+ // interior point of the cell is contained either.
+ if c.radius >= s1.RightChordAngle {
+ return false
+ }
+
+ // We need to check for empty caps due to the center check just below.
+ if c.IsEmpty() {
+ return false
+ }
+
+ // Optimization: return true if the cell contains the cap center. This allows half
+ // of the edge checks below to be skipped.
+ if cell.ContainsPoint(c.center) {
+ return true
+ }
+
+ // At this point we know that the cell does not contain the cap center, and the cap
+ // does not contain any cell vertex. The only way that they can intersect is if the
+ // cap intersects the interior of some edge.
+ sin2Angle := c.radius.Sin2()
+ for k := 0; k < 4; k++ {
+ edge := cell.Edge(k).Vector
+ dot := c.center.Vector.Dot(edge)
+ if dot > 0 {
+ // The center is in the interior half-space defined by the edge. We do not need
+ // to consider these edges, since if the cap intersects this edge then it also
+ // intersects the edge on the opposite side of the cell, because the center is
+			// not contained within the cell.
+ continue
+ }
+
+ // The Norm2() factor is necessary because "edge" is not normalized.
+ if dot*dot > sin2Angle*edge.Norm2() {
+			return false // The entire cap is on the exterior side of this edge.
+ }
+
+ // Otherwise, the great circle containing this edge intersects the interior of the cap. We just
+ // need to check whether the point of closest approach occurs between the two edge endpoints.
+ dir := edge.Cross(c.center.Vector)
+ if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// CellUnionBound computes a covering of the Cap. In general the covering
+// consists of at most 4 cells except for very large caps, which may need
+// up to 6 cells. The output is not sorted.
+func (c Cap) CellUnionBound() []CellID {
+ // TODO(roberts): The covering could be made quite a bit tighter by mapping
+ // the cap to a rectangle in (i,j)-space and finding a covering for that.
+
+ // Find the maximum level such that the cap contains at most one cell vertex
+	// and such that CellID.VertexNeighbors() can be called.
+ level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1
+
+ // If level < 0, more than three face cells are required.
+ if level < 0 {
+ cellIDs := make([]CellID, 6)
+ for face := 0; face < 6; face++ {
+ cellIDs[face] = CellIDFromFace(face)
+ }
+ return cellIDs
+ }
+ // The covering consists of the 4 cells at the given level that share the
+ // cell vertex that is closest to the cap center.
+ return cellIDFromPoint(c.center).VertexNeighbors(level)
+}
+
+// Centroid returns the true centroid of the cap multiplied by its surface area.
+// The result lies on the ray from the origin through the cap's center, but it
+// is not unit length. Note that if you just want the "surface centroid", i.e.
+// the normalized result, then it is simpler to call Center.
+//
+// The reason for multiplying the result by the cap area is to make it
+// easier to compute the centroid of more complicated shapes. The centroid
+// of a union of disjoint regions can be computed simply by adding their
+// Centroid() results. Caveat: for caps that contain a single point
+// (i.e., zero radius), this method always returns the origin (0, 0, 0).
+// This is because shapes with no area don't affect the centroid of a
+// union whose total area is positive.
+func (c Cap) Centroid() Point {
+ // From symmetry, the centroid of the cap must be somewhere on the line
+ // from the origin to the center of the cap on the surface of the sphere.
+ // When a sphere is divided into slices of constant thickness by a set of
+ // parallel planes, all slices have the same surface area. This implies
+ // that the radial component of the centroid is simply the midpoint of the
+ // range of radial distances spanned by the cap. That is easily computed
+ // from the cap height.
+ if c.IsEmpty() {
+ return Point{}
+ }
+ r := 1 - 0.5*c.Height()
+ return Point{c.center.Mul(r * c.Area())}
+}
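+
+// For example, for a hemisphere (height 1) the radial midpoint is 0.5 and
+// the area is 2π, so Centroid returns the center scaled by π.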
+
+// Union returns the smallest cap which encloses this cap and other.
+func (c Cap) Union(other Cap) Cap {
+ // If the other cap is larger, swap c and other for the rest of the computations.
+ if c.radius < other.radius {
+ c, other = other, c
+ }
+
+ if c.IsFull() || other.IsEmpty() {
+ return c
+ }
+
+ // TODO: This calculation would be more efficient using s1.ChordAngles.
+ cRadius := c.Radius()
+ otherRadius := other.Radius()
+ distance := c.center.Distance(other.center)
+ if cRadius >= distance+otherRadius {
+ return c
+ }
+
+ resRadius := 0.5 * (distance + cRadius + otherRadius)
+ resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
+ return CapFromCenterAngle(resCenter, resRadius)
+}
+
+// Encode encodes the Cap.
+func (c Cap) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ c.encode(e)
+ return e.err
+}
+
+func (c Cap) encode(e *encoder) {
+ e.writeFloat64(c.center.X)
+ e.writeFloat64(c.center.Y)
+ e.writeFloat64(c.center.Z)
+ e.writeFloat64(float64(c.radius))
+}
+
+// Decode decodes the Cap.
+func (c *Cap) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ c.decode(d)
+ return d.err
+}
+
+func (c *Cap) decode(d *decoder) {
+ c.center.X = d.readFloat64()
+ c.center.Y = d.readFloat64()
+ c.center.Z = d.readFloat64()
+ c.radius = s1.ChordAngle(d.readFloat64())
+}
diff --git a/vendor/github.com/golang/geo/s2/cell.go b/vendor/github.com/golang/geo/s2/cell.go
new file mode 100644
index 000000000..0a01a4f1f
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cell.go
@@ -0,0 +1,698 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "io"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Cell is an S2 region object that represents a cell. Unlike CellIDs,
+// it supports efficient containment and intersection tests. However, it is
+// also a more expensive representation.
+type Cell struct {
+ face int8
+ level int8
+ orientation int8
+ id CellID
+ uv r2.Rect
+}
+
+// CellFromCellID constructs a Cell corresponding to the given CellID.
+func CellFromCellID(id CellID) Cell {
+ c := Cell{}
+ c.id = id
+ f, i, j, o := c.id.faceIJOrientation()
+ c.face = int8(f)
+ c.level = int8(c.id.Level())
+ c.orientation = int8(o)
+ c.uv = ijLevelToBoundUV(i, j, int(c.level))
+ return c
+}
+
+// CellFromPoint constructs a cell for the given Point.
+func CellFromPoint(p Point) Cell {
+ return CellFromCellID(cellIDFromPoint(p))
+}
+
+// CellFromLatLng constructs a cell for the given LatLng.
+func CellFromLatLng(ll LatLng) Cell {
+ return CellFromCellID(CellIDFromLatLng(ll))
+}
+
+// Face returns the face this cell is on.
+func (c Cell) Face() int {
+ return int(c.face)
+}
+
+// oppositeFace returns the face opposite the given face.
+func oppositeFace(face int) int {
+ return (face + 3) % 6
+}
+
+// Level returns the level of this cell.
+func (c Cell) Level() int {
+ return int(c.level)
+}
+
+// ID returns the CellID this cell represents.
+func (c Cell) ID() CellID {
+ return c.id
+}
+
+// IsLeaf reports whether this Cell is a leaf.
+func (c Cell) IsLeaf() bool {
+ return c.level == maxLevel
+}
+
+// SizeIJ returns the edge length of this cell in (i,j)-space.
+func (c Cell) SizeIJ() int {
+ return sizeIJ(int(c.level))
+}
+
+// SizeST returns the edge length of this cell in (s,t)-space.
+func (c Cell) SizeST() float64 {
+ return c.id.sizeST(int(c.level))
+}
+
+// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
+// (lower left, lower right, upper right, upper left in the UV plane).
+func (c Cell) Vertex(k int) Point {
+ return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()}
+}
+
+// Edge returns the inward-facing normal of the great circle passing through
+// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
+func (c Cell) Edge(k int) Point {
+ switch k {
+ case 0:
+ return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
+ case 1:
+ return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
+ case 2:
+ return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
+ default:
+ return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
+ }
+}
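+
+// Since the normals face inward, a point strictly inside the cell has a
+// positive dot product with all four Edge vectors.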
+
+// BoundUV returns the bounds of this cell in (u,v)-space.
+func (c Cell) BoundUV() r2.Rect {
+ return c.uv
+}
+
+// Center returns the direction vector corresponding to the center in
+// (s,t)-space of the given cell. This is the point at which the cell is
+// divided into four subcells; it is not necessarily the centroid of the
+// cell in (u,v)-space or (x,y,z)-space.
+func (c Cell) Center() Point {
+ return Point{c.id.rawPoint().Normalize()}
+}
+
+// Children returns the four direct children of this cell in traversal order,
+// along with true. If this is a leaf cell, or the children could not be
+// created, false is returned.
+// The C++ method is called Subdivide.
+func (c Cell) Children() ([4]Cell, bool) {
+ var children [4]Cell
+
+ if c.id.IsLeaf() {
+ return children, false
+ }
+
+ // Compute the cell midpoint in uv-space.
+ uvMid := c.id.centerUV()
+
+ // Create four children with the appropriate bounds.
+ cid := c.id.ChildBegin()
+ for pos := 0; pos < 4; pos++ {
+ children[pos] = Cell{
+ face: c.face,
+ level: c.level + 1,
+ orientation: c.orientation ^ int8(posToOrientation[pos]),
+ id: cid,
+ }
+
+ // We want to split the cell in half in u and v. To decide which
+ // side to set equal to the midpoint value, we look at cell's (i,j)
+ // position within its parent. The index for i is in bit 1 of ij.
+ ij := posToIJ[c.orientation][pos]
+ i := ij >> 1
+ j := ij & 1
+ if i == 1 {
+ children[pos].uv.X.Hi = c.uv.X.Hi
+ children[pos].uv.X.Lo = uvMid.X
+ } else {
+ children[pos].uv.X.Lo = c.uv.X.Lo
+ children[pos].uv.X.Hi = uvMid.X
+ }
+ if j == 1 {
+ children[pos].uv.Y.Hi = c.uv.Y.Hi
+ children[pos].uv.Y.Lo = uvMid.Y
+ } else {
+ children[pos].uv.Y.Lo = c.uv.Y.Lo
+ children[pos].uv.Y.Hi = uvMid.Y
+ }
+ cid = cid.Next()
+ }
+ return children, true
+}
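+
+// A minimal traversal sketch (illustrative only), for some previously
+// constructed Cell value cell:
+//
+//	if children, ok := cell.Children(); ok {
+//		for _, child := range children {
+//			_ = child.ApproxArea()
+//		}
+//	}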
+
+// ExactArea returns the area of this cell as accurately as possible.
+func (c Cell) ExactArea() float64 {
+ v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3)
+ return PointArea(v0, v1, v2) + PointArea(v0, v2, v3)
+}
+
+// ApproxArea returns the approximate area of this cell. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's
+// surface). It is moderately cheap to compute.
+func (c Cell) ApproxArea() float64 {
+ // All cells at the first two levels have the same area.
+ if c.level < 2 {
+ return c.AverageArea()
+ }
+
+ // First, compute the approximate area of the cell when projected
+ // perpendicular to its normal. The cross product of its diagonals gives
+ // the normal, and the length of the normal is twice the projected area.
+ flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector).
+ Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm())
+
+ // Now, compensate for the curvature of the cell surface by pretending
+ // that the cell is shaped like a spherical cap. The ratio of the
+ // area of a spherical cap to the area of its projected disc turns out
+ // to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc.
+ // For example, when r=0 the ratio is 1, and when r=1 the ratio is 2.
+ // Here we set Pi*r*r == flatArea to find the equivalent disc.
+ return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1)))
+}
+
+// AverageArea returns the average area of cells at the level of this cell.
+// This is accurate to within a factor of 1.7.
+func (c Cell) AverageArea() float64 {
+ return AvgAreaMetric.Value(int(c.level))
+}
+
+// IntersectsCell reports whether the intersection of this cell and the other cell is not nil.
+func (c Cell) IntersectsCell(oc Cell) bool {
+ return c.id.Intersects(oc.id)
+}
+
+// ContainsCell reports whether this cell contains the other cell.
+func (c Cell) ContainsCell(oc Cell) bool {
+ return c.id.Contains(oc.id)
+}
+
+// CellUnionBound computes a covering of the Cell.
+func (c Cell) CellUnionBound() []CellID {
+ return c.CapBound().CellUnionBound()
+}
+
+// latitude returns the latitude of the cell vertex in radians given by (i,j),
+// where i and j indicate the Hi (1) or Lo (0) corner.
+func (c Cell) latitude(i, j int) float64 {
+ var u, v float64
+ switch {
+ case i == 0 && j == 0:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Lo
+ case i == 0 && j == 1:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Hi
+ case i == 1 && j == 0:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Lo
+ case i == 1 && j == 1:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Hi
+ default:
+ panic("i and/or j is out of bounds")
+ }
+ return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
+}
+
+// longitude returns the longitude of the cell vertex in radians given by (i,j),
+// where i and j indicate the Hi (1) or Lo (0) corner.
+func (c Cell) longitude(i, j int) float64 {
+ var u, v float64
+ switch {
+ case i == 0 && j == 0:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Lo
+ case i == 0 && j == 1:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Hi
+ case i == 1 && j == 0:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Lo
+ case i == 1 && j == 1:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Hi
+ default:
+ panic("i and/or j is out of bounds")
+ }
+ return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
+}
+
+var (
+ poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon
+)
+
+// RectBound returns the bounding rectangle of this cell.
+func (c Cell) RectBound() Rect {
+ if c.level > 0 {
+ // Except for cells at level 0, the latitude and longitude extremes are
+ // attained at the vertices. Furthermore, the latitude range is
+ // determined by one pair of diagonally opposite vertices and the
+ // longitude range is determined by the other pair.
+ //
+ // We first determine which corner (i,j) of the cell has the largest
+ // absolute latitude. To maximize latitude, we want to find the point in
+ // the cell that has the largest absolute z-coordinate and the smallest
+ // absolute x- and y-coordinates. To do this we look at each coordinate
+ // (u and v), and determine whether we want to minimize or maximize that
+ // coordinate based on the axis direction and the cell's (u,v) quadrant.
+ u := c.uv.X.Lo + c.uv.X.Hi
+ v := c.uv.Y.Lo + c.uv.Y.Hi
+ var i, j int
+ if uAxis(int(c.face)).Z == 0 {
+ if u < 0 {
+ i = 1
+ }
+ } else if u > 0 {
+ i = 1
+ }
+ if vAxis(int(c.face)).Z == 0 {
+ if v < 0 {
+ j = 1
+ }
+ } else if v > 0 {
+ j = 1
+ }
+ lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j))
+ lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j))
+
+ // We grow the bounds slightly to make sure that the bounding rectangle
+ // contains LatLngFromPoint(P) for any point P inside the loop L defined by the
+ // four *normalized* vertices. Note that normalization of a vector can
+ // change its direction by up to 0.5 * dblEpsilon radians, and it is not
+ // enough just to add Normalize calls to the code above because the
+ // latitude/longitude ranges are not necessarily determined by diagonally
+ // opposite vertex pairs after normalization.
+ //
+ // We would like to bound the amount by which the latitude/longitude of a
+ // contained point P can exceed the bounds computed above. In the case of
+ // longitude, the normalization error can change the direction of rounding
+ // leading to a maximum difference in longitude of 2 * dblEpsilon. In
+ // the case of latitude, the normalization error can shift the latitude by
+ // up to 0.5 * dblEpsilon and the other sources of error can cause the
+ // two latitudes to differ by up to another 1.5 * dblEpsilon, which also
+ // leads to a maximum difference of 2 * dblEpsilon.
+ return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure()
+ }
+
+ // The 4 cells around the equator extend to +/-45 degrees latitude at the
+ // midpoints of their top and bottom edges. The two cells covering the
+ // poles extend down to +/-35.26 degrees at their vertices. The maximum
+ // error in this calculation is 0.5 * dblEpsilon.
+ var bound Rect
+ switch c.face {
+ case 0:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}}
+ case 1:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}}
+ case 2:
+ bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()}
+ case 3:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}}
+ case 4:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}}
+ default:
+ bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()}
+ }
+
+ // Finally, we expand the bound to account for the error when a point P is
+	// converted to a LatLng to test for containment. (The bound should be
+ // large enough so that it contains the computed LatLng of any contained
+ // point, not just the infinite-precision version.) We don't need to expand
+ // longitude because longitude is calculated via a single call to math.Atan2,
+ // which is guaranteed to be semi-monotonic.
+ return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)})
+}
+
+// CapBound returns the bounding cap of this cell.
+func (c Cell) CapBound() Cap {
+ // We use the cell center in (u,v)-space as the cap axis. This vector is very close
+ // to GetCenter() and faster to compute. Neither one of these vectors yields the
+ // bounding cap with minimal surface area, but they are both pretty close.
+ cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()})
+ for k := 0; k < 4; k++ {
+ cap = cap.AddPoint(c.Vertex(k))
+ }
+ return cap
+}
+
+// ContainsPoint reports whether this cell contains the given point. Note that
+// unlike Loop/Polygon, a Cell is considered to be a closed set. This means
+// that a point on a Cell's edge or vertex belongs to the Cell and to the
+// relevant adjacent Cells as well.
+//
+// If you want every point to be contained by exactly one Cell,
+// you will need to convert the Cell to a Loop.
+func (c Cell) ContainsPoint(p Point) bool {
+ var uv r2.Point
+ var ok bool
+ if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
+ return false
+ }
+
+ // Expand the (u,v) bound to ensure that
+ //
+ // CellFromPoint(p).ContainsPoint(p)
+ //
+ // is always true. To do this, we need to account for the error when
+ // converting from (u,v) coordinates to (s,t) coordinates. In the
+ // normal case the total error is at most dblEpsilon.
+ return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
+}
+
+// Encode encodes the Cell.
+func (c Cell) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ c.encode(e)
+ return e.err
+}
+
+func (c Cell) encode(e *encoder) {
+ c.id.encode(e)
+}
+
+// Decode decodes the Cell.
+func (c *Cell) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ c.decode(d)
+ return d.err
+}
+
+func (c *Cell) decode(d *decoder) {
+ c.id.decode(d)
+ *c = CellFromCellID(c.id)
+}
+
+// vertexChordDist2 returns the squared chord distance from point P to the
+// cell corner vertex selected by xHi and yHi, which choose the Hi or Lo
+// endpoint of the u- and v-ranges respectively.
+func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle {
+ x := c.uv.X.Lo
+ y := c.uv.Y.Lo
+ if xHi {
+ x = c.uv.X.Hi
+ }
+ if yHi {
+ y = c.uv.Y.Hi
+ }
+
+ return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1))
+}
+
+// uEdgeIsClosest reports whether a point P is closer to the interior of the
+// specified Cell edge (either the lower or upper edge of the Cell) than to
+// either of the edge's endpoints.
+func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
+ u0 := c.uv.X.Lo
+ u1 := c.uv.X.Hi
+ v := c.uv.Y.Lo
+ if vHi {
+ v = c.uv.Y.Hi
+ }
+ // These are the normals to the planes that are perpendicular to the edge
+ // and pass through one of its two endpoints.
+ dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
+ dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
+ return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// vEdgeIsClosest reports whether a point P is closer to the interior of the
+// specified Cell edge (either the left or right edge of the Cell) than to
+// either of the edge's endpoints.
+func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
+ v0 := c.uv.Y.Lo
+ v1 := c.uv.Y.Hi
+ u := c.uv.X.Lo
+ if uHi {
+ u = c.uv.X.Hi
+ }
+ dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
+ dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
+ return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
+}
+
+// edgeDistance reports the distance from a Point P to a given Cell edge.
+// The point P is represented by ij, its dot product with the edge normal,
+// and the edge by uv, its u- or v-coordinate value (the edge normal is not
+// unit length).
+func edgeDistance(ij, uv float64) s1.ChordAngle {
+	// Let P be the target point and let R be the closest point on the given
+	// edge AB. The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
+	// where Q is the point P projected onto the plane of the great circle
+	// through AB. We can compute PQ^2 perpendicular to that plane from ij
+	// (the dot product of the target point P with the edge normal) and the
+	// squared length of the edge normal (1 + uv**2).
+ pq2 := (ij * ij) / (1 + uv*uv)
+
+ // We can compute the distance QR as (1 - OQ) where O is the sphere origin,
+ // and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
+ // (This calculation loses accuracy as angle POQ approaches Pi/2.)
+ qr := 1 - math.Sqrt(1-pq2)
+ return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
+}
+
+// distanceInternal reports the distance from the given point to the interior of
+// the cell if toInterior is true or to the boundary of the cell otherwise.
+func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
+ // All calculations are done in the (u,v,w) coordinates of this cell's face.
+ target := faceXYZtoUVW(int(c.face), targetXYZ)
+
+ // Compute dot products with all four upward or rightward-facing edge
+ // normals. dirIJ is the dot product for the edge corresponding to axis
+ // I, endpoint J. For example, dir01 is the right edge of the Cell
+ // (corresponding to the upper endpoint of the u-axis).
+ dir00 := target.X - target.Z*c.uv.X.Lo
+ dir01 := target.X - target.Z*c.uv.X.Hi
+ dir10 := target.Y - target.Z*c.uv.Y.Lo
+ dir11 := target.Y - target.Z*c.uv.Y.Hi
+ inside := true
+ if dir00 < 0 {
+ inside = false // Target is to the left of the cell
+ if c.vEdgeIsClosest(target, false) {
+ return edgeDistance(-dir00, c.uv.X.Lo)
+ }
+ }
+ if dir01 > 0 {
+ inside = false // Target is to the right of the cell
+ if c.vEdgeIsClosest(target, true) {
+ return edgeDistance(dir01, c.uv.X.Hi)
+ }
+ }
+ if dir10 < 0 {
+ inside = false // Target is below the cell
+ if c.uEdgeIsClosest(target, false) {
+ return edgeDistance(-dir10, c.uv.Y.Lo)
+ }
+ }
+ if dir11 > 0 {
+ inside = false // Target is above the cell
+ if c.uEdgeIsClosest(target, true) {
+ return edgeDistance(dir11, c.uv.Y.Hi)
+ }
+ }
+ if inside {
+ if toInterior {
+ return s1.ChordAngle(0)
+ }
+ // Although you might think of Cells as rectangles, they are actually
+ // arbitrary quadrilaterals after they are projected onto the sphere.
+ // Therefore the simplest approach is just to find the minimum distance to
+ // any of the four edges.
+ return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo),
+ edgeDistance(dir01, c.uv.X.Hi),
+ edgeDistance(-dir10, c.uv.Y.Lo),
+ edgeDistance(dir11, c.uv.Y.Hi))
+ }
+
+ // Otherwise, the closest point is one of the four cell vertices. Note that
+ // it is *not* trivial to narrow down the candidates based on the edge sign
+ // tests above, because (1) the edges don't meet at right angles and (2)
+ // there are points on the far side of the sphere that are both above *and*
+ // below the cell, etc.
+ return minChordAngle(c.vertexChordDist2(target, false, false),
+ c.vertexChordDist2(target, true, false),
+ c.vertexChordDist2(target, false, true),
+ c.vertexChordDist2(target, true, true))
+}
+
+// Distance reports the distance from the cell to the given point. Returns zero if
+// the point is inside the cell.
+func (c Cell) Distance(target Point) s1.ChordAngle {
+ return c.distanceInternal(target, true)
+}
+
+// MaxDistance reports the maximum distance from the cell (including its interior) to the
+// given point.
+func (c Cell) MaxDistance(target Point) s1.ChordAngle {
+ // First check the 4 cell vertices. If all are within the hemisphere
+ // centered around target, the max distance will be to one of these vertices.
+ targetUVW := faceXYZtoUVW(int(c.face), target)
+ maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false),
+ c.vertexChordDist2(targetUVW, true, false),
+ c.vertexChordDist2(targetUVW, false, true),
+ c.vertexChordDist2(targetUVW, true, true))
+
+ if maxDist <= s1.RightChordAngle {
+ return maxDist
+ }
+
+ // Otherwise, find the minimum distance dMin to the antipodal point and the
+ // maximum distance will be pi - dMin.
+ return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)})
+}
+
+// BoundaryDistance reports the distance from the cell boundary to the given point.
+func (c Cell) BoundaryDistance(target Point) s1.ChordAngle {
+ return c.distanceInternal(target, false)
+}
+
+// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns
+// zero if the edge intersects the cell interior.
+func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle {
+ // Possible optimizations:
+ // - Currently the (cell vertex, edge endpoint) distances are computed
+ // twice each, and the length of AB is computed 4 times.
+ // - To fix this, refactor Distance(target) so that it skips calculating
+ // the distance to each cell vertex. Instead, compute the cell vertices
+ // and distances in this function, and add a low-level UpdateMinDistance
+ // that allows the XA, XB, and AB distances to be passed in.
+ // - It might also be more efficient to do all calculations in UVW-space,
+ // since this would involve transforming 2 points rather than 4.
+
+ // First, check the minimum distance to the edge endpoints A and B.
+ // (This also detects whether either endpoint is inside the cell.)
+ minDist := minChordAngle(c.Distance(a), c.Distance(b))
+ if minDist == 0 {
+ return minDist
+ }
+
+ // Otherwise, check whether the edge crosses the cell boundary.
+ crosser := NewChainEdgeCrosser(a, b, c.Vertex(3))
+ for i := 0; i < 4; i++ {
+ if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross {
+ return 0
+ }
+ }
+
+ // Finally, check whether the minimum distance occurs between a cell vertex
+ // and the interior of the edge AB. (Some of this work is redundant, since
+ // it also checks the distance to the endpoints A and B again.)
+ //
+ // Note that we don't need to check the distance from the interior of AB to
+ // the interior of a cell edge, because the only way that this distance can
+ // be minimal is if the two edges cross (already checked above).
+ for i := 0; i < 4; i++ {
+ minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist)
+ }
+ return minDist
+}
+
+// MaxDistanceToEdge returns the maximum distance from the cell (including its interior)
+// to the given edge AB.
+func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle {
+ // If the maximum distance from both endpoints to the cell is at most π/2,
+ // then the maximum distance from the edge to the cell is the maximum of the
+ // two endpoint distances.
+ maxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b))
+ if maxDist <= s1.RightChordAngle {
+ return maxDist
+ }
+
+ return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)})
+}
+
+// DistanceToCell returns the minimum distance from this cell to the given cell.
+// It returns zero if one cell contains the other.
+func (c Cell) DistanceToCell(target Cell) s1.ChordAngle {
+ // If the cells intersect, the distance is zero. We use the (u,v) ranges
+ // rather than the CellID Intersects method so that cells sharing a partial edge or
+ // corner are considered to intersect.
+ if c.face == target.face && c.uv.Intersects(target.uv) {
+ return 0
+ }
+
+ // Otherwise, the minimum distance always occurs between a vertex of one
+ // cell and an edge of the other cell (including the edge endpoints). This
+ // represents a total of 32 possible (vertex, edge) pairs.
+ //
+ // TODO(roberts): This could be optimized to be at least 5x faster by pruning
+ // the set of possible closest vertex/edge pairs using the faces and (u,v)
+ // ranges of both cells.
+ var va, vb [4]Point
+ for i := 0; i < 4; i++ {
+ va[i] = c.Vertex(i)
+ vb[i] = target.Vertex(i)
+ }
+ minDist := s1.InfChordAngle()
+ for i := 0; i < 4; i++ {
+ for j := 0; j < 4; j++ {
+ minDist, _ = UpdateMinDistance(va[i], vb[j], vb[(j+1)&3], minDist)
+ minDist, _ = UpdateMinDistance(vb[i], va[j], va[(j+1)&3], minDist)
+ }
+ }
+ return minDist
+}
+
+// MaxDistanceToCell returns the maximum distance from the cell (including its
+// interior) to the given target cell.
+func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle {
+ // Need to check the antipodal target for intersection with the cell. If it
+ // intersects, the distance is the straight ChordAngle.
+ // antipodalUV is the transpose of the original UV, interpreted within the opposite face.
+ antipodalUV := r2.Rect{target.uv.Y, target.uv.X}
+ if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) {
+ return s1.StraightChordAngle
+ }
+
+ // Otherwise, the maximum distance always occurs between a vertex of one
+ // cell and an edge of the other cell (including the edge endpoints). This
+ // represents a total of 32 possible (vertex, edge) pairs.
+ //
+ // TODO(roberts): When the maximum distance is at most π/2, the maximum is
+ // always attained between a pair of vertices, and this could be made much
+ // faster by testing each vertex pair once rather than the current 4 times.
+ var va, vb [4]Point
+ for i := 0; i < 4; i++ {
+ va[i] = c.Vertex(i)
+ vb[i] = target.Vertex(i)
+ }
+ maxDist := s1.NegativeChordAngle
+ for i := 0; i < 4; i++ {
+ for j := 0; j < 4; j++ {
+ maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist)
+ maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist)
+ }
+ }
+ return maxDist
+}
diff --git a/vendor/github.com/golang/geo/s2/cellid.go b/vendor/github.com/golang/geo/s2/cellid.go
new file mode 100644
index 000000000..37d488685
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cellid.go
@@ -0,0 +1,942 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// CellID uniquely identifies a cell in the S2 cell decomposition.
+// The most significant 3 bits encode the face number (0-5). The
+// remaining 61 bits encode the position of the center of this cell
+// along the Hilbert curve on that face. The zero value and the value
+// (1<<64)-1 are invalid cell IDs. The first compares less than any
+// valid cell ID, the second greater than any valid cell ID.
+//
+// Sequentially increasing cell IDs follow a continuous space-filling curve
+// over the entire sphere. They have the following properties:
+//
+// - The ID of a cell at level k consists of a 3-bit face number followed
+// by k bit pairs that recursively select one of the four children of
+// each cell. The next bit is always 1, and all other bits are 0.
+// Therefore, the level of a cell is determined by the position of its
+// lowest-numbered bit that is turned on (for a cell at level k, this
+// position is 2 * (maxLevel - k)).
+//
+// - The ID of a parent cell is at the midpoint of the range of IDs spanned
+// by its children (or by its descendants at any level).
+//
+// Leaf cells are often used to represent points on the unit sphere, and
+// this type provides methods for converting directly between these two
+// representations. For cells that represent 2D regions rather than
+// discrete points, it is better to use Cells.
+type CellID uint64
+
+// SentinelCellID is an invalid cell ID guaranteed to be larger than any
+// valid cell ID. It is used primarily by ShapeIndex. The value is also used
+// by some S2 types when encoding data.
+// Note that the sentinel's RangeMin == RangeMax == itself.
+const SentinelCellID = CellID(^uint64(0))
+
+// sortCellIDs sorts the slice of CellIDs in place.
+func sortCellIDs(ci []CellID) {
+ sort.Sort(cellIDs(ci))
+}
+
+// cellIDs implements the Sort interface for slices of CellIDs.
+type cellIDs []CellID
+
+func (c cellIDs) Len() int { return len(c) }
+func (c cellIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
+
+// TODO(dsymonds): Some of these constants should probably be exported.
+const (
+ faceBits = 3
+ numFaces = 6
+
+ // This is the number of levels needed to specify a leaf cell.
+ maxLevel = 30
+
+ // The extra position bit (61 rather than 60) lets us encode each cell as its
+ // Hilbert curve position at the cell center (which is halfway along the
+ // portion of the Hilbert curve that fills that cell).
+ posBits = 2*maxLevel + 1
+
+ // The maximum index of a valid leaf cell plus one. The range of valid leaf
+ // cell indices is [0..maxSize-1].
+ maxSize = 1 << maxLevel
+
+ wrapOffset = uint64(numFaces) << posBits
+)
+
+// CellIDFromFacePosLevel returns a cell given its face in the range
+// [0,5], the 61-bit Hilbert curve position pos within that face, and
+// the level in the range [0,maxLevel]. The position in the cell ID
+// will be truncated to correspond to the Hilbert curve position at
+// the center of the returned cell.
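+//
+// For example (illustrative), truncating to level 0 always yields the face
+// cell: CellIDFromFacePosLevel(f, pos, 0) == CellIDFromFace(f) for any pos.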
+func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
+ return CellID(uint64(face)<<posBits + pos | 1).Parent(level)
+}
+
+// CellIDFromFace returns the cell corresponding to a given S2 cube face.
+func CellIDFromFace(face int) CellID {
+ return CellID((uint64(face) << posBits) + lsbForLevel(0))
+}
+
+// CellIDFromLatLng returns the leaf cell containing ll.
+func CellIDFromLatLng(ll LatLng) CellID {
+ return cellIDFromPoint(PointFromLatLng(ll))
+}
+
+// CellIDFromToken returns a cell given a hex-encoded string of its uint64 ID.
+func CellIDFromToken(s string) CellID {
+ if len(s) > 16 {
+ return CellID(0)
+ }
+ n, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return CellID(0)
+ }
+ // Equivalent to right-padding string with zeros to 16 characters.
+ if len(s) < 16 {
+ n = n << (4 * uint(16-len(s)))
+ }
+ return CellID(n)
+}
+
+// ToToken returns a hex-encoded string of the uint64 cell id, with leading
+// zeros included but trailing zeros stripped.
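+//
+// For example (illustrative), a face cell has a one-character token:
+//
+// id := CellIDFromFace(1) // id == 0x3000000000000000
+// id.ToToken() // "3"
+// CellIDFromToken("3") // == id; tokens round-trip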
+func (ci CellID) ToToken() string {
+ s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0")
+ if len(s) == 0 {
+ return "X"
+ }
+ return s
+}
+
+// IsValid reports whether ci represents a valid cell.
+func (ci CellID) IsValid() bool {
+ return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
+}
+
+// Face returns the cube face for this cell ID, in the range [0,5].
+func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
+
+// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
+func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
+
+// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
+func (ci CellID) Level() int {
+ return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
+}
+
+// IsLeaf returns whether this cell ID is at the deepest level;
+// that is, the level at which the cells are smallest.
+func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
+
+// ChildPosition returns the child position (0..3) of this cell's
+// ancestor at the given level, relative to its parent. The argument
+// should be in the range 1..maxLevel. For example,
+// ChildPosition(1) returns the position of this cell's level-1
+// ancestor within its top-level face cell.
+func (ci CellID) ChildPosition(level int) int {
+ return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
+}
+
+// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
+func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
+
+// Parent returns the cell at the given level, which must be no greater than the current level.
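+// For example (illustrative), id.Parent(0) is the top-level face cell
+// containing id: id.Parent(0) == CellIDFromFace(id.Face()).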
+func (ci CellID) Parent(level int) CellID {
+ lsb := lsbForLevel(level)
+ return CellID((uint64(ci) & -lsb) | lsb)
+}
+
+// immediateParent is cheaper than Parent, but assumes !ci.isFace().
+func (ci CellID) immediateParent() CellID {
+ nlsb := CellID(ci.lsb() << 2)
+ return (ci & -nlsb) | nlsb
+}
+
+// isFace returns whether this is a top-level (face) cell.
+func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 }
+
+// lsb returns the least significant bit that is set.
+func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) }
+
+// Children returns the four immediate children of this cell.
+// If ci is a leaf cell, it returns four identical cells that are not the children.
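+//
+// For example (illustrative), CellIDFromFace(2).Children() yields the four
+// level-1 cells of face 2 in Hilbert curve order; each child ch satisfies
+// ch.Parent(0) == CellIDFromFace(2).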
+func (ci CellID) Children() [4]CellID {
+ var ch [4]CellID
+ lsb := CellID(ci.lsb())
+ ch[0] = ci - lsb + lsb>>2
+ lsb >>= 1
+ ch[1] = ch[0] + lsb
+ ch[2] = ch[1] + lsb
+ ch[3] = ch[2] + lsb
+ return ch
+}
+
+func sizeIJ(level int) int {
+ return 1 << uint(maxLevel-level)
+}
+
+// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges.
+// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space.
+// All neighbors are guaranteed to be distinct.
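+// For example (illustrative), the edge neighbors of a face cell are four of
+// the other five face cells (every face except the opposite one).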
+func (ci CellID) EdgeNeighbors() [4]CellID {
+ level := ci.Level()
+ size := sizeIJ(level)
+ f, i, j, _ := ci.faceIJOrientation()
+ return [4]CellID{
+ cellIDFromFaceIJWrap(f, i, j-size).Parent(level),
+ cellIDFromFaceIJWrap(f, i+size, j).Parent(level),
+ cellIDFromFaceIJWrap(f, i, j+size).Parent(level),
+ cellIDFromFaceIJWrap(f, i-size, j).Parent(level),
+ }
+}
+
+// VertexNeighbors returns the cellIDs at the given level that share the
+// vertex closest to this cell.
+// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of
+// the 8 cube vertices.)
+func (ci CellID) VertexNeighbors(level int) []CellID {
+ halfSize := sizeIJ(level + 1)
+ size := halfSize << 1
+ f, i, j, _ := ci.faceIJOrientation()
+
+ var isame, jsame bool
+ var ioffset, joffset int
+ if i&halfSize != 0 {
+ ioffset = size
+ isame = (i + size) < maxSize
+ } else {
+ ioffset = -size
+ isame = (i - size) >= 0
+ }
+ if j&halfSize != 0 {
+ joffset = size
+ jsame = (j + size) < maxSize
+ } else {
+ joffset = -size
+ jsame = (j - size) >= 0
+ }
+
+ results := []CellID{
+ ci.Parent(level),
+ cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level),
+ cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level),
+ }
+
+ if isame || jsame {
+ results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level))
+ }
+
+ return results
+}
+
+// AllNeighbors returns all neighbors of this cell at the given level. Two
+// cells X and Y are neighbors if their boundaries intersect but their
+// interiors do not. In particular, two cells that intersect at a single
+// point are neighbors. Note that for cells adjacent to a face vertex, the
+// same neighbor may be returned more than once. There could be up to eight
+// neighbors including the diagonal ones that share the vertex.
+//
+// This requires level >= ci.Level().
+func (ci CellID) AllNeighbors(level int) []CellID {
+ var neighbors []CellID
+
+ face, i, j, _ := ci.faceIJOrientation()
+
+ // Find the coordinates of the lower left-hand leaf cell. We need to
+ // normalize (i,j) to a known position within the cell because level
+ // may be larger than this cell's level.
+ size := sizeIJ(ci.Level())
+ i &= -size
+ j &= -size
+
+ nbrSize := sizeIJ(level)
+
+ // We compute the top-bottom, left-right, and diagonal neighbors in one
+ // pass. The loop test is at the end of the loop to avoid 32-bit overflow.
+ for k := -nbrSize; ; k += nbrSize {
+ var sameFace bool
+ if k < 0 {
+ sameFace = (j+k >= 0)
+ } else if k >= size {
+ sameFace = (j+k < maxSize)
+ } else {
+ sameFace = true
+ // Top and bottom neighbors.
+ neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize,
+ j-size >= 0).Parent(level))
+ neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size,
+ j+size < maxSize).Parent(level))
+ }
+
+ // Left, right, and diagonal neighbors.
+ neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k,
+ sameFace && i-size >= 0).Parent(level))
+ neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k,
+ sameFace && i+size < maxSize).Parent(level))
+
+ if k >= size {
+ break
+ }
+ }
+
+ return neighbors
+}
+
+// RangeMin returns the minimum CellID that is contained within this cell.
+func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) }
+
+// RangeMax returns the maximum CellID that is contained within this cell.
+func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) }
+
+// Contains returns true iff the CellID contains oci.
+func (ci CellID) Contains(oci CellID) bool {
+ return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax())
+}
+
+// Intersects returns true iff the CellID intersects oci.
+func (ci CellID) Intersects(oci CellID) bool {
+ return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin())
+}
+
+// String returns the string representation of the cell ID in the form "1/3210".
+func (ci CellID) String() string {
+ if !ci.IsValid() {
+ return "Invalid: " + strconv.FormatInt(int64(ci), 16)
+ }
+ var b bytes.Buffer
+ b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above
+ b.WriteByte('/')
+ for level := 1; level <= ci.Level(); level++ {
+ b.WriteByte("0123"[ci.ChildPosition(level)])
+ }
+ return b.String()
+}
+
+// cellIDFromString returns a CellID from a string in the form "1/3210".
+func cellIDFromString(s string) CellID {
+ level := len(s) - 2
+ if level < 0 || level > maxLevel {
+ return CellID(0)
+ }
+ face := int(s[0] - '0')
+ if face < 0 || face > 5 || s[1] != '/' {
+ return CellID(0)
+ }
+ id := CellIDFromFace(face)
+ for i := 2; i < len(s); i++ {
+ childPos := s[i] - '0'
+ // childPos is a byte, so characters below '0' wrap around and are
+ // rejected by the > 3 check.
+ if childPos > 3 {
+ return CellID(0)
+ }
+ id = id.Children()[childPos]
+ }
+ return id
+}
+
+// Point returns the center of the s2 cell on the sphere as a Point.
+// The maximum directional error in Point (compared to the exact
+// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length
+// error is 2 * dblEpsilon (the same as Normalize).
+func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} }
+
+// LatLng returns the center of the s2 cell on the sphere as a LatLng.
+func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) }
+
+// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order.
+//
+// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() {
+// ...
+// }
+func (ci CellID) ChildBegin() CellID {
+ ol := ci.lsb()
+ return CellID(uint64(ci) - ol + ol>>2)
+}
+
+// ChildBeginAtLevel returns the first cell in a traversal of this cell's
+// descendants at the given (absolute) level, in Hilbert curve order.
+// The given level must be no smaller than the cell's level.
+// See ChildBegin for example use.
+func (ci CellID) ChildBeginAtLevel(level int) CellID {
+ return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level))
+}
+
+// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order.
+// The returned cell may be invalid.
+func (ci CellID) ChildEnd() CellID {
+ ol := ci.lsb()
+ return CellID(uint64(ci) + ol + ol>>2)
+}
+
+// ChildEndAtLevel returns the first cell after the last descendant of this
+// cell at the given (absolute) level, in Hilbert curve order.
+// The given level must be no smaller than the cell's level.
+// The returned cell may be invalid.
+func (ci CellID) ChildEndAtLevel(level int) CellID {
+ return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level))
+}
+
+// Next returns the next cell along the Hilbert curve.
+// This is expected to be used with ChildBegin and ChildEnd,
+// or ChildBeginAtLevel and ChildEndAtLevel.
+func (ci CellID) Next() CellID {
+ return CellID(uint64(ci) + ci.lsb()<<1)
+}
+
+// Prev returns the previous cell along the Hilbert curve.
+func (ci CellID) Prev() CellID {
+ return CellID(uint64(ci) - ci.lsb()<<1)
+}
+
+// NextWrap returns the next cell along the Hilbert curve, wrapping from last to
+// first as necessary. This should not be used with ChildBegin and ChildEnd.
+func (ci CellID) NextWrap() CellID {
+ n := ci.Next()
+ if uint64(n) < wrapOffset {
+ return n
+ }
+ return CellID(uint64(n) - wrapOffset)
+}
+
+// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from
+// first to last as necessary. This should not be used with ChildBegin and ChildEnd.
+func (ci CellID) PrevWrap() CellID {
+ p := ci.Prev()
+ if uint64(p) < wrapOffset {
+ return p
+ }
+ return CellID(uint64(p) + wrapOffset)
+}
+
+// AdvanceWrap advances or retreats the indicated number of steps along the
+// Hilbert curve at the current level and returns the new position. The
+// position wraps between the first and last faces as necessary.
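+//
+// For example (illustrative), at level 0 the curve wraps from the last face
+// back to the first: CellIDFromFace(5).AdvanceWrap(1) == CellIDFromFace(0).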
+func (ci CellID) AdvanceWrap(steps int64) CellID {
+ if steps == 0 {
+ return ci
+ }
+
+ // We clamp the number of steps if necessary to ensure that we do not
+ // advance past the End() or before the Begin() of this level.
+ shift := uint(2*(maxLevel-ci.Level()) + 1)
+ if steps < 0 {
+ if min := -int64(uint64(ci) >> shift); steps < min {
+ wrap := int64(wrapOffset >> shift)
+ steps %= wrap
+ if steps < min {
+ steps += wrap
+ }
+ }
+ } else {
+ // Unlike Advance(), we don't want to return End(level).
+ if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max {
+ wrap := int64(wrapOffset >> shift)
+ steps %= wrap
+ if steps > max {
+ steps -= wrap
+ }
+ }
+ }
+
+ // If steps is negative, then shifting it left has undefined behavior.
+ // Cast to uint64 for a 2's complement answer.
+ return CellID(uint64(ci) + (uint64(steps) << shift))
+}
+
+// Encode encodes the CellID.
+func (ci CellID) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ ci.encode(e)
+ return e.err
+}
+
+func (ci CellID) encode(e *encoder) {
+ e.writeUint64(uint64(ci))
+}
+
+// Decode decodes the CellID.
+func (ci *CellID) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ ci.decode(d)
+ return d.err
+}
+
+func (ci *CellID) decode(d *decoder) {
+ *ci = CellID(d.readUint64())
+}
+
+// TODO: the methods below are not exported yet. Settle on the entire API design
+// before doing this. Do we want to mirror the C++ one as closely as possible?
+
+// distanceFromBegin returns the number of steps that this cell is from the first
+// node in the S2 hierarchy at our level. (i.e., CellIDFromFace(0).ChildBeginAtLevel(ci.Level())).
+// The return value is always non-negative.
+func (ci CellID) distanceFromBegin() int64 {
+ return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
+}
+
+// rawPoint returns an unnormalized r3 vector from the origin through the center
+// of the s2 cell on the sphere.
+func (ci CellID) rawPoint() r3.Vector {
+ face, si, ti := ci.faceSiTi()
+ return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti)))
+}
+
+// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
+func (ci CellID) faceSiTi() (face int, si, ti uint32) {
+ face, i, j, _ := ci.faceIJOrientation()
+ delta := 0
+ if ci.IsLeaf() {
+ delta = 1
+ } else if (i^(int(ci)>>2))&1 != 0 {
+ delta = 2
+ }
+ return face, uint32(2*i + delta), uint32(2*j + delta)
+}
+
+// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
+func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
+ f = ci.Face()
+ orientation = f & swapMask
+ nbits := maxLevel - 7*lookupBits // first iteration
+
+ // Each iteration maps 8 bits of the Hilbert curve position into
+ // 4 bits of "i" and "j". The lookup table transforms a key of the
+ // form "ppppppppoo" to a value of the form "iiiijjjjoo", where the
+ // letters [ijpo] represent bits of "i", "j", the Hilbert curve
+ // position, and the Hilbert curve orientation respectively.
+ //
+ // On the first iteration we need to be careful to clear out the bits
+ // representing the cube face.
+ for k := 7; k >= 0; k-- {
+ orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2
+ orientation = lookupIJ[orientation]
+ i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
+ j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
+ orientation &= (swapMask | invertMask)
+ nbits = lookupBits // following iterations
+ }
+
+ // The position of a non-leaf cell at level "n" consists of a prefix of
+ // 2*n bits that identifies the cell, followed by a suffix of
+ // 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is
+ // just "1" and has no effect. Otherwise, it consists of "10", followed
+ // by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has
+ // no effect, while each occurrence of "00" has the effect of reversing
+ // the swapMask bit.
+ if ci.lsb()&0x1111111111111110 != 0 {
+ orientation ^= swapMask
+ }
+
+ return
+}
+
+// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates.
+func cellIDFromFaceIJ(f, i, j int) CellID {
+ // Note that this value gets shifted one bit to the left at the end
+ // of the function.
+ n := uint64(f) << (posBits - 1)
+ // Alternating faces have opposite Hilbert curve orientations; this
+ // is necessary in order for all faces to have a right-handed
+ // coordinate system.
+ bits := f & swapMask
+ // Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert
+ // curve position. The lookup table transforms a 10-bit key of the form
+ // "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the
+ // letters [ijpo] denote bits of "i", "j", Hilbert curve position, and
+ // Hilbert curve orientation respectively.
+ for k := 7; k >= 0; k-- {
+ mask := (1 << lookupBits) - 1
+ bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2)
+ bits += ((j >> uint(k*lookupBits)) & mask) << 2
+ bits = lookupPos[bits]
+ n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits)
+ bits &= (swapMask | invertMask)
+ }
+ return CellID(n*2 + 1)
+}
+
+func cellIDFromFaceIJWrap(f, i, j int) CellID {
+ // Convert i and j to the coordinates of a leaf cell just beyond the
+ // boundary of this face. This prevents 32-bit overflow in the case
+ // of finding the neighbors of a face cell.
+ i = clampInt(i, -1, maxSize)
+ j = clampInt(j, -1, maxSize)
+
+ // We want to wrap these coordinates onto the appropriate adjacent face.
+ // The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
+ // (which yields a point outside the normal face boundary), and then call
+ // xyzToFaceUV to project back onto the correct face.
+ //
+ // The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using
+ // the linear projection (u=2*s-1 and v=2*t-1). (The code further below
+ // converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1).
+ // Any projection would work here, so we use the simplest.) We also clamp
+ // the (u,v) coordinates so that the point is barely outside the
+ // [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step
+ // (which divides by the new z coordinate) might change the other
+ // coordinates enough so that we end up in the wrong leaf cell.
+ const scale = 1.0 / maxSize
+ limit := math.Nextafter(1, 2)
+ u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize)))
+ v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize)))
+
+ // Find the leaf cell coordinates on the adjacent face, and convert
+ // them to a cell id at the appropriate level.
+ f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v))
+ return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1)))
+}
+
+func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
+ if sameFace {
+ return cellIDFromFaceIJ(f, i, j)
+ }
+ return cellIDFromFaceIJWrap(f, i, j)
+}
+
+// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
+// s- or t-value contained by that cell. The argument must be in the range
+// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
+// cell indices.
+func ijToSTMin(i int) float64 {
+ return float64(i) / float64(maxSize)
+}
+
+// stToIJ converts a value in ST coordinates to a value in IJ coordinates.
+func stToIJ(s float64) int {
+ return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1)
+}
+
+// cellIDFromPoint returns a leaf cell containing point p. Usually there is
+// exactly one such cell, but for points along the edge of a cell, any
+// adjacent cell may be (deterministically) chosen. This is because
+// s2.CellIDs are considered to be closed sets. The returned cell will
+// always contain the given point, i.e.
+//
+// CellFromPoint(p).ContainsPoint(p)
+//
+// is always true.
+func cellIDFromPoint(p Point) CellID {
+ f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z})
+ i := stToIJ(uvToST(u))
+ j := stToIJ(uvToST(v))
+ return cellIDFromFaceIJ(f, i, j)
+}
+
+// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given
+// level containing the leaf cell with the given (i,j)-coordinates.
+func ijLevelToBoundUV(i, j, level int) r2.Rect {
+ cellSize := sizeIJ(level)
+ xLo := i & -cellSize
+ yLo := j & -cellSize
+
+ return r2.Rect{
+ X: r1.Interval{
+ Lo: stToUV(ijToSTMin(xLo)),
+ Hi: stToUV(ijToSTMin(xLo + cellSize)),
+ },
+ Y: r1.Interval{
+ Lo: stToUV(ijToSTMin(yLo)),
+ Hi: stToUV(ijToSTMin(yLo + cellSize)),
+ },
+ }
+}
+
+// Constants related to the bit mangling in the Cell ID.
+const (
+ lookupBits = 4
+ swapMask = 0x01
+ invertMask = 0x02
+)
+
+// The following lookup tables are used to convert efficiently between an
+// (i,j) cell index and the corresponding position along the Hilbert curve.
+//
+// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the
+// orientation of the current cell into 8 bits representing the order in which
+// that subcell is visited by the Hilbert curve, plus 2 bits indicating the
+// new orientation of the Hilbert curve within that subcell. (Cell
+// orientations are represented as combination of swapMask and invertMask.)
+//
+// lookupIJ is an inverted table used for mapping in the opposite
+// direction.
+//
+// We also experimented with looking up 16 bits at a time (14 bits of position
+// plus 2 of orientation) but found that smaller lookup tables gave better
+// performance. (2KB fits easily in the primary cache.)
+var (
+ ijToPos = [4][4]int{
+ {0, 1, 3, 2}, // canonical order
+ {0, 3, 1, 2}, // axes swapped
+ {2, 3, 1, 0}, // bits inverted
+ {2, 1, 3, 0}, // swapped & inverted
+ }
+ posToIJ = [4][4]int{
+ {0, 1, 3, 2}, // canonical order: (0,0), (0,1), (1,1), (1,0)
+ {0, 2, 3, 1}, // axes swapped: (0,0), (1,0), (1,1), (0,1)
+ {3, 2, 0, 1}, // bits inverted: (1,1), (1,0), (0,0), (0,1)
+ {3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0)
+ }
+ posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask}
+ lookupIJ [1 << (2*lookupBits + 2)]int
+ lookupPos [1 << (2*lookupBits + 2)]int
+)
+
+func init() {
+ initLookupCell(0, 0, 0, 0, 0, 0)
+ initLookupCell(0, 0, 0, swapMask, 0, swapMask)
+ initLookupCell(0, 0, 0, invertMask, 0, invertMask)
+ initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask)
+}
+
+// initLookupCell initializes the lookupIJ table at init time.
+func initLookupCell(level, i, j, origOrientation, pos, orientation int) {
+ if level == lookupBits {
+ ij := (i << lookupBits) + j
+ lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation
+ lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation
+ return
+ }
+
+ level++
+ i <<= 1
+ j <<= 1
+ pos <<= 2
+ r := posToIJ[orientation]
+ initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0])
+ initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1])
+ initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2])
+ initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3])
+}
+
+// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs.
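+// The ok result is false if the two cells have no common ancestor, which
+// happens exactly when they are on different faces.
+//
+// For example (illustrative), any two distinct children of a cell p report
+// a common ancestor level of p.Level():
+//
+// ch := p.Children()
+// level, ok := ch[0].CommonAncestorLevel(ch[1]) // level == p.Level(), ok == true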
+func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
+ bits := uint64(ci ^ other)
+ if bits < ci.lsb() {
+ bits = ci.lsb()
+ }
+ if bits < other.lsb() {
+ bits = other.lsb()
+ }
+
+ msbPos := findMSBSetNonZero64(bits)
+ if msbPos > 60 {
+ return 0, false
+ }
+ return (60 - msbPos) >> 1, true
+}
+
+// Advance advances or retreats the indicated number of steps along the
+// Hilbert curve at the current level, and returns the new position. The
+// position is never advanced past End() or before Begin().
+func (ci CellID) Advance(steps int64) CellID {
+ if steps == 0 {
+ return ci
+ }
+
+ // We clamp the number of steps if necessary to ensure that we do not
+ // advance past the End() or before the Begin() of this level. Note that
+ // minSteps and maxSteps always fit in a signed 64-bit integer.
+ stepShift := uint(2*(maxLevel-ci.Level()) + 1)
+ if steps < 0 {
+ minSteps := -int64(uint64(ci) >> stepShift)
+ if steps < minSteps {
+ steps = minSteps
+ }
+ } else {
+ maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift)
+ if steps > maxSteps {
+ steps = maxSteps
+ }
+ }
+ return ci + CellID(steps)<<stepShift
+}
+
+// centerST returns the center of the CellID in (s,t)-space.
+func (ci CellID) centerST() r2.Point {
+ _, si, ti := ci.faceSiTi()
+ return r2.Point{siTiToST(si), siTiToST(ti)}
+}
+
+// sizeST returns the edge length of this CellID in (s,t)-space at the given level.
+func (ci CellID) sizeST(level int) float64 {
+ return ijToSTMin(sizeIJ(level))
+}
+
+// boundST returns the bound of this CellID in (s,t)-space.
+func (ci CellID) boundST() r2.Rect {
+ s := ci.sizeST(ci.Level())
+ return r2.RectFromCenterSize(ci.centerST(), r2.Point{s, s})
+}
+
+// centerUV returns the center of this CellID in (u,v)-space. Note that
+// the center of the cell is defined as the point at which it is recursively
+// subdivided into four children; in general, it is not at the midpoint of
+// the (u,v) rectangle covered by the cell.
+func (ci CellID) centerUV() r2.Point {
+ _, si, ti := ci.faceSiTi()
+ return r2.Point{stToUV(siTiToST(si)), stToUV(siTiToST(ti))}
+}
+
+// boundUV returns the bound of this CellID in (u,v)-space.
+func (ci CellID) boundUV() r2.Rect {
+ _, i, j, _ := ci.faceIJOrientation()
+ return ijLevelToBoundUV(i, j, ci.Level())
+}
+
+// expandEndpoint returns a new u-coordinate u' such that the distance from the
+// line u=u' to the given edge (u,v0)-(u,v1) is exactly the given distance
+// (which is specified as the sine of the angle corresponding to the distance).
+func expandEndpoint(u, maxV, sinDist float64) float64 {
+ // This is based on solving a spherical right triangle, similar to the
+ // calculation in Cap.RectBound.
+ // Given an edge of the form (u,v0)-(u,v1), let maxV = max(abs(v0), abs(v1)).
+ sinUShift := sinDist * math.Sqrt((1+u*u+maxV*maxV)/(1+u*u))
+ cosUShift := math.Sqrt(1 - sinUShift*sinUShift)
+ // The following is an expansion of tan(atan(u) + asin(sinUShift)).
+ return (cosUShift*u + sinUShift) / (cosUShift - sinUShift*u)
+}
+
+// expandedByDistanceUV returns a rectangle expanded in (u,v)-space so that it
+// contains all points within the given distance of the boundary; the result is
+// the smallest such rectangle. If the distance is negative, the rectangle is
+// instead shrunk so that it excludes all points within the given absolute
+// distance of the boundary.
+//
+// Distances are measured *on the sphere*, not in (u,v)-space. For example,
+// you can use this method to expand the (u,v)-bound of a CellID so that
+// it contains all points within 5km of the original cell. You can then
+// test whether a point lies within the expanded bounds like this:
+//
+// if u, v, ok := faceXYZtoUV(face, point); ok && bound.ContainsPoint(r2.Point{u,v}) { ... }
+//
+// Limitations:
+//
+// - Because the rectangle is drawn on one of the six cube-face planes
+// (i.e., {x,y,z} = +/-1), it can cover at most one hemisphere. This
+// limits the maximum amount that a rectangle can be expanded. For
+// example, CellID bounds can be expanded safely by at most 45 degrees
+// (about 5000 km on the Earth's surface).
+//
+// - The implementation is not exact for negative distances. The resulting
+// rectangle will exclude all points within the given distance of the
+// boundary but may be slightly smaller than necessary.
+func expandedByDistanceUV(uv r2.Rect, distance s1.Angle) r2.Rect {
+ // Expand each of the four sides of the rectangle just enough to include all
+ // points within the given distance of that side. (The rectangle may be
+ // expanded by a different amount in (u,v)-space on each side.)
+ maxU := math.Max(math.Abs(uv.X.Lo), math.Abs(uv.X.Hi))
+ maxV := math.Max(math.Abs(uv.Y.Lo), math.Abs(uv.Y.Hi))
+ sinDist := math.Sin(float64(distance))
+ return r2.Rect{
+ X: r1.Interval{expandEndpoint(uv.X.Lo, maxV, -sinDist),
+ expandEndpoint(uv.X.Hi, maxV, sinDist)},
+ Y: r1.Interval{expandEndpoint(uv.Y.Lo, maxU, -sinDist),
+ expandEndpoint(uv.Y.Hi, maxU, sinDist)}}
+}
+
+// MaxTile returns the largest cell with the same RangeMin such that
+// RangeMax < limit.RangeMin. It returns limit if no such cell exists.
+// This method can be used to generate a small set of CellIDs that covers
+// a given range (a tiling). This example shows how to generate a tiling
+// for a semi-open range of leaf cells [start, limit):
+//
+// for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit) { ... }
+//
+// Note that in general the cells in the tiling will be of different sizes;
+// they gradually get larger (near the middle of the range) and then
+// gradually get smaller as limit is approached.
+func (ci CellID) MaxTile(limit CellID) CellID {
+ start := ci.RangeMin()
+ if start >= limit.RangeMin() {
+ return limit
+ }
+
+ if ci.RangeMax() >= limit {
+ // The cell is too large, shrink it. Note that when generating coverings
+ // of CellID ranges, this loop usually executes only once. Also because
+ // ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the
+ // time we reach a leaf cell.
+ for {
+ ci = ci.Children()[0]
+ if ci.RangeMax() < limit {
+ break
+ }
+ }
+ return ci
+ }
+
+ // The cell may be too small. Grow it if necessary. Note that generally
+ // this loop only iterates once.
+ for !ci.isFace() {
+ parent := ci.immediateParent()
+ if parent.RangeMin() != start || parent.RangeMax() >= limit {
+ break
+ }
+ ci = parent
+ }
+ return ci
+}
+
+// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell.
+// Note that although (si,ti) coordinates span the range [0,2**31] in general,
+// the cell center coordinates are always in the range [1,2**31-1] and
+// therefore can be represented using a signed 32-bit integer.
+func (ci CellID) centerFaceSiTi() (face, si, ti int) {
+ // First we compute the discrete (i,j) coordinates of a leaf cell contained
+ // within the given cell. Given that cells are represented by the Hilbert
+ // curve position corresponding at their center, it turns out that the cell
+ // returned by faceIJOrientation is always one of two leaf cells closest
+ // to the center of the cell (unless the given cell is a leaf cell itself,
+ // in which case there is only one possibility).
+ //
+ // Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin,
+ // jmin) be the coordinates of its lower left-hand corner, the leaf cell
+ // returned by faceIJOrientation is either (imin + s/2, jmin + s/2)
+// or (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want.
+ // We can distinguish these two cases by looking at the low bit of i or
+ // j. In the second case the low bit is one, unless s == 2 (i.e. the
+ // level just above leaf cells) in which case the low bit is zero.
+ //
+ // In the code below, the expression ((i ^ (int64(ci) >> 2)) & 1) is 1
+ // if we are in the second case described above.
+ face, i, j, _ := ci.faceIJOrientation()
+ delta := 0
+ if ci.IsLeaf() {
+ delta = 1
+ } else if (int64(i)^(int64(ci)>>2))&1 == 1 {
+ delta = 2
+ }
+
+ // Note that (2 * {i,j} + delta) will never overflow a 32-bit integer.
+ return face, 2*i + delta, 2*j + delta
+}
diff --git a/vendor/github.com/golang/geo/s2/cellunion.go b/vendor/github.com/golang/geo/s2/cellunion.go
new file mode 100644
index 000000000..0654de973
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cellunion.go
@@ -0,0 +1,590 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/golang/geo/s1"
+)
+
+// A CellUnion is a collection of CellIDs.
+//
+// It is normalized if it is sorted, and does not contain redundancy.
+// Specifically, it may not contain the same CellID twice, nor a CellID that
+// is contained by another, nor the four sibling CellIDs that are children of
+// a single higher level CellID.
+//
+// CellUnions are not required to be normalized, but certain operations will
+// return different results if they are not (e.g. Contains).
+type CellUnion []CellID
+
+// CellUnionFromRange creates a CellUnion that covers the half-open range
+// of leaf cells [begin, end). If begin == end the resulting union is empty.
+// This requires that begin and end are both leaves, and begin <= end.
+// To create a closed-ended range, pass in end.Next().
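+//
+// For example (illustrative), the leaf range spanned by a single cell c
+// comes back as just that cell:
+//
+// cu := CellUnionFromRange(c.RangeMin(), c.RangeMax().Next())
+// // cu is CellUnion{c}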
+func CellUnionFromRange(begin, end CellID) CellUnion {
+ // We repeatedly add the largest cell we can.
+ var cu CellUnion
+ for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
+ cu = append(cu, id)
+ }
+ // The output is normalized because the cells are added in order by the iteration.
+ return cu
+}
+
+// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions.
+func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion {
+ var cu CellUnion
+ for _, cellUnion := range cellUnions {
+ cu = append(cu, cellUnion...)
+ }
+ cu.Normalize()
+ return cu
+}
+
+// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions.
+func CellUnionFromIntersection(x, y CellUnion) CellUnion {
+ var cu CellUnion
+
+ // This is a fairly efficient calculation that uses binary search to skip
+ // over sections of both input vectors. It takes constant time if all the
+ // cells of x come before or after all the cells of y in CellID order.
+ var i, j int
+ for i < len(x) && j < len(y) {
+ iMin := x[i].RangeMin()
+ jMin := y[j].RangeMin()
+ if iMin > jMin {
+ // Either j.Contains(i) or the two cells are disjoint.
+ if x[i] <= y[j].RangeMax() {
+ cu = append(cu, x[i])
+ i++
+ } else {
+ // Advance j to the first cell possibly contained by x[i].
+ j = y.lowerBound(j+1, len(y), iMin)
+ // The previous cell y[j-1] may now contain x[i].
+ if x[i] <= y[j-1].RangeMax() {
+ j--
+ }
+ }
+ } else if jMin > iMin {
+ // Identical to the code above with i and j reversed.
+ if y[j] <= x[i].RangeMax() {
+ cu = append(cu, y[j])
+ j++
+ } else {
+ i = x.lowerBound(i+1, len(x), jMin)
+ if y[j] <= x[i-1].RangeMax() {
+ i--
+ }
+ }
+ } else {
+ // i and j have the same RangeMin(), so one contains the other.
+ if x[i] < y[j] {
+ cu = append(cu, x[i])
+ i++
+ } else {
+ cu = append(cu, y[j])
+ j++
+ }
+ }
+ }
+
+ // The output is generated in sorted order.
+ cu.Normalize()
+ return cu
+}
+
+// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection
+// of a CellUnion with the given CellID. This can be useful for splitting a
+// CellUnion into chunks.
+func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion {
+ var cu CellUnion
+ if x.ContainsCellID(id) {
+ cu = append(cu, id)
+ cu.Normalize()
+ return cu
+ }
+
+ idmax := id.RangeMax()
+ for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ {
+ cu = append(cu, x[i])
+ }
+
+ cu.Normalize()
+ return cu
+}
+
+// CellUnionFromDifference creates a CellUnion from the difference (x - y)
+// of the given CellUnions.
+func CellUnionFromDifference(x, y CellUnion) CellUnion {
+ // TODO(roberts): This is approximately O(N*log(N)), but could probably
+ // use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient.
+
+ var cu CellUnion
+ for _, xid := range x {
+ cu.cellUnionDifferenceInternal(xid, &y)
+ }
+
+ // The output is generated in sorted order, and there should not be any
+ // cells that can be merged (provided that both inputs were normalized).
+ return cu
+}
+
+// The C++ constructor methods FromNormalized and FromVerbatim are not necessary
+// since they don't call Normalize, and just set the CellIDs directly on the object,
+// so straight casting is sufficient in Go to replicate this behavior.
+
+// IsValid reports whether the cell union is valid, meaning that the CellIDs are
+// valid, non-overlapping, and sorted in increasing order.
+func (cu *CellUnion) IsValid() bool {
+ for i, cid := range *cu {
+ if !cid.IsValid() {
+ return false
+ }
+ if i == 0 {
+ continue
+ }
+ if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+ return false
+ }
+ }
+ return true
+}
+
+// IsNormalized reports whether the cell union is normalized, meaning that it
+// satisfies IsValid and that no four cells have a common parent.
+// Certain operations such as Contains will return a different
+// result if the cell union is not normalized.
+func (cu *CellUnion) IsNormalized() bool {
+ for i, cid := range *cu {
+ if !cid.IsValid() {
+ return false
+ }
+ if i == 0 {
+ continue
+ }
+ if (*cu)[i-1].RangeMax() >= cid.RangeMin() {
+ return false
+ }
+ if i < 3 {
+ continue
+ }
+ if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
+ return false
+ }
+ }
+ return true
+}
+
+// Normalize normalizes the CellUnion.
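+// For example (illustrative), a union holding the four children of a cell c
+// collapses back to the parent:
+//
+// ch := c.Children()
+// cu := CellUnion(ch[:])
+// cu.Normalize()
+// // cu is now CellUnion{c}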
+func (cu *CellUnion) Normalize() {
+ sortCellIDs(*cu)
+
+ output := make([]CellID, 0, len(*cu)) // the list of accepted cells
+ // Loop invariant: output is a sorted list of cells with no redundancy.
+ for _, ci := range *cu {
+ // The first two passes here either ignore this new candidate,
+ // or remove previously accepted cells that are covered by this candidate.
+
+ // Ignore this cell if it is contained by the previous one.
+ // We only need to check the last accepted cell. The ordering of the
+ // cells implies containment (but not the converse), and output has no redundancy,
+ // so if this candidate is not contained by the last accepted cell
+ // then it cannot be contained by any previously accepted cell.
+ if len(output) > 0 && output[len(output)-1].Contains(ci) {
+ continue
+ }
+
+ // Discard any previously accepted cells contained by this one.
+ // This could be any contiguous trailing subsequence, but it can't be
+ // a discontiguous subsequence because of the containment property of
+ // sorted S2 cells mentioned above.
+ j := len(output) - 1 // last index to keep
+ for j >= 0 {
+ if !ci.Contains(output[j]) {
+ break
+ }
+ j--
+ }
+ output = output[:j+1]
+
+ // See if the last three cells plus this one can be collapsed.
+ // We loop because collapsing three accepted cells and adding a higher level cell
+ // could cascade into previously accepted cells.
+ for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
+ // Replace four children by their parent cell.
+ output = output[:len(output)-3]
+ ci = ci.immediateParent() // checked !ci.isFace above
+ }
+ output = append(output, ci)
+ }
+ *cu = output
+}
+
+// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
+func (cu *CellUnion) IntersectsCellID(id CellID) bool {
+ // Find index of array item that occurs directly after our probe cell:
+ i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+ if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() {
+ return true
+ }
+ return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
+}
+
+// ContainsCellID reports whether the CellUnion contains the given cell ID.
+// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
+//
+// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
+// of 4 child cells are *not* considered to contain their parent cell. To get
+// this behavior you must call Normalize() explicitly.
+func (cu *CellUnion) ContainsCellID(id CellID) bool {
+ // Find index of array item that occurs directly after our probe cell:
+ i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+ if i != len(*cu) && (*cu)[i].RangeMin() <= id {
+ return true
+ }
+ return i != 0 && (*cu)[i-1].RangeMax() >= id
+}
+
+// Denormalize replaces this CellUnion with an expanded version of the
+// CellUnion where any cell whose level is less than minLevel or where
+// (level - minLevel) is not a multiple of levelMod is replaced by its
+// children, until either both of these conditions are satisfied or the
+// maximum level is reached.
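+//
+// For example (illustrative), with minLevel=2 and levelMod=1, a union holding
+// a single face cell is replaced by that face's 16 level-2 descendants.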
+func (cu *CellUnion) Denormalize(minLevel, levelMod int) {
+ var denorm CellUnion
+ for _, id := range *cu {
+ level := id.Level()
+ newLevel := level
+ if newLevel < minLevel {
+ newLevel = minLevel
+ }
+ if levelMod > 1 {
+ newLevel += (maxLevel - (newLevel - minLevel)) % levelMod
+ if newLevel > maxLevel {
+ newLevel = maxLevel
+ }
+ }
+ if newLevel == level {
+ denorm = append(denorm, id)
+ } else {
+ end := id.ChildEndAtLevel(newLevel)
+ for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() {
+ denorm = append(denorm, ci)
+ }
+ }
+ }
+ *cu = denorm
+}
+
+// RectBound returns a Rect that bounds this entity.
+func (cu *CellUnion) RectBound() Rect {
+ bound := EmptyRect()
+ for _, c := range *cu {
+ bound = bound.Union(CellFromCellID(c).RectBound())
+ }
+ return bound
+}
+
+// CapBound returns a Cap that bounds this entity.
+func (cu *CellUnion) CapBound() Cap {
+ if len(*cu) == 0 {
+ return EmptyCap()
+ }
+
+ // Compute the approximate centroid of the region. This won't produce the
+ // bounding cap of minimal area, but it should be close enough.
+ var centroid Point
+
+ for _, ci := range *cu {
+ area := AvgAreaMetric.Value(ci.Level())
+ centroid = Point{centroid.Add(ci.Point().Mul(area))}
+ }
+
+ if zero := (Point{}); centroid == zero {
+ centroid = PointFromCoords(1, 0, 0)
+ } else {
+ centroid = Point{centroid.Normalize()}
+ }
+
+ // Use the centroid as the cap axis, and expand the cap angle so that it
+ // contains the bounding caps of all the individual cells. Note that it is
+ // *not* sufficient to just bound all the cell vertices because the bounding
+ // cap may be concave (i.e. cover more than one hemisphere).
+ c := CapFromPoint(centroid)
+ for _, ci := range *cu {
+ c = c.AddCap(CellFromCellID(ci).CapBound())
+ }
+
+ return c
+}
+
+// ContainsCell reports whether this cell union contains the given cell.
+func (cu *CellUnion) ContainsCell(c Cell) bool {
+ return cu.ContainsCellID(c.id)
+}
+
+// IntersectsCell reports whether this cell union intersects the given cell.
+func (cu *CellUnion) IntersectsCell(c Cell) bool {
+ return cu.IntersectsCellID(c.id)
+}
+
+// ContainsPoint reports whether this cell union contains the given point.
+func (cu *CellUnion) ContainsPoint(p Point) bool {
+ return cu.ContainsCell(CellFromPoint(p))
+}
+
+// CellUnionBound computes a covering of the CellUnion.
+func (cu *CellUnion) CellUnionBound() []CellID {
+ return cu.CapBound().CellUnionBound()
+}
+
+// LeafCellsCovered reports the number of leaf cells covered by this cell union.
+// This will be no more than 6*2^60 for the whole sphere.
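+// For example (illustrative), a single face cell covers 1<<60 leaf cells,
+// and a leaf cell covers exactly one leaf cell (itself).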
+func (cu *CellUnion) LeafCellsCovered() int64 {
+ var numLeaves int64
+ for _, c := range *cu {
+ numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1)
+ }
+ return numLeaves
+}
+
+// areSiblings reports whether the given four cells have a common parent.
+// This requires that the four CellIDs are distinct.
+func areSiblings(a, b, c, d CellID) bool {
+ // A necessary (but not sufficient) condition is that the XOR of the
+ // four cell IDs must be zero. This is also very fast to test.
+ if (a ^ b ^ c) != d {
+ return false
+ }
+
+ // Now we do a slightly more expensive but exact test. First, compute a
+ // mask that blocks out the two bits that encode the child position of
+ // "id" with respect to its parent, then check that the other three
+ // children all agree with "mask".
+ mask := d.lsb() << 1
+ mask = ^(mask + (mask << 1))
+ idMasked := (uint64(d) & mask)
+ return ((uint64(a)&mask) == idMasked &&
+ (uint64(b)&mask) == idMasked &&
+ (uint64(c)&mask) == idMasked &&
+ !d.isFace())
+}
+
+// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Contains(o CellUnion) bool {
+ // TODO(roberts): Investigate alternatives such as divide-and-conquer
+ // or alternating-skip-search that may be significantly faster in both
+ // the average and worst case. This applies to Intersects as well.
+ for _, id := range o {
+ if !cu.ContainsCellID(id) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
+func (cu *CellUnion) Intersects(o CellUnion) bool {
+ for _, c := range *cu {
+ if o.IntersectsCellID(c) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// lowerBound returns the index in this CellUnion of the first element whose value
+// is not considered to go before the given cell id. (i.e., either it is equivalent
+// or comes after the given id.) If there is no match, then end is returned.
+func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
+ for i := begin; i < end; i++ {
+ if (*cu)[i] >= id {
+ return i
+ }
+ }
+
+ return end
+}
+
+// cellUnionDifferenceInternal adds the difference between the CellID and the union to
+// the result CellUnion. If they intersect but the difference is non-empty, it divides
+// and conquers.
+func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
+ if !other.IntersectsCellID(id) {
+ (*cu) = append((*cu), id)
+ return
+ }
+
+ if !other.ContainsCellID(id) {
+ for _, child := range id.Children() {
+ cu.cellUnionDifferenceInternal(child, other)
+ }
+ }
+}
+
+// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
+// around the union's boundary.
+//
+// For each cell c in the union, we add all cells at level
+// expandLevel that abut c. There are typically eight of those
+// (four edge-abutting and four sharing a vertex). However, if c is
+// finer than expandLevel, we add all cells abutting
+// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
+// as an expandLevel cell rarely abuts a smaller cell.
+//
+// Note that the size of the output is exponential in
+// expandLevel. For example, if expandLevel == 20 and the input
+// has a cell at level 10, there will be on the order of 4000
+// adjacent cells in the output. For most applications the
+// ExpandByRadius method below is easier to use.
+func (cu *CellUnion) ExpandAtLevel(level int) {
+ var output CellUnion
+ levelLsb := lsbForLevel(level)
+ for i := len(*cu) - 1; i >= 0; i-- {
+ id := (*cu)[i]
+ if id.lsb() < levelLsb {
+ id = id.Parent(level)
+ // Optimization: skip over any cells contained by this one. This is
+ // especially important when very small regions are being expanded.
+ for i > 0 && id.Contains((*cu)[i-1]) {
+ i--
+ }
+ }
+ output = append(output, id)
+ output = append(output, id.AllNeighbors(level)...)
+ }
+ sortCellIDs(output)
+
+ *cu = output
+ cu.Normalize()
+}
+
+// ExpandByRadius expands this CellUnion such that it contains all points whose
+// distance to the CellUnion is at most minRadius, but does not use cells that
+// are more than maxLevelDiff levels higher than the largest cell in the input.
+// The second parameter controls the tradeoff between accuracy and output size
+// when a large region is being expanded by a small amount (e.g. expanding Canada
+// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
+// by approximately 1/16 the width of its largest cell. Note that in the worst case,
+// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
+// larger than the number of cells in the input.
+func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
+ minLevel := maxLevel
+ for _, cid := range *cu {
+ minLevel = minInt(minLevel, cid.Level())
+ }
+
+ // Find the maximum level such that all cells are at least "minRadius" wide.
+ radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
+ if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
+ // The requested expansion is greater than the width of a face cell.
+ // The easiest way to handle this is to expand twice.
+ cu.ExpandAtLevel(0)
+ }
+ cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
+}
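+
+// A hedged usage sketch: expand a hypothetical covering cu by roughly 1km
+// while allowing at most 4 levels of refinement (the Earth-radius constant
+// is an assumption for illustration, not part of this package):
+//
+//	const earthRadiusKm = 6371.0
+//	cu.ExpandByRadius(s1.Angle(1.0/earthRadiusKm), 4)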
+
+// Equal reports whether the two CellUnions are equal.
+func (cu CellUnion) Equal(o CellUnion) bool {
+ if len(cu) != len(o) {
+ return false
+ }
+ for i := 0; i < len(cu); i++ {
+ if cu[i] != o[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// AverageArea returns the average area of this CellUnion.
+// This is accurate to within a factor of 1.7.
+func (cu *CellUnion) AverageArea() float64 {
+ return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered())
+}
+
+// ApproxArea returns the approximate area of this CellUnion. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher within the union.
+func (cu *CellUnion) ApproxArea() float64 {
+ var area float64
+ for _, id := range *cu {
+ area += CellFromCellID(id).ApproxArea()
+ }
+ return area
+}
+
+// ExactArea returns the area of this CellUnion as accurately as possible.
+func (cu *CellUnion) ExactArea() float64 {
+ var area float64
+ for _, id := range *cu {
+ area += CellFromCellID(id).ExactArea()
+ }
+ return area
+}
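+
+// The three area methods trade accuracy for cost; an illustrative comparison
+// for a hypothetical CellUnion cu:
+//
+//	avg := cu.AverageArea() // cheapest; within a factor of 1.7
+//	apx := cu.ApproxArea()  // per-cell estimate; within about 3%
+//	ext := cu.ExactArea()   // most expensive; as accurate as possible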
+
+// Encode encodes the CellUnion.
+func (cu *CellUnion) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ cu.encode(e)
+ return e.err
+}
+
+func (cu *CellUnion) encode(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeInt64(int64(len(*cu)))
+ for _, ci := range *cu {
+ ci.encode(e)
+ }
+}
+
+// Decode decodes the CellUnion.
+func (cu *CellUnion) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ cu.decode(d)
+ return d.err
+}
+
+func (cu *CellUnion) decode(d *decoder) {
+ version := d.readInt8()
+ if d.err != nil {
+ return
+ }
+ if version != encodingVersion {
+ d.err = fmt.Errorf("only version %d is supported", encodingVersion)
+ return
+ }
+ n := d.readInt64()
+ if d.err != nil {
+ return
+ }
+ const maxCells = 1000000
+ if n > maxCells {
+ d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells)
+ return
+ }
+ *cu = make([]CellID, n)
+ for i := range *cu {
+ (*cu)[i].decode(d)
+ }
+}
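+
+// A round-trip sketch (cu is a hypothetical CellUnion; bytes.Buffer stands
+// in for any io.Reader/io.Writer; error handling elided for brevity):
+//
+//	var buf bytes.Buffer
+//	_ = cu.Encode(&buf)
+//	var out CellUnion
+//	_ = out.Decode(&buf)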
diff --git a/vendor/github.com/golang/geo/s2/centroids.go b/vendor/github.com/golang/geo/s2/centroids.go
new file mode 100644
index 000000000..e8a91c442
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/centroids.go
@@ -0,0 +1,133 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r3"
+)
+
+// There are several notions of the "centroid" of a triangle. First, there
+// is the planar centroid, which is simply the centroid of the ordinary
+// (non-spherical) triangle defined by the three vertices. Second, there is
+// the surface centroid, which is defined as the intersection of the three
+// medians of the spherical triangle. It is possible to show that this
+// point is simply the planar centroid projected to the surface of the
+// sphere. Finally, there is the true centroid (mass centroid), which is
+// defined as the surface integral over the spherical triangle of (x,y,z)
+// divided by the triangle area. This is the point that the triangle would
+// rotate around if it was spinning in empty space.
+//
+// The best centroid for most purposes is the true centroid. Unlike the
+// planar and surface centroids, the true centroid behaves linearly as
+// regions are added or subtracted. That is, if you split a triangle into
+// pieces and compute the average of their centroids (weighted by triangle
+// area), the result equals the centroid of the original triangle. This is
+// not true of the other centroids.
+//
+// Also note that the surface centroid may be nowhere near the intuitive
+// "center" of a spherical triangle. For example, consider the triangle
+// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere).
+// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is
+// within a distance of 2*eps of the vertex B. Note that the median from A
+// (the segment connecting A to the midpoint of BC) passes through S, since
+// this is the shortest path connecting the two endpoints. On the other
+// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto
+// the surface is a much more reasonable interpretation of the "center" of
+// this triangle.
+//
+
+// TrueCentroid returns the true centroid of the spherical triangle ABC
+// multiplied by the signed area of spherical triangle ABC. The reasons for
+// multiplying by the signed area are (1) this is the quantity that needs to be
+// summed to compute the centroid of a union or difference of triangles, and
+// (2) it's actually easier to calculate this way. All points must have unit length.
+//
+// Note that the result of this function is defined to be Point(0, 0, 0) if
+// the triangle is degenerate.
+func TrueCentroid(a, b, c Point) Point {
+ // Use Distance to get accurate results for small triangles.
+ ra := float64(1)
+ if sa := float64(b.Distance(c)); sa != 0 {
+ ra = sa / math.Sin(sa)
+ }
+ rb := float64(1)
+ if sb := float64(c.Distance(a)); sb != 0 {
+ rb = sb / math.Sin(sb)
+ }
+ rc := float64(1)
+ if sc := float64(a.Distance(b)); sc != 0 {
+ rc = sc / math.Sin(sc)
+ }
+
+ // Now compute a point M such that:
+ //
+ // [Ax Ay Az] [Mx] [ra]
+ // [Bx By Bz] [My] = 0.5 * det(A,B,C) * [rb]
+ // [Cx Cy Cz] [Mz] [rc]
+ //
+ // To improve the numerical stability we subtract the first row (A) from the
+ // other two rows; this reduces the cancellation error when A, B, and C are
+ // very close together. Then we solve it using Cramer's rule.
+ //
+ // The result is the true centroid of the triangle multiplied by the
+ // triangle's area.
+ //
+ // This code still isn't as numerically stable as it could be.
+ // The biggest potential improvement is to compute B-A and C-A more
+ // accurately so that (B-A)x(C-A) is always inside triangle ABC.
+ x := r3.Vector{a.X, b.X - a.X, c.X - a.X}
+ y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y}
+ z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z}
+ r := r3.Vector{ra, rb - ra, rc - ra}
+
+ return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)}
+}
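+
+// A hedged sketch of the intended accumulation: sum the area-weighted
+// centroids over a triangulation and normalize once at the end (tris is a
+// hypothetical slice of [3]Point):
+//
+//	var sum r3.Vector
+//	for _, t := range tris {
+//		sum = sum.Add(TrueCentroid(t[0], t[1], t[2]).Vector)
+//	}
+//	centroid := Point{sum.Normalize()}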
+
+// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB
+// multiplied by the length of the edge AB. As with triangles, the true centroid
+// of a collection of line segments may be computed simply by summing the result
+// of this method for each segment.
+//
+// Note that the planar centroid of a line segment is simply 0.5 * (a + b),
+// while the surface centroid is (a + b).Normalize(). However neither of
+// these values is appropriate for computing the centroid of a collection of
+// edges (such as a polyline).
+//
+// Also note that the result of this function is defined to be Point(0, 0, 0)
+// if the edge is degenerate.
+func EdgeTrueCentroid(a, b Point) Point {
+ // The centroid (multiplied by length) is a vector toward the midpoint
+ // of the edge, whose length is twice the sine of half the angle between
+ // the two vertices. Defining theta to be this angle, we have:
+ vDiff := a.Sub(b.Vector) // Length == 2*sin(theta)
+ vSum := a.Add(b.Vector) // Length == 2*cos(theta)
+ sin2 := vDiff.Norm2()
+ cos2 := vSum.Norm2()
+ if cos2 == 0 {
+ return Point{} // Ignore antipodal edges.
+ }
+ return Point{vSum.Mul(math.Sqrt(sin2 / cos2))} // Length == 2*sin(theta)
+}
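+
+// The corresponding sketch for a polyline (pts is a hypothetical []Point
+// with at least two vertices):
+//
+//	var sum r3.Vector
+//	for i := 0; i+1 < len(pts); i++ {
+//		sum = sum.Add(EdgeTrueCentroid(pts[i], pts[i+1]).Vector)
+//	}
+//	centroid := Point{sum.Normalize()}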
+
+// PlanarCentroid returns the centroid of the planar triangle ABC. This can be
+// normalized to unit length to obtain the "surface centroid" of the corresponding
+// spherical triangle, i.e. the intersection of the three medians. However, note
+// that for large spherical triangles the surface centroid may be nowhere near
+// the intuitive "center".
+func PlanarCentroid(a, b, c Point) Point {
+ return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. / 3)}
+}
diff --git a/vendor/github.com/golang/geo/s2/contains_point_query.go b/vendor/github.com/golang/geo/s2/contains_point_query.go
new file mode 100644
index 000000000..3026f3601
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/contains_point_query.go
@@ -0,0 +1,190 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// VertexModel defines whether shapes are considered to contain their vertices.
+// Note that these definitions differ from the ones used by BooleanOperation.
+//
+// Note that points other than vertices are never contained by polylines.
+// If you need this behavior, use ClosestEdgeQuery's IsDistanceLess
+// with a suitable distance threshold instead.
+type VertexModel int
+
+const (
+ // VertexModelOpen means no shapes contain their vertices (not even
+ // points). Therefore Contains(Point) returns true if and only if the
+ // point is in the interior of some polygon.
+ VertexModelOpen VertexModel = iota
+
+ // VertexModelSemiOpen means that polygon point containment is defined
+ // such that if several polygons tile the region around a vertex, then
+ // exactly one of those polygons contains that vertex. Points and
+ // polylines still do not contain any vertices.
+ VertexModelSemiOpen
+
+ // VertexModelClosed means all shapes contain their vertices (including
+ // points and polylines).
+ VertexModelClosed
+)
+
+// ContainsPointQuery determines whether one or more shapes in a ShapeIndex
+// contain a given Point. The ShapeIndex may contain any number of points,
+// polylines, and/or polygons (possibly overlapping). Shape boundaries may be
+// modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are
+// considered to contain their vertices).
+//
+// This type is not safe for concurrent use.
+//
+// Note that if you need to do a large number of point containment tests, it
+// is more efficient to reuse the query rather than creating a new one each
+// time.
+type ContainsPointQuery struct {
+ model VertexModel
+ index *ShapeIndex
+ iter *ShapeIndexIterator
+}
+
+// NewContainsPointQuery creates a new instance of the ContainsPointQuery for the index
+// and given vertex model choice.
+func NewContainsPointQuery(index *ShapeIndex, model VertexModel) *ContainsPointQuery {
+ return &ContainsPointQuery{
+ index: index,
+ model: model,
+ iter: index.Iterator(),
+ }
+}
+
+// Contains reports whether any shape in the query's index contains the point p
+// under the query's vertex model (Open, SemiOpen, or Closed).
+func (q *ContainsPointQuery) Contains(p Point) bool {
+ if !q.iter.LocatePoint(p) {
+ return false
+ }
+
+ cell := q.iter.IndexCell()
+ for _, clipped := range cell.shapes {
+ if q.shapeContains(clipped, q.iter.Center(), p) {
+ return true
+ }
+ }
+ return false
+}
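+
+// An illustrative usage sketch (index and p are hypothetical; reuse the
+// query across many lookups for efficiency):
+//
+//	q := NewContainsPointQuery(index, VertexModelSemiOpen)
+//	if q.Contains(p) {
+//		// at least one shape in index contains p
+//	}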
+
+// shapeContains reports whether the clippedShape from the iterator's center position contains
+// the given point.
+func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Point) bool {
+ inside := clipped.containsCenter
+ numEdges := clipped.numEdges()
+ if numEdges <= 0 {
+ return inside
+ }
+
+ shape := q.index.Shape(clipped.shapeID)
+ if shape.Dimension() != 2 {
+ // Points and polylines can be ignored unless the vertex model is Closed.
+ if q.model != VertexModelClosed {
+ return false
+ }
+
+ // Otherwise, the point is contained if and only if it matches a vertex.
+ for _, edgeID := range clipped.edges {
+ edge := shape.Edge(edgeID)
+ if edge.V0 == p || edge.V1 == p {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Test containment by drawing a line segment from the cell center to the
+ // given point and counting edge crossings.
+ crosser := NewEdgeCrosser(center, p)
+ for _, edgeID := range clipped.edges {
+ edge := shape.Edge(edgeID)
+ sign := crosser.CrossingSign(edge.V0, edge.V1)
+ if sign == DoNotCross {
+ continue
+ }
+ if sign == MaybeCross {
+ // For the Open and Closed models, check whether p is a vertex.
+ if q.model != VertexModelSemiOpen && (edge.V0 == p || edge.V1 == p) {
+ return (q.model == VertexModelClosed)
+ }
+ // C++ plays fast and loose with the int <-> bool conversions here.
+ if VertexCrossing(crosser.a, crosser.b, edge.V0, edge.V1) {
+ sign = Cross
+ } else {
+ sign = DoNotCross
+ }
+ }
+ inside = inside != (sign == Cross)
+ }
+
+ return inside
+}
+
+// ShapeContains reports whether the given shape contains the point under this
+// query's vertex model (Open, SemiOpen, or Closed).
+//
+// This requires that the shape belongs to this query's index.
+func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool {
+ if !q.iter.LocatePoint(p) {
+ return false
+ }
+
+ clipped := q.iter.IndexCell().findByShapeID(q.index.idForShape(shape))
+ if clipped == nil {
+ return false
+ }
+ return q.shapeContains(clipped, q.iter.Center(), p)
+}
+
+// shapeVisitorFunc is a type of function that can be called against shapes in an index.
+type shapeVisitorFunc func(shape Shape) bool
+
+// visitContainingShapes visits all shapes in the given index that contain the
+// given point p, terminating early if the given visitor function returns false,
+// in which case visitContainingShapes returns false. Each shape is
+// visited at most once.
+func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool {
+ // This function returns false only if the algorithm terminates early
+ // because the visitor function returned false.
+ if !q.iter.LocatePoint(p) {
+ return true
+ }
+
+ cell := q.iter.IndexCell()
+ for _, clipped := range cell.shapes {
+ if q.shapeContains(clipped, q.iter.Center(), p) &&
+ !f(q.index.Shape(clipped.shapeID)) {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainingShapes returns a slice of all shapes that contain the given point.
+func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape {
+ var shapes []Shape
+ q.visitContainingShapes(p, func(shape Shape) bool {
+ shapes = append(shapes, shape)
+ return true
+ })
+ return shapes
+}
+
+// TODO(roberts): Remaining methods from C++
+// type edgeVisitorFunc func(shape ShapeEdge) bool
+// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool
diff --git a/vendor/github.com/golang/geo/s2/contains_vertex_query.go b/vendor/github.com/golang/geo/s2/contains_vertex_query.go
new file mode 100644
index 000000000..8e74f9e5b
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/contains_vertex_query.go
@@ -0,0 +1,63 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// ContainsVertexQuery is used to track the edges entering and leaving the
+// given vertex of a Polygon in order to determine whether the vertex is
+// contained by the Polygon.
+//
+// Point containment is defined according to the semi-open boundary model
+// which means that if several polygons tile the region around a vertex,
+// then exactly one of those polygons contains that vertex.
+type ContainsVertexQuery struct {
+ target Point
+ edgeMap map[Point]int
+}
+
+// NewContainsVertexQuery returns a new query for the given vertex whose
+// containment will be determined.
+func NewContainsVertexQuery(target Point) *ContainsVertexQuery {
+ return &ContainsVertexQuery{
+ target: target,
+ edgeMap: make(map[Point]int),
+ }
+}
+
+// AddEdge adds the edge between target and v with the given direction.
+// (+1 = outgoing, -1 = incoming, 0 = degenerate).
+func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
+ q.edgeMap[v] += direction
+}
+
+// ContainsVertex returns +1 if the target vertex is contained, -1 if it is
+// not contained, and 0 if the incident edges consist of matched sibling pairs.
+func (q *ContainsVertexQuery) ContainsVertex() int {
+ // Find the unmatched edge that is immediately clockwise from Ortho(P).
+ referenceDir := Point{q.target.Ortho()}
+
+ bestPoint := referenceDir
+ bestDir := 0
+
+ for k, v := range q.edgeMap {
+ if v == 0 {
+ continue // This is a "matched" edge.
+ }
+ if OrderedCCW(referenceDir, bestPoint, k, q.target) {
+ bestPoint = k
+ bestDir = v
+ }
+ }
+ return bestDir
+}
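+
+// A hedged sketch of the intended protocol (target, v0, and v1 are
+// hypothetical vertices of some loop):
+//
+//	q := NewContainsVertexQuery(target)
+//	q.AddEdge(v0, 1)  // outgoing edge target -> v0
+//	q.AddEdge(v1, -1) // incoming edge v1 -> target
+//	sign := q.ContainsVertex() // +1 contained, -1 not, 0 matched siblings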
diff --git a/vendor/github.com/golang/geo/s2/convex_hull_query.go b/vendor/github.com/golang/geo/s2/convex_hull_query.go
new file mode 100644
index 000000000..d1e79d0c1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/convex_hull_query.go
@@ -0,0 +1,239 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "sort"
+)
+
+// ConvexHullQuery builds the convex hull of any collection of points,
+// polylines, loops, and polygons. It returns a single convex loop.
+//
+// The convex hull is defined as the smallest convex region on the sphere that
+// contains all of your input geometry. Recall that a region is "convex" if
+// for every pair of points inside the region, the straight edge between them
+// is also inside the region. In our case, a "straight" edge is a geodesic,
+// i.e. the shortest path on the sphere between two points.
+//
+// Containment of input geometry is defined as follows:
+//
+// - Each input loop and polygon is contained by the convex hull exactly
+// (i.e., according to Polygon's Contains(Polygon)).
+//
+// - Each input point is either contained by the convex hull or is a vertex
+// of the convex hull. (Recall that S2Loops do not necessarily contain their
+// vertices.)
+//
+// - For each input polyline, the convex hull contains all of its vertices
+// according to the rule for points above. (The definition of convexity
+// then ensures that the convex hull also contains the polyline edges.)
+//
+// To use this type, call the various Add... methods to add your input geometry, and
+// then call ConvexHull. Note that ConvexHull does *not* reset the
+// state; you can continue adding geometry if desired and compute the convex
+// hull again. If you want to start from scratch, simply create a new
+// ConvexHullQuery value.
+//
+// This implements Andrew's monotone chain algorithm, which is a variant of the
+// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time
+// complexity is O(n log n), and the space required is O(n). In fact only the
+// call to "sort" takes O(n log n) time; the rest of the algorithm is linear.
+//
+// Demonstration of the algorithm and code:
+// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain
+//
+// This type is not safe for concurrent use.
+type ConvexHullQuery struct {
+ bound Rect
+ points []Point
+}
+
+// NewConvexHullQuery creates a new ConvexHullQuery.
+func NewConvexHullQuery() *ConvexHullQuery {
+ return &ConvexHullQuery{
+ bound: EmptyRect(),
+ }
+}
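+
+// An illustrative usage sketch, assuming pts is a hypothetical []Point:
+//
+//	q := NewConvexHullQuery()
+//	for _, p := range pts {
+//		q.AddPoint(p)
+//	}
+//	hull := q.ConvexHull() // a single convex Loop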
+
+// AddPoint adds the given point to the input geometry.
+func (q *ConvexHullQuery) AddPoint(p Point) {
+ q.bound = q.bound.AddPoint(LatLngFromPoint(p))
+ q.points = append(q.points, p)
+}
+
+// AddPolyline adds the given polyline to the input geometry.
+func (q *ConvexHullQuery) AddPolyline(p *Polyline) {
+ q.bound = q.bound.Union(p.RectBound())
+ q.points = append(q.points, (*p)...)
+}
+
+// AddLoop adds the given loop to the input geometry.
+func (q *ConvexHullQuery) AddLoop(l *Loop) {
+ q.bound = q.bound.Union(l.RectBound())
+ if l.isEmptyOrFull() {
+ return
+ }
+ q.points = append(q.points, l.vertices...)
+}
+
+// AddPolygon adds the given polygon to the input geometry.
+func (q *ConvexHullQuery) AddPolygon(p *Polygon) {
+ q.bound = q.bound.Union(p.RectBound())
+ for _, l := range p.loops {
+ // Only loops at depth 0 can contribute to the convex hull.
+ if l.depth == 0 {
+ q.AddLoop(l)
+ }
+ }
+}
+
+// CapBound returns a bounding cap for the input geometry provided.
+//
+// Note that this method does not clear the geometry; you can continue
+// adding to it and call this method again if desired.
+func (q *ConvexHullQuery) CapBound() Cap {
+ // We keep track of a rectangular bound rather than a spherical cap because
+ // it is easy to compute a tight bound for a union of rectangles, whereas it
+ // is quite difficult to compute a tight bound around a union of caps.
+ // Also, polygons and polylines implement CapBound() in terms of
+ // RectBound() for this same reason, so it is much better to keep track
+ // of a rectangular bound as we go along and convert it at the end.
+ //
+ // TODO(roberts): We could compute an optimal bound by implementing Welzl's
+ // algorithm. However we would still need to have special handling of loops
+ // and polygons, since if a loop spans more than 180 degrees in any
+ // direction (i.e., if it contains two antipodal points), then it is not
+ // enough just to bound its vertices. In this case the only convex bounding
+ // cap is FullCap(), and the only convex bounding loop is the full loop.
+ return q.bound.CapBound()
+}
+
+// ConvexHull returns a Loop representing the convex hull of the input geometry provided.
+//
+// If there is no geometry, this method returns an empty loop containing no
+// points.
+//
+// If the geometry spans more than half of the sphere, this method returns a
+// full loop containing the entire sphere.
+//
+// If the geometry contains 1 or 2 points, or a single edge, this method
+// returns a very small loop consisting of three vertices (which are a
+// superset of the input vertices).
+//
+// Note that this method does not clear the geometry; you can continue
+// adding to the query and call this method again.
+func (q *ConvexHullQuery) ConvexHull() *Loop {
+ c := q.CapBound()
+ if c.Height() >= 1 {
+ // The bounding cap is not convex. The current bounding cap
+ // implementation is not optimal, but nevertheless it is likely that the
+ // input geometry itself is not contained by any convex polygon. In any
+ // case, we need a convex bounding cap to proceed with the algorithm below
+ // (in order to construct a point "origin" that is definitely outside the
+ // convex hull).
+ return FullLoop()
+ }
+
+ // Remove duplicates. We need to do this before checking whether there are
+ // fewer than 3 points.
+ x := make(map[Point]bool)
+ r, w := 0, 0 // read/write indexes
+ for ; r < len(q.points); r++ {
+ if x[q.points[r]] {
+ continue
+ }
+ q.points[w] = q.points[r]
+ x[q.points[r]] = true
+ w++
+ }
+ q.points = q.points[:w]
+
+ // This code implements Andrew's monotone chain algorithm, which is a simple
+ // variant of the Graham scan. Rather than sorting by x-coordinate, instead
+ // we sort the points in CCW order around an origin O such that all points
+ // are guaranteed to be on one side of some geodesic through O. This
+ // ensures that as we scan through the points, each new point can only
+ // belong at the end of the chain (i.e., the chain is monotone in terms of
+ // the angle around O from the starting point).
+ origin := Point{c.Center().Ortho()}
+ sort.Slice(q.points, func(i, j int) bool {
+ return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise
+ })
+
+ // Special cases for fewer than 3 points.
+ switch len(q.points) {
+ case 0:
+ return EmptyLoop()
+ case 1:
+ return singlePointLoop(q.points[0])
+ case 2:
+ return singleEdgeLoop(q.points[0], q.points[1])
+ }
+
+ // Generate the lower and upper halves of the convex hull. Each half
+ // consists of the maximal subset of vertices such that the edge chain
+ // makes only left (CCW) turns.
+ lower := q.monotoneChain()
+
+ // reverse the points
+ for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 {
+ q.points[left], q.points[right] = q.points[right], q.points[left]
+ }
+ upper := q.monotoneChain()
+
+ // Remove the duplicate vertices and combine the chains.
+ lower = lower[:len(lower)-1]
+ upper = upper[:len(upper)-1]
+ lower = append(lower, upper...)
+
+ return LoopFromPoints(lower)
+}
+
+// monotoneChain iterates through the points, selecting the maximal subset of points
+// such that the edge chain makes only left (CCW) turns.
+func (q *ConvexHullQuery) monotoneChain() []Point {
+ var output []Point
+ for _, p := range q.points {
+ // Remove any points that would cause the chain to make a clockwise turn.
+ for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise {
+ output = output[:len(output)-1]
+ }
+ output = append(output, p)
+ }
+ return output
+}
+
+// singlePointLoop constructs a 3-vertex loop consisting of "p" and two nearby
+// vertices. Note that ContainsPoint(p) may be false for the resulting loop.
+func singlePointLoop(p Point) *Loop {
+ const offset = 1e-15
+ d0 := p.Ortho()
+ d1 := p.Cross(d0)
+ vertices := []Point{
+ p,
+ {p.Add(d0.Mul(offset)).Normalize()},
+ {p.Add(d1.Mul(offset)).Normalize()},
+ }
+ return LoopFromPoints(vertices)
+}
+
+// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint.
+func singleEdgeLoop(a, b Point) *Loop {
+ vertices := []Point{a, b, {a.Add(b.Vector).Normalize()}}
+ loop := LoopFromPoints(vertices)
+ // The resulting loop may be clockwise, so invert it if necessary.
+ loop.Normalize()
+ return loop
+}
diff --git a/vendor/github.com/golang/geo/s2/crossing_edge_query.go b/vendor/github.com/golang/geo/s2/crossing_edge_query.go
new file mode 100644
index 000000000..51852dab4
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/crossing_edge_query.go
@@ -0,0 +1,409 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "sort"
+
+ "github.com/golang/geo/r2"
+)
+
+// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
+// a given edge or set of edges.
+//
+// Note that if you need to query many edges, it is more efficient to declare
+// a single CrossingEdgeQuery instance and reuse it.
+//
+// If you want to find *all* the pairs of crossing edges, it is more efficient to
+// use the not yet implemented VisitCrossings in shapeutil.
+type CrossingEdgeQuery struct {
+ index *ShapeIndex
+
+ // temporary values used while processing a query.
+ a, b r2.Point
+ iter *ShapeIndexIterator
+
+ // candidate cells generated when finding crossings.
+ cells []*ShapeIndexCell
+}
+
+// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
+func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
+ c := &CrossingEdgeQuery{
+ index: index,
+ iter: index.Iterator(),
+ }
+ return c
+}
+
+// Crossings returns the set of edges of the given shape that intersect the edge AB.
+// If crossType is CrossingTypeInterior, then only intersections at a point interior
+// to both edges are reported, while if it is CrossingTypeAll then edges that share
+// a vertex are also reported.
+func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
+ edges := c.candidates(a, b, shape)
+ if len(edges) == 0 {
+ return nil
+ }
+
+ crosser := NewEdgeCrosser(a, b)
+ out := 0
+ n := len(edges)
+
+ for in := 0; in < n; in++ {
+ b := shape.Edge(edges[in])
+ sign := crosser.CrossingSign(b.V0, b.V1)
+ if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
+ edges[out] = edges[in]
+ out++
+ }
+ }
+
+ if out < n {
+ edges = edges[0:out]
+ }
+ return edges
+}
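+
+// An illustrative call (index, shape, and the endpoints a and b are
+// hypothetical):
+//
+//	q := NewCrossingEdgeQuery(index)
+//	edgeIDs := q.Crossings(a, b, shape, CrossingTypeInterior)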
+
+// EdgeMap stores a sorted set of edge ids for each shape.
+type EdgeMap map[Shape][]int
+
+// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
+// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
+// point interior to both edges are reported, while if it is CrossingTypeAll
+// then edges that share a vertex are also reported.
+//
+// The edges are returned as a mapping from shape to the edges of that shape
+// that intersect AB. Every returned shape has at least one crossing edge.
+func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
+ edgeMap := c.candidatesEdgeMap(a, b)
+ if len(edgeMap) == 0 {
+ return nil
+ }
+
+ crosser := NewEdgeCrosser(a, b)
+ for shape, edges := range edgeMap {
+ out := 0
+ n := len(edges)
+ for in := 0; in < n; in++ {
+ edge := shape.Edge(edges[in])
+ sign := crosser.CrossingSign(edge.V0, edge.V1)
+ if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
+ edgeMap[shape][out] = edges[in]
+ out++
+ }
+ }
+
+ if out == 0 {
+ delete(edgeMap, shape)
+ } else {
+ if out < n {
+ edgeMap[shape] = edgeMap[shape][0:out]
+ }
+ }
+ }
+ return edgeMap
+}
+
+// candidates returns a superset of the edges of the given shape that intersect
+// the edge AB.
+func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
+ var edges []int
+
+ // For small loops it is faster to use brute force. The threshold below was
+ // determined using benchmarks.
+ const maxBruteForceEdges = 27
+ maxEdges := shape.NumEdges()
+ if maxEdges <= maxBruteForceEdges {
+ edges = make([]int, maxEdges)
+ for i := 0; i < maxEdges; i++ {
+ edges[i] = i
+ }
+ return edges
+ }
+
+ // Compute the set of index cells intersected by the query edge.
+ c.getCellsForEdge(a, b)
+ if len(c.cells) == 0 {
+ return nil
+ }
+
+ // Gather all the edges that intersect those cells and sort them.
+ // TODO(roberts): Shapes don't track their ID, so we need to range over
+ // the index to find the ID manually.
+ var shapeID int32
+ for k, v := range c.index.shapes {
+ if v == shape {
+ shapeID = k
+ }
+ }
+
+ for _, cell := range c.cells {
+ if cell == nil {
+ continue
+ }
+ clipped := cell.findByShapeID(shapeID)
+ if clipped == nil {
+ continue
+ }
+ edges = append(edges, clipped.edges...)
+ }
+
+ if len(c.cells) > 1 {
+ edges = uniqueInts(edges)
+ }
+
+ return edges
+}
+
+// uniqueInts returns the sorted uniqued values from the given input.
+func uniqueInts(in []int) []int {
+ var edges []int
+ m := make(map[int]bool)
+ for _, i := range in {
+ if m[i] {
+ continue
+ }
+ m[i] = true
+ edges = append(edges, i)
+ }
+ sort.Ints(edges)
+ return edges
+}
+
+// candidatesEdgeMap returns a map from shapes to the superset of edges for that
+// shape that intersect the edge AB.
+//
+// CAVEAT: This method may return shapes that have an empty set of candidate edges.
+// However the return value is non-empty only if at least one shape has a candidate edge.
+func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
+ edgeMap := make(EdgeMap)
+
+ // If there are only a few edges then it's faster to use brute force. We
+ // only bother with this optimization when there is a single shape.
+ if len(c.index.shapes) == 1 {
+ // Typically this method is called many times, so it is worth checking
+ // whether the edge map is empty or already consists of a single entry for
+ // this shape, and skip clearing edge map in that case.
+ shape := c.index.Shape(0)
+
+ // Note that we leave the edge map non-empty even if there are no candidates
+ // (i.e., there is a single entry with an empty set of edges).
+ edgeMap[shape] = c.candidates(a, b, shape)
+ return edgeMap
+ }
+
+ // Compute the set of index cells intersected by the query edge.
+ c.getCellsForEdge(a, b)
+ if len(c.cells) == 0 {
+ return edgeMap
+ }
+
+ // Gather all the edges that intersect those cells and sort them.
+ for _, cell := range c.cells {
+ for _, clipped := range cell.shapes {
+ s := c.index.Shape(clipped.shapeID)
+ for j := 0; j < clipped.numEdges(); j++ {
+ edgeMap[s] = append(edgeMap[s], clipped.edges[j])
+ }
+ }
+ }
+
+ if len(c.cells) > 1 {
+ for s, edges := range edgeMap {
+ edgeMap[s] = uniqueInts(edges)
+ }
+ }
+
+ return edgeMap
+}
+
+// getCells returns the set of ShapeIndexCells that might contain edges intersecting
+// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
+func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
+ aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
+ if ok {
+ c.a = aUV
+ c.b = bUV
+ edgeBound := r2.RectFromPoints(c.a, c.b)
+ if root.Bound().Intersects(edgeBound) {
+ c.computeCellsIntersected(root, edgeBound)
+ }
+ }
+
+ if len(c.cells) == 0 {
+ return nil
+ }
+
+ return c.cells
+}
+
+// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB.
+func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
+ c.cells = nil
+
+ segments := FaceSegments(a, b)
+ for _, segment := range segments {
+ c.a = segment.a
+ c.b = segment.b
+
+ // Optimization: rather than always starting the recursive subdivision at
+ // the top level face cell, instead we start at the smallest CellID that
+ // contains the edge (the edge root cell). This typically lets us skip
+ // quite a few levels of recursion since most edges are short.
+ edgeBound := r2.RectFromPoints(c.a, c.b)
+ pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0)
+ edgeRoot := pcell.ShrinkToFit(edgeBound)
+
+ // Now we need to determine how the edge root cell is related to the cells
+ // in the spatial index (cellMap). There are three cases:
+ //
+ // 1. edgeRoot is an index cell or is contained within an index cell.
+ // In this case we only need to look at the contents of that cell.
+ // 2. edgeRoot is subdivided into one or more index cells. In this case
+ // we recursively subdivide to find the cells intersected by AB.
+ // 3. edgeRoot does not intersect any index cells. In this case there
+ // is nothing to do.
+ relation := c.iter.LocateCellID(edgeRoot)
+ if relation == Indexed {
+ // edgeRoot is an index cell or is contained by an index cell (case 1).
+ c.cells = append(c.cells, c.iter.IndexCell())
+ } else if relation == Subdivided {
+ // edgeRoot is subdivided into one or more index cells (case 2). We
+ // find the cells intersected by AB using recursive subdivision.
+ if !edgeRoot.isFace() {
+ pcell = PaddedCellFromCellID(edgeRoot, 0)
+ }
+ c.computeCellsIntersected(pcell, edgeBound)
+ }
+ }
+}
+
+// computeCellsIntersected computes the index cells intersected by the current
+// edge that are descendants of pcell and adds them to this queries set of cells.
+func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {
+ c.iter.seek(pcell.id.RangeMin())
+ if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {
+ // The index does not contain pcell or any of its descendants.
+ return
+ }
+ if c.iter.CellID() == pcell.id {
+ // The index contains this cell exactly.
+ c.cells = append(c.cells, c.iter.IndexCell())
+ return
+ }
+
+ // Otherwise, split the edge among the four children of pcell.
+ center := pcell.Middle().Lo()
+
+ if edgeBound.X.Hi < center.X {
+ // Edge is entirely contained in the two left children.
+ c.clipVAxis(edgeBound, center.Y, 0, pcell)
+ return
+ } else if edgeBound.X.Lo >= center.X {
+ // Edge is entirely contained in the two right children.
+ c.clipVAxis(edgeBound, center.Y, 1, pcell)
+ return
+ }
+
+ childBounds := c.splitUBound(edgeBound, center.X)
+ if edgeBound.Y.Hi < center.Y {
+ // Edge is entirely contained in the two lower children.
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
+ } else if edgeBound.Y.Lo >= center.Y {
+ // Edge is entirely contained in the two upper children.
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
+ } else {
+ // The edge bound spans all four children. The edge itself intersects
+ // at most three children (since no padding is being used).
+ c.clipVAxis(childBounds[0], center.Y, 0, pcell)
+ c.clipVAxis(childBounds[1], center.Y, 1, pcell)
+ }
+}
+
+// clipVAxis computes the intersected cells recursively for a given padded cell.
+// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
+// determine whether the current edge intersects the lower child, upper child,
+// or both children, and call c.computeCellsIntersected recursively on those children.
+// The center is the v-coordinate at the center of pcell.
+func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
+ if edgeBound.Y.Hi < center {
+ // Edge is entirely contained in the lower child.
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
+ } else if edgeBound.Y.Lo >= center {
+ // Edge is entirely contained in the upper child.
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
+ } else {
+ // The edge intersects both children.
+ childBounds := c.splitVBound(edgeBound, center)
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
+ c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
+ }
+}
+
+// splitUBound returns the bound for two children as a result of splitting the
+// current edge at the given value U.
+func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
+ v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))
+ // diag indicates which diagonal of the bounding box is spanned by AB:
+ // it is 0 if AB has positive slope, and 1 if AB has negative slope.
+ var diag int
+ if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+ diag = 1
+ }
+ return splitBound(edgeBound, 0, diag, u, v)
+}
+
+// splitVBound returns the bound for two children as a result of splitting the
+// current edge into two child edges at the given value V.
+func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
+ u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))
+ var diag int
+ if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
+ diag = 1
+ }
+ return splitBound(edgeBound, diag, 0, u, v)
+}
+
+// splitBound returns the bounds for the two children as a result of splitting
+// the current edge into two child edges at the given point (u,v). uEnd and vEnd
+// indicate which bound endpoints of the first child will be updated.
+func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {
+ var childBounds = [2]r2.Rect{
+ edgeBound,
+ edgeBound,
+ }
+
+ if uEnd == 1 {
+ childBounds[0].X.Lo = u
+ childBounds[1].X.Hi = u
+ } else {
+ childBounds[0].X.Hi = u
+ childBounds[1].X.Lo = u
+ }
+
+ if vEnd == 1 {
+ childBounds[0].Y.Lo = v
+ childBounds[1].Y.Hi = v
+ } else {
+ childBounds[0].Y.Hi = v
+ childBounds[1].Y.Lo = v
+ }
+
+ return childBounds
+}
diff --git a/vendor/github.com/golang/geo/s2/distance_target.go b/vendor/github.com/golang/geo/s2/distance_target.go
new file mode 100644
index 000000000..066bbacfa
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/distance_target.go
@@ -0,0 +1,149 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "github.com/golang/geo/s1"
+)
+
+// The distance interface represents a set of common methods used by algorithms
+// that compute distances between various S2 types.
+type distance interface {
+ // chordAngle returns this type as a ChordAngle.
+ chordAngle() s1.ChordAngle
+
+ // fromChordAngle is used to type convert a ChordAngle to this type.
+ // This is to work around needing to be clever in parts of the code
+ // where a distanceTarget interface method expects distances, but the
+ // user only supplies a ChordAngle, and we need to dynamically cast it
+ // to an appropriate distance interface types.
+ fromChordAngle(o s1.ChordAngle) distance
+
+ // zero returns a zero distance.
+ zero() distance
+ // negative returns a value smaller than any valid value.
+ negative() distance
+ // infinity returns a value larger than any valid value.
+ infinity() distance
+
+ // less is similar to the Less method in Sort. To get minimum values,
+ // this would be a less than type operation. For maximum, this would
+ // be a greater than type operation.
+ less(other distance) bool
+
+ // sub subtracts the other value from this one and returns the new value.
+ // This is done as a method and not simple mathematical operation to
+ // allow closest and furthest to implement this in opposite ways.
+ sub(other distance) distance
+
+ // chordAngleBound reports the upper bound on a ChordAngle corresponding
+ // to this distance. For example, if distance measures WGS84 ellipsoid
+ // distance then the corresponding angle needs to be 0.56% larger.
+ chordAngleBound() s1.ChordAngle
+
+ // updateDistance may update the value this distance represents
+ // based on the given input. The updated value and a boolean reporting
+ // if the value was changed are returned.
+ updateDistance(other distance) (distance, bool)
+}
+
+// distanceTarget is an interface that represents a geometric type to which distances
+// are measured.
+//
+// For example, there are implementations that measure distances to a Point,
+// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry
+// stored in ShapeIndex.
+//
+// The distanceTarget types are provided for the benefit of types that measure
+// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery,
+// ClosestPointQuery, and ClosestCellQuery, etc.
+type distanceTarget interface {
+ // capBound returns a Cap that bounds the set of points whose distance to the
+ // target is distance.zero().
+ capBound() Cap
+
+ // updateDistanceToPoint updates the distance if the distance to
+ // the point P is less than the given dist.
+ // The boolean reports if the value was updated.
+ updateDistanceToPoint(p Point, dist distance) (distance, bool)
+
+ // updateDistanceToEdge updates the distance if the distance to
+ // the edge E is less than the given dist.
+ // The boolean reports if the value was updated.
+ updateDistanceToEdge(e Edge, dist distance) (distance, bool)
+
+ // updateDistanceToCell updates the distance if the distance to the cell C
+ // (including its interior) is less than the given dist.
+ // The boolean reports if the value was updated.
+ updateDistanceToCell(c Cell, dist distance) (distance, bool)
+
+ // setMaxError potentially updates the value of MaxError, and reports if
+ // the specific type supports altering it. Whenever one of the
+ // updateDistanceTo... methods above returns true, the returned distance
+ // is allowed to be up to maxError larger than the true minimum distance.
+ // In other words, it gives this target object permission to terminate its
+ // distance calculation as soon as it has determined that (1) the minimum
+ // distance is less than minDist and (2) the best possible further
+ // improvement is less than maxError.
+ //
+ // If the target takes advantage of maxError to optimize its distance
+ // calculation, this method must return true. (Most target types will
+ // default to return false.)
+ setMaxError(maxErr s1.ChordAngle) bool
+
+ // maxBruteForceIndexSize reports the maximum number of indexed objects for
+ // which it is faster to compute the distance by brute force (e.g., by testing
+ // every edge) rather than by using an index.
+ //
+ // The following method is provided as a convenience for types that compute
+ // distances to a collection of indexed geometry, such as ClosestEdgeQuery
+ // and ClosestPointQuery.
+ //
+ // Types that do not support this should return -1.
+ maxBruteForceIndexSize() int
+
+ // distance returns an instance of the underlying distance type this
+ // target uses. This is to work around the use of Templates in the C++.
+ distance() distance
+
+ // visitContainingShapes finds all polygons in the given index that
+ // completely contain a connected component of the target geometry. (For
+ // example, if the target consists of 10 points, this method finds
+ // polygons that contain any of those 10 points.) For each such polygon,
+ // the visit function is called with the Shape of the polygon along with
+ // a point of the target geometry that is contained by that polygon.
+ //
+ // Optionally, any polygon that intersects the target geometry may also be
+ // returned. In other words, this method returns all polygons that
+ // contain any connected component of the target, along with an arbitrary
+ // subset of the polygons that intersect the target.
+ //
+ // For example, suppose that the index contains two abutting polygons
+ // A and B. If the target consists of two points "a" contained by A and
+ // "b" contained by B, then both A and B are returned. But if the target
+ // consists of the edge "ab", then any subset of {A, B} could be returned
+ // (because both polygons intersect the target but neither one contains
+ // the edge "ab").
+ //
+ // If the visit function returns false, this method terminates early and
+ // returns false as well. Otherwise returns true.
+ //
+ // NOTE(roberts): This method exists only for the purpose of implementing
+ // edgeQuery IncludeInteriors efficiently.
+ visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool
+}
+
+// shapePointVisitorFunc defines a type of function the visitContainingShapes can call.
+type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool
diff --git a/vendor/github.com/golang/geo/s2/doc.go b/vendor/github.com/golang/geo/s2/doc.go
new file mode 100644
index 000000000..43e7a6344
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package s2 is a library for working with geometry in S² (spherical geometry).
+
+Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹),
+r2 (operates on ℝ²) and r3 (operates on ℝ³).
+
+This package provides types and functions for the S2 cell hierarchy and coordinate systems.
+The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²)
+into ``cells''; it is highly efficient, scales from continental size to under 1 cm²
+and preserves spatial locality (nearby cells have close IDs).
+
+More information including an in-depth introduction to S2 can be found on the
+S2 website https://s2geometry.io/
+*/
+package s2
diff --git a/vendor/github.com/golang/geo/s2/edge_clipping.go b/vendor/github.com/golang/geo/s2/edge_clipping.go
new file mode 100644
index 000000000..57a53bf0f
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_clipping.go
@@ -0,0 +1,672 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file contains a collection of methods for:
+//
+// (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube
+// (see s2stuv), and
+//
+// (2) Robustly clipping 2D edges against 2D rectangles.
+//
+// These functions can be used to efficiently find the set of CellIDs that
+// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery).
+
+import (
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/r3"
+)
+
+const (
+ // edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
+ // compared to the exact result, assuming that the points A and B are in
+ // the rectangle [-1,1]x[-1,1] or slightly outside it (by 1e-10 or less).
+ edgeClipErrorUVCoord = 2.25 * dblEpsilon
+
+ // edgeClipErrorUVDist is the maximum distance from a clipped point to
+ // the corresponding exact result. It is equal to the error in a single
+ // coordinate because at most one coordinate is subject to error.
+ edgeClipErrorUVDist = 2.25 * dblEpsilon
+
+ // faceClipErrorRadians is the maximum angle between a returned vertex
+ // and the nearest point on the exact edge AB. It is equal to the
+ // maximum directional error in PointCross, plus the error when
+ // projecting points onto a cube face.
+ faceClipErrorRadians = 3 * dblEpsilon
+
+ // faceClipErrorUVDist is the same angle expressed as a maximum distance
+ // in (u,v)-space. In other words, a returned vertex is at most this far
+ // from the exact edge AB projected into (u,v)-space.
+ faceClipErrorUVDist = 9 * dblEpsilon
+
+ // faceClipErrorUVCoord is the maximum angle between a returned vertex
+ // and the nearest point on the exact edge AB expressed as the maximum error
+ // in an individual u- or v-coordinate. In other words, for each
+ // returned vertex there is a point on the exact edge AB whose u- and
+ // v-coordinates differ from the vertex by at most this amount.
+ faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
+
+ // intersectsRectErrorUVDist is the maximum error when computing if a point
+ // intersects with a given Rect. If some point of AB is inside the
+ // rectangle by at least this distance, the result is guaranteed to be true;
+ // if all points of AB are outside the rectangle by at least this distance,
+ // the result is guaranteed to be false. This bound assumes that rect is
+ // a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
+ // (e.g., by 1e-10 or less).
+ intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
+)
+
+// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, or false if the edge AB does not intersect.
+// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
+// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
+// the results may differ from those produced by FaceSegments.
+func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
+ return ClipToPaddedFace(a, b, face, 0.0)
+}
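+
+// An illustrative clip of an edge to face 0 (a and b are hypothetical unit
+// Points):
+//
+//	if aUV, bUV, ok := ClipToFace(a, b, 0); ok {
+//		_ = r2.RectFromPoints(aUV, bUV) // 2D bound of the clipped portion
+//	}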
+
+// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
+// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
+// Padding must be non-negative.
+func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
+ // Fast path: both endpoints are on the given face.
+ if face(a.Vector) == f && face(b.Vector) == f {
+ au, av := validFaceXYZToUV(f, a.Vector)
+ bu, bv := validFaceXYZToUV(f, b.Vector)
+ return r2.Point{au, av}, r2.Point{bu, bv}, true
+ }
+
+ // Convert everything into the (u,v,w) coordinates of the given face. Note
+ // that the cross product *must* be computed in the original (x,y,z)
+ // coordinate system because PointCross (unlike the mathematical cross
+ // product) can produce different results in different coordinate systems
+ // when one argument is a linear multiple of the other, due to the use of
+ // symbolic perturbations.
+ normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
+ aUVW := pointUVW(faceXYZtoUVW(f, a))
+ bUVW := pointUVW(faceXYZtoUVW(f, b))
+
+ // Padding is handled by scaling the u- and v-components of the normal.
+ // Letting R=1+padding, this means that when we compute the dot product of
+ // the normal with a cube face vertex (such as (-1,-1,1)), we will actually
+ // compute the dot product with the scaled vertex (-R,-R,1). This allows
+ // methods such as intersectsFace, exitAxis, etc, to handle padding
+ // with no further modifications.
+ scaleUV := 1 + padding
+ scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
+ if !scaledN.intersectsFace() {
+ return aUV, bUV, false
+ }
+
+ // TODO(roberts): This is a workaround for extremely small vectors where some
+ // loss of precision can occur in Normalize causing underflow. When PointCross
+ // is updated to work around this, this can be removed.
+ if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
+ normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
+ }
+
+ normUVW = pointUVW{normUVW.Normalize()}
+
+ aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
+ bTan := pointUVW{bUVW.Cross(normUVW.Vector)}
+
+ // As described in clipDestination, if the sum of the scores from clipping the two
+ // endpoints is 3 or more, then the segment does not intersect this face.
+ aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
+ bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)
+
+ return aUV, bUV, aScore+bScore < 3
+}
+
+// ClipEdge returns the portion of the edge defined by AB that is contained by the
+// given rectangle. If there is no intersection, false is returned and aClip and bClip
+// are undefined.
+func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
+ // Compute the bounding rectangle of AB, clip it, and then extract the new
+ // endpoints from the clipped bound.
+ bound := r2.RectFromPoints(a, b)
+ if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
+ return aClip, bClip, false
+ }
+ ai := 0
+ if a.X > b.X {
+ ai = 1
+ }
+ aj := 0
+ if a.Y > b.Y {
+ aj = 1
+ }
+
+ return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
+}
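+
+// Example usage (a minimal sketch clipping a segment to the unit square in
+// (u,v) space):
+//
+//	a := r2.Point{X: -2, Y: 0}
+//	b := r2.Point{X: 2, Y: 0.5}
+//	clip := r2.RectFromPoints(r2.Point{X: -1, Y: -1}, r2.Point{X: 1, Y: 1})
+//	if aClip, bClip, ok := ClipEdge(a, b, clip); ok {
+//		_, _ = aClip, bClip // Both endpoints now lie within clip.
+//	}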
+
+// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges)
+// all compare a sum (u + v) to a third value w. They are implemented in such a
+// way that they produce an exact result even though all calculations are done
+// with ordinary floating-point operations. Here are the principles on which these
+// functions are based:
+//
+// A. If u + v < w in floating-point, then u + v < w in exact arithmetic.
+//
+// B. If u + v < w in exact arithmetic, then at least one of the following
+// expressions is true in floating-point:
+// u + v < w
+// u < w - v
+// v < w - u
+//
+// Proof: By rearranging terms and substituting ">" for "<", we can assume
+// that all values are non-negative. Now clearly "w" is not the smallest
+// value, so assume WLOG that "u" is the smallest. We want to show that
+// u < w - v in floating-point. If v >= w/2, the calculation of w - v is
+// exact since the result is smaller in magnitude than either input value,
+// so the result holds. Otherwise we have u <= v < w/2 and w - v >= w/2
+// (even in floating point), so the result also holds.
+
+// sumEqual reports whether u + v == w exactly.
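+// For example, sumEqual(0.25, 0.5, 0.75) is true because all three values
+// are exactly representable, while sumEqual(0.1, 0.2, 0.3) is false because
+// 0.1 + 0.2 rounds to a value slightly greater than 0.3 in binary floating
+// point.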
+func sumEqual(u, v, w float64) bool {
+ return (u+v == w) && (u == w-v) && (v == w-u)
+}
+
+// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
+type pointUVW Point
+
+// intersectsFace reports whether a given directed line L intersects the cube face F.
+// The line L is defined by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsFace() bool {
+ // L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
+ // products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
+ // and (-1,1,1) do not all have the same sign. This is true exactly when
+ // |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
+ u := math.Abs(p.X)
+ v := math.Abs(p.Y)
+ w := math.Abs(p.Z)
+
+ // We only need to consider the cases where u or v is the smallest value,
+ // since if w is the smallest then both expressions below will have a
+ // positive LHS and a negative RHS.
+ return (v >= w-u) && (u >= w-v)
+}
+
+// intersectsOppositeEdges reports whether a directed line L intersects two
+// opposite edges of a cube face F. This includes the case where L passes
+// exactly through a corner vertex of F. The directed line L is defined
+// by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsOppositeEdges() bool {
+ // The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
+ // and only exactly two of the corner vertices lie on each side of L. This
+ // is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
+ // expression exactly.
+ u := math.Abs(p.X)
+ v := math.Abs(p.Y)
+ w := math.Abs(p.Z)
+
+ // If w is the smallest, the following line returns an exact result.
+ if math.Abs(u-v) != w {
+ return math.Abs(u-v) >= w
+ }
+
+ // Otherwise u - v = w exactly, or w is not the smallest value. In either
+ // case the following returns the correct result.
+ if u >= v {
+ return u-w >= v
+ }
+ return v-w >= u
+}
+
+// axis represents the possible results of exitAxis.
+type axis int
+
+const (
+ axisU axis = iota
+ axisV
+)
+
+// exitAxis reports which axis the directed line L exits the cube face F on.
+// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
+// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
+// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
+// through a corner vertex of the cube face.
+func (p pointUVW) exitAxis() axis {
+ if p.intersectsOppositeEdges() {
+		// The line passes through opposite edges of the face.
+ // It exits through the v=+1 or v=-1 edge if the u-component of N has a
+ // larger absolute magnitude than the v-component.
+ if math.Abs(p.X) >= math.Abs(p.Y) {
+ return axisV
+ }
+ return axisU
+ }
+
+	// The line passes through two adjacent edges of the face.
+ // It exits the v=+1 or v=-1 edge if an even number of the components of N
+ // are negative. We test this using signbit() rather than multiplication
+ // to avoid the possibility of underflow.
+ var x, y, z int
+ if math.Signbit(p.X) {
+ x = 1
+ }
+ if math.Signbit(p.Y) {
+ y = 1
+ }
+ if math.Signbit(p.Z) {
+ z = 1
+ }
+
+ if x^y^z == 0 {
+ return axisV
+ }
+ return axisU
+}
+
+// exitPoint returns the UV coordinates of the point where a directed line L (represented
+// by the CCW normal of this point), exits the cube face this point is derived from along
+// the given axis.
+func (p pointUVW) exitPoint(a axis) r2.Point {
+ if a == axisU {
+ u := -1.0
+ if p.Y > 0 {
+ u = 1.0
+ }
+ return r2.Point{u, (-u*p.X - p.Z) / p.Y}
+ }
+
+ v := -1.0
+ if p.X < 0 {
+ v = 1.0
+ }
+ return r2.Point{(-v*p.Y - p.Z) / p.X, v}
+}
+
+// clipDestination returns the clipped endpoint and a score that is used to
+// indicate if the clipped edge AB on the given face intersects the face at all.
+// The score for a given endpoint is an integer ranging from 0 to 3. If the sum
+// of the scores from both of the endpoints is 3 or more, then edge AB does not
+// intersect this face.
+//
+// First, it clips the line segment AB to find the clipped destination B' on a given
+// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
+// coordinates of that face.) Second, it partially computes whether the segment AB
+// intersects this face at all. The actual condition is fairly complicated, but it
+// turns out that it can be expressed as a "score" that can be computed independently
+// when clipping the two endpoints A and B.
+func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
+ var uv r2.Point
+
+ // Optimization: if B is within the safe region of the face, use it.
+ maxSafeUVCoord := 1 - faceClipErrorUVCoord
+ if b.Z > 0 {
+ uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+ if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
+ return uv, 0
+ }
+ }
+
+ // Otherwise find the point B' where the line AB exits the face.
+ uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
+
+ p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}})
+
+ // Determine if the exit point B' is contained within the segment. We do this
+ // by computing the dot products with two inward-facing tangent vectors at A
+ // and B. If either dot product is negative, we say that B' is on the "wrong
+ // side" of that point. As the point B' moves around the great circle AB past
+ // the segment endpoint B, it is initially on the wrong side of B only; as it
+ // moves further it is on the wrong side of both endpoints; and then it is on
+ // the wrong side of A only. If the exit point B' is on the wrong side of
+ // either endpoint, we can't use it; instead the segment is clipped at the
+ // original endpoint B.
+ //
+ // We reject the segment if the sum of the scores of the two endpoints is 3
+ // or more. Here is what that rule encodes:
+ // - If B' is on the wrong side of A, then the other clipped endpoint A'
+ // must be in the interior of AB (otherwise AB' would go the wrong way
+ // around the circle). There is a similar rule for A'.
+ // - If B' is on the wrong side of either endpoint (and therefore we must
+ // use the original endpoint B instead), then it must be possible to
+ // project B onto this face (i.e., its w-coordinate must be positive).
+ // This rule is only necessary to handle certain zero-length edges (A=B).
+ score := 0
+ if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
+ score = 2 // B' is on wrong side of A.
+ } else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
+ score = 1 // B' is on wrong side of B.
+ }
+
+ if score > 0 { // B' is not in the interior of AB.
+ if b.Z <= 0 {
+ score = 3 // B cannot be projected onto this face.
+ } else {
+ uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+ }
+ }
+
+ return uv, score
+}
+
+// updateEndpoint returns the interval with the specified endpoint updated to
+// the given value. If the value lies beyond the opposite endpoint, nothing is
+// changed and false is returned.
+func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
+ if !highEndpoint {
+ if bound.Hi < value {
+ return bound, false
+ }
+ if bound.Lo < value {
+ bound.Lo = value
+ }
+ return bound, true
+ }
+
+ if bound.Lo > value {
+ return bound, false
+ }
+ if bound.Hi > value {
+ bound.Hi = value
+ }
+ return bound, true
+}
+
+// clipBoundAxis returns the clipped versions of the bounding intervals for the given
+// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
+// given clip interval. negSlope is a precomputed helper variable that indicates which
+// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
+// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
+// false is returned.
+func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
+ negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
+
+ if bound0.Lo < clip.Lo {
+		// If the upper bound is below the clip's lower bound, there is nothing to do.
+ if bound0.Hi < clip.Lo {
+ return bound0, bound1, false
+ }
+		// Narrow the interval's lower bound to the clip bound.
+ bound0.Lo = clip.Lo
+ if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
+ return bound0, bound1, false
+ }
+ }
+
+ if bound0.Hi > clip.Hi {
+		// If the lower bound is above the clip's upper bound, there is nothing to do.
+ if bound0.Lo > clip.Hi {
+ return bound0, bound1, false
+ }
+		// Narrow the interval's upper bound to the clip bound.
+ bound0.Hi = clip.Hi
+ if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
+ return bound0, bound1, false
+ }
+ }
+ return bound0, bound1, true
+}
+
+// edgeIntersectsRect reports whether the edge defined by AB intersects the
+// given closed rectangle to within the error bound.
+func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
+ // First check whether the bounds of a Rect around AB intersects the given rect.
+ if !r.Intersects(r2.RectFromPoints(a, b)) {
+ return false
+ }
+
+ // Otherwise AB intersects the rect if and only if all four vertices of rect
+ // do not lie on the same side of the extended line AB. We test this by finding
+ // the two vertices of rect with minimum and maximum projections onto the normal
+ // of AB, and computing their dot products with the edge normal.
+ n := b.Sub(a).Ortho()
+
+ i := 0
+ if n.X >= 0 {
+ i = 1
+ }
+ j := 0
+ if n.Y >= 0 {
+ j = 1
+ }
+
+ max := n.Dot(r.VertexIJ(i, j).Sub(a))
+ min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
+
+ return (max >= 0) && (min <= 0)
+}
+
+// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
+// by AB intersected by clip. The resulting bound may be empty. This is a convenience
+// function built on top of clipEdgeBound.
+func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
+ bound := r2.RectFromPoints(a, b)
+ if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
+ return b1
+ }
+ return r2.EmptyRect()
+}
+
+// clipEdgeBound clips an edge AB to a sequence of rectangles efficiently.
+// It represents the clipped edges by their bounding boxes rather than as a pair of
+// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
+// a tight bound of A'B'. This function returns the bound that is a tight bound
+// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
+// it returns false and the original bound.
+func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
+ // negSlope indicates which diagonal of the bounding box is spanned by AB: it
+ // is false if AB has positive slope, and true if AB has negative slope. This is
+ // used to determine which interval endpoints need to be updated each time
+ // the edge is clipped.
+ negSlope := (a.X > b.X) != (a.Y > b.Y)
+
+ b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
+ if !up1 {
+ return bound, false
+ }
+ b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
+ if !up2 {
+		return r2.Rect{X: b0x, Y: b0y}, false
+ }
+ return r2.Rect{X: b1x, Y: b1y}, true
+}
+
+// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
+// given value x is of a and b. This function makes the following guarantees:
+// - If x == a, then x1 = a1 (exactly).
+// - If x == b, then x1 = b1 (exactly).
+// - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
+// This requires a != b.
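+// For example, interpolateFloat64(5, 0, 10, 100, 200) returns 150: x = 5 lies
+// halfway between a = 0 and b = 10, so the result lies halfway between
+// a1 = 100 and b1 = 200.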
+func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
+ // To get results that are accurate near both A and B, we interpolate
+ // starting from the closer of the two points.
+ if math.Abs(a-x) <= math.Abs(b-x) {
+ return a1 + (b1-a1)*(x-a)/(b-a)
+ }
+ return b1 + (a1-b1)*(x-b)/(a-b)
+}
+
+// FaceSegment represents an edge AB clipped to an S2 cube face. It is
+// represented by a face index and a pair of (u,v) coordinates.
+type FaceSegment struct {
+ face int
+ a, b r2.Point
+}
+
+// FaceSegments subdivides the given edge AB at every point where it crosses the
+// boundary between two S2 cube faces and returns the corresponding FaceSegments.
+// The segments are returned in order from A toward B. The input points must be
+// unit length.
+//
+// This function guarantees that the returned segments form a continuous path
+// from A to B, and that all vertices are within faceClipErrorUVDist of the
+// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
+// The results are consistent with Sign, i.e. the edge is well-defined even if
+// its endpoints are antipodal.
+// TODO(roberts): Extend the implementation of PointCross so that this is true.
+func FaceSegments(a, b Point) []FaceSegment {
+ var segment FaceSegment
+
+ // Fast path: both endpoints are on the same face.
+ var aFace, bFace int
+ aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
+ bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
+ if aFace == bFace {
+ segment.face = aFace
+ return []FaceSegment{segment}
+ }
+
+ // Starting at A, we follow AB from face to face until we reach the face
+ // containing B. The following code is designed to ensure that we always
+ // reach B, even in the presence of numerical errors.
+ //
+ // First we compute the normal to the plane containing A and B. This normal
+ // becomes the ultimate definition of the line AB; it is used to resolve all
+ // questions regarding where exactly the line goes. Unfortunately due to
+ // numerical errors, the line may not quite intersect the faces containing
+ // the original endpoints. We handle this by moving A and/or B slightly if
+ // necessary so that they are on faces intersected by the line AB.
+ ab := a.PointCross(b)
+
+ aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
+ bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)
+
+ // Now we simply follow AB from face to face until we reach B.
+ var segments []FaceSegment
+ segment.face = aFace
+ bSaved := segment.b
+
+ for face := aFace; face != bFace; {
+ // Complete the current segment by finding the point where AB
+ // exits the current face.
+ z := faceXYZtoUVW(face, ab)
+ n := pointUVW{z.Vector}
+
+ exitAxis := n.exitAxis()
+ segment.b = n.exitPoint(exitAxis)
+ segments = append(segments, segment)
+
+ // Compute the next face intersected by AB, and translate the exit
+ // point of the current segment into the (u,v) coordinates of the
+ // next face. This becomes the first point of the next segment.
+ exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
+ face = nextFace(face, segment.b, exitAxis, n, bFace)
+ exitUvw := faceXYZtoUVW(face, Point{exitXyz})
+ segment.face = face
+ segment.a = r2.Point{exitUvw.X, exitUvw.Y}
+ }
+ // Finish the last segment.
+ segment.b = bSaved
+ return append(segments, segment)
+}
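+
+// Example usage (a minimal sketch; face 0 is centered on the +x axis and
+// face 3 on the -x axis, so this edge crosses at least one face boundary):
+//
+//	a := PointFromCoords(1, 0.1, 0.1)
+//	b := PointFromCoords(-1, 0.1, 0.1)
+//	for _, seg := range FaceSegments(a, b) {
+//		// Each segment lies on a single face; seg.a and seg.b are its
+//		// endpoints in that face's (u,v) coordinates.
+//		_ = seg.face
+//	}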
+
+// moveOriginToValidFace updates the origin point to a valid face if necessary.
+// Given a line segment AB whose origin A has been projected onto a given cube
+// face, determine whether it is necessary to project A onto a different face
+// instead. This can happen because the normal of the line AB is not computed
+// exactly, so that the line AB (defined as the set of points perpendicular to
+// the normal) may not intersect the cube face containing A. Even if it does
+// intersect the face, the exit point of the line from that face may be on
+// the wrong side of A (i.e., in the direction away from B). If this happens,
+// we reproject A onto the adjacent face where the line AB approaches A most
+// closely. This moves the origin by a small amount, but never more than the
+// error tolerances.
+func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) {
+ // Fast path: if the origin is sufficiently far inside the face, it is
+ // always safe to use it.
+ const maxSafeUVCoord = 1 - faceClipErrorUVCoord
+	if math.Max(math.Abs(aUV.X), math.Abs(aUV.Y)) <= maxSafeUVCoord {
+ return face, aUV
+ }
+
+ // Otherwise check whether the normal AB even intersects this face.
+ z := faceXYZtoUVW(face, ab)
+ n := pointUVW{z.Vector}
+ if n.intersectsFace() {
+ // Check whether the point where the line AB exits this face is on the
+ // wrong side of A (by more than the acceptable error tolerance).
+ uv := n.exitPoint(n.exitAxis())
+ exit := faceUVToXYZ(face, uv.X, uv.Y)
+ aTangent := ab.Normalize().Cross(a.Vector)
+
+ // We can use the given face.
+ if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians {
+ return face, aUV
+ }
+ }
+
+ // Otherwise we reproject A to the nearest adjacent face. (If line AB does
+ // not pass through a given face, it must pass through all adjacent faces.)
+ var dir int
+	if math.Abs(aUV.X) >= math.Abs(aUV.Y) {
+ // U-axis
+ if aUV.X > 0 {
+ dir = 1
+ }
+ face = uvwFace(face, 0, dir)
+ } else {
+ // V-axis
+ if aUV.Y > 0 {
+ dir = 1
+ }
+ face = uvwFace(face, 1, dir)
+ }
+
+ aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector)
+ aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X))
+ aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y))
+
+ return face, aUV
+}
+
+// nextFace returns the next face that should be visited by FaceSegments, given that
+// we have just visited face and we are following the line AB (represented
+// by its normal N in the (u,v,w) coordinates of that face). The other
+// arguments include the point where AB exits face, the corresponding
+// exit axis, and the target face containing the destination point B.
+func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int {
+	// This bit works around C++ implicitly casting bools to ints for you.
+ exitA := exit.X
+ exit1MinusA := exit.Y
+
+ if axis == axisV {
+ exitA = exit.Y
+ exit1MinusA = exit.X
+ }
+ exitAPos := 0
+ if exitA > 0 {
+ exitAPos = 1
+ }
+ exit1MinusAPos := 0
+ if exit1MinusA > 0 {
+ exit1MinusAPos = 1
+ }
+
+ // We return the face that is adjacent to the exit point along the given
+ // axis. If line AB exits *exactly* through a corner of the face, there are
+ // two possible next faces. If one is the target face containing B, then
+ // we guarantee that we advance to that face directly.
+ //
+ // The three conditions below check that (1) AB exits approximately through
+ // a corner, (2) the adjacent face along the non-exit axis is the target
+ // face, and (3) AB exits *exactly* through the corner. (The sumEqual
+ // code checks whether the dot product of (u,v,1) and n is exactly zero.)
+ if math.Abs(exit1MinusA) == 1 &&
+ uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace &&
+ sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) {
+ return targetFace
+ }
+
+ // Otherwise return the face that is adjacent to the exit point in the
+ // direction of the exit axis.
+ return uvwFace(face, int(axis), exitAPos)
+}
diff --git a/vendor/github.com/golang/geo/s2/edge_crosser.go b/vendor/github.com/golang/geo/s2/edge_crosser.go
new file mode 100644
index 000000000..69c6da6b9
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_crosser.go
@@ -0,0 +1,227 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+)
+
+// EdgeCrosser allows edges to be efficiently tested for intersection with a
+// given fixed edge AB. It is especially efficient when testing for
+// intersection with an edge chain connecting vertices v0, v1, v2, ...
+//
+// Example usage:
+//
+// func CountIntersections(a, b Point, edges []Edge) int {
+// count := 0
+// crosser := NewEdgeCrosser(a, b)
+// for _, edge := range edges {
+//		if crosser.CrossingSign(edge.V0, edge.V1) != DoNotCross {
+// count++
+// }
+// }
+// return count
+// }
+//
+type EdgeCrosser struct {
+ a Point
+ b Point
+ aXb Point
+
+ // To reduce the number of calls to expensiveSign, we compute an
+ // outward-facing tangent at A and B if necessary. If the plane
+ // perpendicular to one of these tangents separates AB from CD (i.e., one
+ // edge on each side) then there is no intersection.
+ aTangent Point // Outward-facing tangent at A.
+ bTangent Point // Outward-facing tangent at B.
+
+ // The fields below are updated for each vertex in the chain.
+ c Point // Previous vertex in the vertex chain.
+ acb Direction // The orientation of triangle ACB.
+}
+
+// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
+func NewEdgeCrosser(a, b Point) *EdgeCrosser {
+ norm := a.PointCross(b)
+ return &EdgeCrosser{
+ a: a,
+ b: b,
+ aXb: Point{a.Cross(b.Vector)},
+ aTangent: Point{a.Cross(norm.Vector)},
+ bTangent: Point{norm.Cross(b.Vector)},
+ }
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD. If any two
+// vertices from different edges are the same, returns MaybeCross. If either edge
+// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
+//
+// Properties of CrossingSign:
+//
+// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+// (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// Note that if you want to check an edge against a chain of other edges,
+// it is slightly more efficient to use the single-argument version
+// ChainCrossingSign below.
+func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
+ if c != e.c {
+ e.RestartAt(c)
+ }
+ return e.ChainCrossingSign(d)
+}
+
+// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or AB and
+// CD share a vertex and VertexCrossing(a, b, c, d) is true.
+//
+// This method extends the concept of a "crossing" to the case where AB
+// and CD have a vertex in common. The two edges may or may not cross,
+// according to the rules defined in VertexCrossing above. The rules
+// are designed so that point containment tests can be implemented simply
+// by counting edge crossings. Similarly, determining whether one edge
+// chain crosses another edge chain can be implemented by counting.
+func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
+ if c != e.c {
+ e.RestartAt(c)
+ }
+ return e.EdgeOrVertexChainCrossing(d)
+}
+
+// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
+// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
+//
+// You don't need to use this or any of the chain functions unless you're trying to
+// squeeze out every last drop of performance. Essentially all you are saving is a test
+// whether the first vertex of the current edge is the same as the second vertex of the
+// previous edge.
+func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
+ e := NewEdgeCrosser(a, b)
+ e.RestartAt(c)
+ return e
+}
+
+// RestartAt sets the current point of the edge crosser to be c.
+// Call this method when your chain 'jumps' to a new place.
+func (e *EdgeCrosser) RestartAt(c Point) {
+ e.c = c
+ e.acb = -triageSign(e.a, e.b, e.c)
+}
+
+// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
+// the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
+ // For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
+ // all be oriented the same way (CW or CCW). We keep the orientation of ACB
+ // as part of our state. When each new point D arrives, we compute the
+ // orientation of BDA and check whether it matches ACB. This checks whether
+ // the points C and D are on opposite sides of the great circle through AB.
+
+ // Recall that triageSign is invariant with respect to rotating its
+ // arguments, i.e. ABD has the same orientation as BDA.
+ bda := triageSign(e.a, e.b, d)
+ if e.acb == -bda && bda != Indeterminate {
+ // The most common case -- triangles have opposite orientations. Save the
+ // current vertex D as the next vertex C, and also save the orientation of
+ // the new triangle ACB (which is opposite to the current triangle BDA).
+ e.c = d
+ e.acb = -bda
+ return DoNotCross
+ }
+ return e.crossingSign(d, bda)
+}
+
+// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
+// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
+ // We need to copy e.c since it is clobbered by ChainCrossingSign.
+ c := e.c
+ switch e.ChainCrossingSign(d) {
+ case DoNotCross:
+ return false
+ case Cross:
+ return true
+ }
+ return VertexCrossing(e.a, e.b, c, d)
+}
+
+// crossingSign handles the slow path of CrossingSign.
+func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
+ // Compute the actual result, and then save the current vertex D as the next
+ // vertex C, and save the orientation of the next triangle ACB (which is
+ // opposite to the current triangle BDA).
+ defer func() {
+ e.c = d
+ e.acb = -bda
+ }()
+
+ // At this point, a very common situation is that A,B,C,D are four points on
+ // a line such that AB does not overlap CD. (For example, this happens when
+ // a line or curve is sampled finely, or when geometry is constructed by
+ // computing the union of S2CellIds.) Most of the time, we can determine
+ // that AB and CD do not intersect using the two outward-facing
+ // tangents at A and B (parallel to AB) and testing whether AB and CD are on
+ // opposite sides of the plane perpendicular to one of these tangents. This
+ // is moderately expensive but still much cheaper than expensiveSign.
+
+ // The error in RobustCrossProd is insignificant. The maximum error in
+ // the call to CrossProd (i.e., the maximum norm of the error vector) is
+ // (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
+ // DotProd below is dblEpsilon. (There is also a small relative error
+ // term that is insignificant because we are comparing the result against a
+ // constant that is very close to zero.)
+ maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
+ if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
+ return DoNotCross
+ }
+
+ // Otherwise, eliminate the cases where two vertices from different edges are
+ // equal. (These cases could be handled in the code below, but we would rather
+ // avoid calling ExpensiveSign if possible.)
+ if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
+ return MaybeCross
+ }
+
+ // Eliminate the cases where an input edge is degenerate. (Note that in
+ // most cases, if CD is degenerate then this method is not even called
+ // because acb and bda have different signs.)
+ if e.a == e.b || e.c == d {
+ return DoNotCross
+ }
+
+ // Otherwise it's time to break out the big guns.
+ if e.acb == Indeterminate {
+ e.acb = -expensiveSign(e.a, e.b, e.c)
+ }
+ if bda == Indeterminate {
+ bda = expensiveSign(e.a, e.b, d)
+ }
+
+ if bda != e.acb {
+ return DoNotCross
+ }
+
+ cbd := -RobustSign(e.c, d, e.b)
+ if cbd != e.acb {
+ return DoNotCross
+ }
+ dac := RobustSign(e.c, d, e.a)
+ if dac != e.acb {
+ return DoNotCross
+ }
+ return Cross
+}
diff --git a/vendor/github.com/golang/geo/s2/edge_crossings.go b/vendor/github.com/golang/geo/s2/edge_crossings.go
new file mode 100644
index 000000000..a98ec76ff
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_crossings.go
@@ -0,0 +1,396 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ // intersectionError can be set somewhat arbitrarily, because the algorithm
+ // uses more precision if necessary in order to achieve the specified error.
+ // The only strict requirement is that intersectionError >= dblEpsilon
+ // radians. However, using a larger error tolerance makes the algorithm more
+ // efficient because it reduces the number of cases where exact arithmetic is
+ // needed.
+ intersectionError = s1.Angle(8 * dblError)
+
+ // intersectionMergeRadius is used to ensure that intersection points that
+ // are supposed to be coincident are merged back together into a single
+ // vertex. This is required in order for various polygon operations (union,
+ // intersection, etc) to work correctly. It is twice the intersection error
+ // because two coincident intersection points might have errors in
+ // opposite directions.
+ intersectionMergeRadius = 2 * intersectionError
+)
+
+// A Crossing indicates how edges cross.
+type Crossing int
+
+const (
+ // Cross means the edges cross.
+ Cross Crossing = iota
+ // MaybeCross means two vertices from different edges are the same.
+ MaybeCross
+ // DoNotCross means the edges do not cross.
+ DoNotCross
+)
+
+func (c Crossing) String() string {
+ switch c {
+ case Cross:
+ return "Cross"
+ case MaybeCross:
+ return "MaybeCross"
+ case DoNotCross:
+ return "DoNotCross"
+ default:
+ return fmt.Sprintf("(BAD CROSSING %d)", c)
+ }
+}
+
+// CrossingSign reports whether the edge AB intersects the edge CD.
+// If AB crosses CD at a point that is interior to both edges, Cross is returned.
+// If any two vertices from different edges are the same it returns MaybeCross.
+// Otherwise it returns DoNotCross.
+// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
+// if two vertices from different edges are the same and DoNotCross otherwise.
+//
+// Properties of CrossingSign:
+//
+// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
+// (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// This method implements an exact, consistent perturbation model such
+// that no three points are ever considered to be collinear. This means
+// that even if you have 4 points A, B, C, D that lie exactly in a line
+// (say, around the equator), C and D will be treated as being slightly to
+// one side or the other of AB. This is done in a way such that the
+// results are always consistent (see RobustSign).
+func CrossingSign(a, b, c, d Point) Crossing {
+ crosser := NewChainEdgeCrosser(a, b, c)
+ return crosser.ChainCrossingSign(d)
+}
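+
+// Example usage (a minimal sketch; the two edges cross transversally near
+// the point (1,0,0)):
+//
+//	a := PointFromCoords(1, -0.1, 0)
+//	b := PointFromCoords(1, 0.1, 0)
+//	c := PointFromCoords(1, 0, -0.1)
+//	d := PointFromCoords(1, 0, 0.1)
+//	CrossingSign(a, b, c, d) // returns Cross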
+
+// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
+// containment tests can be implemented by counting the number of edge crossings.
+//
+// Given two edges AB and CD where at least two vertices are identical
+// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing"
+// occurs if AB is encountered after CD during a CCW sweep around the shared
+// vertex starting from a fixed reference point.
+//
+// Note that according to this rule, if AB crosses CD then in general CD
+// does not cross AB. However, this leads to the correct result when
+// counting polygon edge crossings. For example, suppose that A,B,C are
+// three consecutive vertices of a CCW polygon. If we now consider the edge
+// crossings of a segment BP as P sweeps around B, the crossing number
+// changes parity exactly when BP crosses BA or BC.
+//
+// Useful properties of VertexCrossing (VC):
+//
+// (1) VC(a,a,c,d) == VC(a,b,c,c) == false
+// (2) VC(a,b,a,b) == VC(a,b,b,a) == true
+// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
+// (4) If exactly one of a,b equals one of c,d, then exactly one of
+// VC(a,b,c,d) and VC(c,d,a,b) is true
+//
+// It is an error to call this method with 4 distinct vertices.
+func VertexCrossing(a, b, c, d Point) bool {
+ // If A == B or C == D there is no intersection. We need to check this
+ // case first in case 3 or more input points are identical.
+ if a == b || c == d {
+ return false
+ }
+
+ // If any other pair of vertices is equal, there is a crossing if and only
+ // if OrderedCCW indicates that the edge AB is further CCW around the
+ // shared vertex O (either A or B) than the edge CD, starting from an
+ // arbitrary fixed reference point.
+
+ // Optimization: if AB=CD or AB=DC, we can avoid most of the calculations.
+ switch {
+ case a == c:
+ return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a)
+ case b == d:
+ return OrderedCCW(Point{b.Ortho()}, c, a, b)
+ case a == d:
+ return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a)
+ case b == c:
+ return OrderedCCW(Point{b.Ortho()}, d, a, b)
+ }
+
+ return false
+}
+
+// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
+// handle cases where all four vertices are distinct, and VertexCrossing to
+// handle cases where two or more vertices are the same. This defines a crossing
+// function such that point-in-polygon containment tests can be implemented
+// by simply counting edge crossings.
+func EdgeOrVertexCrossing(a, b, c, d Point) bool {
+ switch CrossingSign(a, b, c, d) {
+ case DoNotCross:
+ return false
+ case Cross:
+ return true
+ default:
+ return VertexCrossing(a, b, c, d)
+ }
+}
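+
+// For example (a sketch, assuming a, b, d are distinct unit-length Points),
+// if edges AB and BD share the vertex B, CrossingSign reports MaybeCross and
+// the result reduces to the vertex rule:
+//
+//	EdgeOrVertexCrossing(a, b, b, d) // == VertexCrossing(a, b, b, d)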
+
+// Intersection returns the intersection point of two edges AB and CD that cross
+// (CrossingSign(a,b,c,d) == Cross).
+//
+// Useful properties of Intersection:
+//
+// (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
+// (2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
+//
+// The returned intersection point X is guaranteed to be very close to the
+// true intersection point of AB and CD, even if the edges intersect at a
+// very small angle.
+func Intersection(a0, a1, b0, b1 Point) Point {
+ // It is difficult to compute the intersection point of two edges accurately
+ // when the angle between the edges is very small. Previously we handled
+ // this by only guaranteeing that the returned intersection point is within
+ // intersectionError of each edge. However, this means that when the edges
+ // cross at a very small angle, the computed result may be very far from the
+ // true intersection point.
+ //
+ // Instead this function now guarantees that the result is always within
+ // intersectionError of the true intersection. This requires using more
+ // sophisticated techniques and in some cases extended precision.
+ //
+ // - intersectionStable computes the intersection point using
+ // projection and interpolation, taking care to minimize cancellation
+ // error.
+ //
+ // - intersectionExact computes the intersection point using precision
+ // arithmetic and converts the final result back to an Point.
+ pt, ok := intersectionStable(a0, a1, b0, b1)
+ if !ok {
+ pt = intersectionExact(a0, a1, b0, b1)
+ }
+
+ // Make sure the intersection point is on the correct side of the sphere.
+ // Since all vertices are unit length, and edges are less than 180 degrees,
+ // (a0 + a1) and (b0 + b1) both have positive dot product with the
+ // intersection point. We use the sum of all vertices to make sure that the
+ // result is unchanged when the edges are swapped or reversed.
+ if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 {
+ pt = Point{pt.Mul(-1)}
+ }
+
+ return pt
+}
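+
+// Example usage (a minimal sketch combining CrossingSign and Intersection;
+// a, b, c, d are unit-length Points):
+//
+//	if CrossingSign(a, b, c, d) == Cross {
+//		x := Intersection(a, b, c, d)
+//		// x is within intersectionError of the true intersection point.
+//		_ = x
+//	}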
+
+// robustNormalWithLength computes the cross product of two vectors, normalized
+// to be unit length. It also returns the length of the cross product before
+// normalization, which is useful for estimating the amount of error in the
+// result. For numerical stability, the vectors should both be approximately
+// unit length.
+func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
+ var pt r3.Vector
+ // This computes 2 * (x.Cross(y)), but has much better numerical
+ // stability when x and y are unit length.
+ tmp := x.Sub(y).Cross(x.Add(y))
+ length := tmp.Norm()
+ if length != 0 {
+ pt = tmp.Mul(1 / length)
+ }
+ return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
+}
+
+// intersectionSimple is not used by the C++ version, so it is skipped here.
+
+// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
+// on the error in the result. aNorm is not necessarily unit length.
+//
+// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
+// a0 and a1) allow this dot product to be computed more accurately and efficiently.
+func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
+ // The error in the dot product is proportional to the lengths of the input
+ // vectors, so rather than using x itself (a unit-length vector) we use
+ // the vectors from x to the closer of the two edge endpoints. This
+ // typically reduces the error by a huge factor.
+ x0 := x.Sub(a0.Vector)
+ x1 := x.Sub(a1.Vector)
+ x0Dist2 := x0.Norm2()
+ x1Dist2 := x1.Norm2()
+
+ // If both distances are the same, we need to be careful to choose one
+ // endpoint deterministically so that the result does not change if the
+ // order of the endpoints is reversed.
+ var dist float64
+ if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) {
+ dist = math.Sqrt(x0Dist2)
+ proj = x0.Dot(aNorm)
+ } else {
+ dist = math.Sqrt(x1Dist2)
+ proj = x1.Dot(aNorm)
+ }
+
+ // This calculation bounds the error from all sources: the computation of
+ // the normal, the subtraction of one endpoint, and the dot product itself.
+ // dblError appears because the input points are assumed to be
+ // normalized in double precision.
+ //
+ // For reference, the bounds that went into this calculation are:
+ // ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon
+ // |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
+ // ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
+ bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon
+ return proj, bound
+}
+
+// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
+// ordering on edges that is invariant under edge reversals.
+func compareEdges(a0, a1, b0, b1 Point) bool {
+ if a0.Cmp(a1.Vector) != -1 {
+ a0, a1 = a1, a0
+ }
+ if b0.Cmp(b1.Vector) != -1 {
+ b0, b1 = b1, b0
+ }
+ return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && b0.Cmp(b1.Vector) == -1)
+}
+
+// intersectionStable returns the intersection point of the edges (a0,a1) and
+// (b0,b1) if it can be computed to within an error of at most intersectionError
+// by this function.
+//
+// The intersection point is not guaranteed to have the correct sign because we
+// choose to use the longest of the two edges first. The sign is corrected by
+// Intersection.
+func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
+ // Sort the two edges so that (a0,a1) is longer, breaking ties in a
+ // deterministic way that does not depend on the ordering of the endpoints.
+ // This is desirable for two reasons:
+ // - So that the result doesn't change when edges are swapped or reversed.
+ // - It reduces error, since the first edge is used to compute the edge
+ // normal (where a longer edge means less error), and the second edge
+ // is used for interpolation (where a shorter edge means less error).
+ aLen2 := a1.Sub(a0.Vector).Norm2()
+ bLen2 := b1.Sub(b0.Vector).Norm2()
+ if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) {
+ return intersectionStableSorted(b0, b1, a0, a1)
+ }
+ return intersectionStableSorted(a0, a1, b0, b1)
+}
+
+// intersectionStableSorted is a helper function for intersectionStable.
+// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
+// the first edge passed in is longer.
+func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
+ var pt Point
+
+ // Compute the normal of the plane through (a0, a1) in a stable way.
+ aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
+ aNormLen := aNorm.Norm()
+ bLen := b1.Sub(b0.Vector).Norm()
+
+ // Compute the projection (i.e., signed distance) of b0 and b1 onto the
+ // plane through (a0, a1). Distances are scaled by the length of aNorm.
+ b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
+ b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)
+
+ // The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
+ // |b0Dist - b1Dist|. Note that b0Dist and b1Dist generally have
+ // opposite signs because b0 and b1 are on opposite sides of (a0, a1). The
+ // code below finds the intersection point by interpolating along the edge
+ // (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
+ //
+ // It can be shown that the maximum error in the interpolation fraction is
+ //
+ // (b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
+ //
+ // We save ourselves some work by scaling the result and the error bound by
+ // "distSum", since the result is normalized to be unit length anyway.
+ distSum := math.Abs(b0Dist - b1Dist)
+ errorSum := b0Error + b1Error
+ if distSum <= errorSum {
+ return pt, false // Error is unbounded in this case.
+ }
+
+ x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
+ err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
+ (distSum-errorSum) + 2*distSum*epsilon
+
+ // Finally we normalize the result, compute the corresponding error, and
+ // check whether the total error is acceptable.
+ xLen := x.Norm()
+ maxError := intersectionError
+ if err > (float64(maxError)-epsilon)*xLen {
+ return pt, false
+ }
+
+ return Point{x.Mul(1 / xLen)}, true
+}
+
+// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
+// using precise arithmetic. Note that the result is not exact because it is
+// rounded down to double precision at the end. Also, the intersection point
+// is not guaranteed to have the correct sign (i.e., the return value may need
+// to be negated).
+func intersectionExact(a0, a1, b0, b1 Point) Point {
+	// Since we are using precise arithmetic, we don't need to worry about
+ // numerical stability.
+ a0P := r3.PreciseVectorFromVector(a0.Vector)
+ a1P := r3.PreciseVectorFromVector(a1.Vector)
+ b0P := r3.PreciseVectorFromVector(b0.Vector)
+ b1P := r3.PreciseVectorFromVector(b1.Vector)
+ aNormP := a0P.Cross(a1P)
+ bNormP := b0P.Cross(b1P)
+ xP := aNormP.Cross(bNormP)
+
+ // The final Normalize() call is done in double precision, which creates a
+ // directional error of up to 2*dblError. (Precise conversion and Normalize()
+ // each contribute up to dblError of directional error.)
+ x := xP.Vector()
+
+ if x == (r3.Vector{}) {
+ // The two edges are exactly collinear, but we still consider them to be
+ // "crossing" because of simulation of simplicity. Out of the four
+ // endpoints, exactly two lie in the interior of the other edge. Of
+ // those two we return the one that is lexicographically smallest.
+		x = r3.Vector{10, 10, 10} // Greater than any valid Point
+
+ aNorm := Point{aNormP.Vector()}
+ bNorm := Point{bNormP.Vector()}
+ if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 {
+ return a0
+ }
+ if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 {
+ return a1
+ }
+ if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 {
+ return b0
+ }
+ if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 {
+ return b1
+ }
+ }
+
+ return Point{x}
+}
diff --git a/vendor/github.com/golang/geo/s2/edge_distances.go b/vendor/github.com/golang/geo/s2/edge_distances.go
new file mode 100644
index 000000000..ca197af1d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_distances.go
@@ -0,0 +1,408 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file defines a collection of methods for computing the distance to an edge,
+// interpolating along an edge, projecting points onto edges, etc.
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// DistanceFromSegment returns the distance of point X from line segment AB.
+// The points are expected to be normalized. The result is very accurate for small
+// distances but may have some numerical error if the distance is large
+// (approximately pi/2 or greater). The case A == B is handled correctly.
+func DistanceFromSegment(x, a, b Point) s1.Angle {
+ var minDist s1.ChordAngle
+ minDist, _ = updateMinDistance(x, a, b, minDist, true)
+ return minDist.Angle()
+}
+
+// IsDistanceLess reports whether the distance from X to the edge AB is less
+// than limit. (For less than or equal to, specify limit.Successor()).
+// This method is faster than DistanceFromSegment(). If you want to
+// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
+// once and save the value, since this conversion is relatively expensive.
+func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+ _, less := UpdateMinDistance(x, a, b, limit)
+ return less
+}
+
+// UpdateMinDistance checks if the distance from X to the edge AB is less
+// than minDist, and if so, returns the updated value and true.
+// The case A == B is handled correctly.
+//
+// Use this method when you want to compute many distances and keep track of
+// the minimum. It is significantly faster than using DistanceFromSegment
+// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it
+// can save a lot of work by not actually computing the distance when it is
+// obviously larger than the current minimum.
+func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+ return updateMinDistance(x, a, b, minDist, false)
+}
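+
+// Example usage (a minimal sketch; x is a query Point and pts is a chain of
+// unit-length Points, both assumed names):
+//
+//	minDist := s1.InfChordAngle()
+//	for i := 0; i+1 < len(pts); i++ {
+//		if d, ok := UpdateMinDistance(x, pts[i], pts[i+1], minDist); ok {
+//			minDist = d
+//		}
+//	}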
+
+// UpdateMaxDistance checks if the distance from X to the edge AB is greater
+// than maxDist, and if so, returns the updated value and true.
+// Otherwise it returns false. The case A == B is handled correctly.
+func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
+ dist := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b))
+ if dist > s1.RightChordAngle {
+ dist, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, dist, true)
+ dist = s1.StraightChordAngle - dist
+ }
+ if maxDist < dist {
+ return dist, true
+ }
+
+ return maxDist, false
+}
+
+// IsInteriorDistanceLess reports whether the minimum distance from X to the edge
+// AB is attained at an interior point of AB (i.e., not an endpoint), and that
+// distance is less than limit. (Specify limit.Successor() for less than or equal to).
+func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
+ _, less := UpdateMinInteriorDistance(x, a, b, limit)
+ return less
+}
+
+// UpdateMinInteriorDistance reports whether the minimum distance from X to AB
+// is attained at an interior point of AB (i.e., not an endpoint), and that distance
+// is less than minDist. If so, the value of minDist is updated and true is returned.
+// Otherwise it is unchanged and returns false.
+func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+ return interiorDist(x, a, b, minDist, false)
+}
+
+// Project returns the point along the edge AB that is closest to the point X.
+// The fractional distance of this point along the edge AB can be obtained
+// using DistanceFraction.
+//
+// This requires that all points are unit length.
+func Project(x, a, b Point) Point {
+ aXb := a.PointCross(b)
+ // Find the closest point to X along the great circle through AB.
+ p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
+
+ // If this point is on the edge AB, then it's the closest point.
+ if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
+ return Point{p.Normalize()}
+ }
+
+ // Otherwise, the closest point is either A or B.
+ if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
+ return a
+ }
+ return b
+}
+
+// DistanceFraction returns the distance ratio of the point X along an edge AB.
+// If X is on the line segment AB, this is the fraction T such
+// that X == Interpolate(T, A, B).
+//
+// This requires that A and B are distinct.
+func DistanceFraction(x, a, b Point) float64 {
+ d0 := x.Angle(a.Vector)
+ d1 := x.Angle(b.Vector)
+ return float64(d0 / (d0 + d1))
+}
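+
+// For example, Project and DistanceFraction compose as follows (a sketch,
+// assuming x, a, b are unit-length Points and A != B):
+//
+//	p := Project(x, a, b)          // closest point to x on segment AB
+//	t := DistanceFraction(p, a, b) // fraction such that p ~= Interpolate(t, a, b)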
+
+// Interpolate returns the point X along the line segment AB whose distance from A
+// is the given fraction "t" of the distance AB. Does NOT require that "t" be
+// between 0 and 1. Note that all distances are measured on the surface of
+// the sphere, so this is more complicated than just computing (1-t)*a + t*b
+// and normalizing the result.
+func Interpolate(t float64, a, b Point) Point {
+ if t == 0 {
+ return a
+ }
+ if t == 1 {
+ return b
+ }
+ ab := a.Angle(b.Vector)
+ return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
+}
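+
+// For example, Interpolate(0.5, a, b) returns the midpoint of AB measured
+// along the surface of the sphere, and Interpolate(2, a, b) returns the point
+// obtained by extending AB past B by the length of AB.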
+
+// InterpolateAtDistance returns the point X along the line segment AB whose
+// distance from A is the angle ax.
+func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
+ aRad := ax.Radians()
+
+ // Use PointCross to compute the tangent vector at A towards B. The
+ // result is always perpendicular to A, even if A=B or A=-B, but it is not
+ // necessarily unit length. (We effectively normalize it below.)
+ normal := a.PointCross(b)
+ tangent := normal.Vector.Cross(a.Vector)
+
+ // Now compute the appropriate linear combination of A and "tangent". With
+ // infinite precision the result would always be unit length, but we
+ // normalize it anyway to ensure that the error is within acceptable bounds.
+ // (Otherwise errors can build up when the result of one interpolation is
+ // fed into another interpolation.)
+ return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
+}
+
+// minUpdateDistanceMaxError returns the maximum error in the result of
+// UpdateMinDistance (and the associated functions such as
+// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
+// input points are normalized to within the bounds guaranteed by r3.Vector's
+// Normalize. The error can be added or subtracted from an s1.ChordAngle
+// using its Expanded method.
+func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
+ // There are two cases for the maximum error in UpdateMinDistance(),
+ // depending on whether the closest point is interior to the edge.
+ return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError())
+}
+
+// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
+// UpdateMinInteriorDistance, assuming that all input points are normalized
+// to within the bounds guaranteed by Point's Normalize. The error can be added
+// or subtracted from an s1.ChordAngle using its Expanded method.
+//
+// Note that accuracy goes down as the distance approaches 0 degrees or 180
+// degrees (for different reasons). Near 0 degrees the error is acceptable
+// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). For
+// exactly antipodal points the maximum error is quite high (0.5 meters),
+// but this error drops rapidly as the points move away from antipodality
+// (approximately 1 millimeter for points that are 50 meters from antipodal,
+// and 1 micrometer for points that are 50km from antipodal).
+//
+// TODO(roberts): Currently the error bound does not hold for edges whose endpoints
+// are antipodal to within about 1e-15 radians (less than 1 micron). This could
+// be fixed by extending PointCross to use higher precision when necessary.
+func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
+ // If a point is more than 90 degrees from an edge, then the minimum
+ // distance is always to one of the endpoints, not to the edge interior.
+ if dist >= s1.RightChordAngle {
+ return 0.0
+ }
+
+	// This bound includes all sources of error, assuming that the input points
+ // are normalized. a and b are components of chord length that are
+ // perpendicular and parallel to a plane containing the edge respectively.
+ b := math.Min(1.0, 0.5*float64(dist))
+ a := math.Sqrt(b * (2 - b))
+ return ((2.5+2*math.Sqrt(3)+8.5*a)*a +
+ (2+2*math.Sqrt(3)/3+6.5*(1-b))*b +
+ (23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
+}
+
+// updateMinDistance computes the distance from a point X to a line segment AB,
+// and if either the distance was less than the given minDist, or alwaysUpdate is
+// true, the value and whether it was updated are returned.
+func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+ if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
+ // Minimum distance is attained along the edge interior.
+ return d, true
+ }
+
+ // Otherwise the minimum distance is to one of the endpoints.
+ xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+ dist := s1.ChordAngle(math.Min(xa2, xb2))
+ if !alwaysUpdate && dist >= minDist {
+ return minDist, false
+ }
+ return dist, true
+}
+
+// interiorDist returns the shortest distance from point x to edge ab, assuming
+// that the closest point to X is interior to AB. If the closest point is not
+// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set
+// to false, the distance is only updated when the new value is less than minDist.
+func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
+ // Chord distance of x to both end points a and b.
+ xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+
+ // The closest point on AB could either be one of the two vertices (the
+ // vertex case) or in the interior (the interior case). Let C = A x B.
+ // If X is in the spherical wedge extending from A to B around the axis
+ // through C, then we are in the interior case. Otherwise we are in the
+ // vertex case.
+ //
+ // Check whether we might be in the interior case. For this to be true, XAB
+ // and XBA must both be acute angles. Checking this condition exactly is
+ // expensive, so instead we consider the planar triangle ABX (which passes
+ // through the sphere's interior). The planar angles XAB and XBA are always
+ // less than the corresponding spherical angles, so if we are in the
+ // interior case then both of these angles must be acute.
+ //
+ // We check this by computing the squared edge lengths of the planar
+ // triangle ABX, and testing whether angles XAB and XBA are both acute using
+ // the law of cosines:
+ //
+ // | XA^2 - XB^2 | < AB^2 (*)
+ //
+ // This test must be done conservatively (taking numerical errors into
+ // account) since otherwise we might miss a situation where the true minimum
+ // distance is achieved by a point on the edge interior.
+ //
+ // There are two sources of error in the expression above (*). The first is
+ // that points are not normalized exactly; they are only guaranteed to be
+ // within 2 * dblEpsilon of unit length. Under the assumption that the two
+ // sides of (*) are nearly equal, the total error due to normalization errors
+ // can be shown to be at most
+ //
+ // 2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+ //
+ // The other source of error is rounding of results in the calculation of (*).
+ // Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon,
+ // plus an additional relative error of 0.5 * dblEpsilon in the final
+ // subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 +
+ // AB^2) for convenience. This yields a final error bound of
+ //
+ // 4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 .
+ ab2 := a.Sub(b.Vector).Norm2()
+ maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon)
+ if math.Abs(xa2-xb2) >= ab2+maxError {
+ return minDist, false
+ }
+
+ // The minimum distance might be to a point on the edge interior. Let R
+ // be closest point to X that lies on the great circle through AB. Rather
+ // than computing the geodesic distance along the surface of the sphere,
+ // instead we compute the "chord length" through the sphere's interior.
+ //
+ // The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
+ // is the point X projected onto the plane through the great circle AB.
+ // The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
+ // We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
+ // is faster and the corresponding distance on the Earth's surface is
+ // accurate to within 1% for distances up to about 1800km.
+ c := a.PointCross(b)
+ c2 := c.Norm2()
+ xDotC := x.Dot(c.Vector)
+ xDotC2 := xDotC * xDotC
+ if !alwaysUpdate && xDotC2 > c2*float64(minDist) {
+ // The closest point on the great circle AB is too far away. We need to
+ // test this using ">" rather than ">=" because the actual minimum bound
+ // on the distance is (xDotC2 / c2), which can be rounded differently
+ // than the (more efficient) multiplicative test above.
+ return minDist, false
+ }
+
+ // Otherwise we do the exact, more expensive test for the interior case.
+ // This test is very likely to succeed because of the conservative planar
+ // test we did initially.
+ //
+ // TODO(roberts): Ensure that the errors in test are accurately reflected in the
+ // minUpdateInteriorDistanceMaxError.
+ cx := c.Cross(x.Vector)
+ if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 {
+ return minDist, false
+ }
+
+ // Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
+ // This calculation has good accuracy for all chord lengths since it
+ // is based on both the dot product and cross product (rather than
+ // deriving one from the other). However, note that the chord length
+ // representation itself loses accuracy as the angle approaches π.
+ qr := 1 - math.Sqrt(cx.Norm2()/c2)
+ dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr))
+
+ if !alwaysUpdate && dist >= minDist {
+ return minDist, false
+ }
+
+ return dist, true
+}
+
+// updateEdgePairMinDistance computes the minimum distance between the given
+// pair of edges. If the two edges cross, the distance is zero. The cases
+// a0 == a1 and b0 == b1 are handled correctly.
+func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
+ if minDist == 0 {
+ return 0, false
+ }
+ if CrossingSign(a0, a1, b0, b1) == Cross {
+ return 0, true
+ }
+
+ // Otherwise, the minimum distance is achieved at an endpoint of at least
+ // one of the two edges. We ensure that all four possibilities are always checked.
+ //
+ // The calculation below computes each of the six vertex-vertex distances
+ // twice (this could be optimized).
+ var ok1, ok2, ok3, ok4 bool
+ minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
+ minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
+ minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
+ minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
+ return minDist, ok1 || ok2 || ok3 || ok4
+}
+
+// updateEdgePairMaxDistance computes the maximum distance between the given pair of
+// edges. If one edge crosses the antipodal reflection of the other, the distance is pi.
+func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
+ if maxDist == s1.StraightChordAngle {
+ return s1.StraightChordAngle, false
+ }
+ if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross {
+ return s1.StraightChordAngle, true
+ }
+
+ // Otherwise, the maximum distance is achieved at an endpoint of at least
+ // one of the two edges. We ensure that all four possibilities are always checked.
+ //
+ // The calculation below computes each of the six vertex-vertex distances
+ // twice (this could be optimized).
+ var ok1, ok2, ok3, ok4 bool
+ maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist)
+ maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist)
+ maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist)
+ maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist)
+ return maxDist, ok1 || ok2 || ok3 || ok4
+}
+
+// EdgePairClosestPoints returns the pair of points (a, b) that achieves the
+// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and
+// b is a point on b0b1. If the two edges intersect, a and b are both equal to
+// the intersection point. Handles a0 == a1 and b0 == b1 correctly.
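+//
+// A usage sketch (the endpoints are arbitrary illustrative values):
+//
+//  a0 := PointFromLatLng(LatLngFromDegrees(0, 0))
+//  a1 := PointFromLatLng(LatLngFromDegrees(0, 10))
+//  b0 := PointFromLatLng(LatLngFromDegrees(5, 5))
+//  b1 := PointFromLatLng(LatLngFromDegrees(10, 5))
+//  pa, pb := EdgePairClosestPoints(a0, a1, b0, b1)
+//  dist := ChordAngleBetweenPoints(pa, pb)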
+func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
+ if CrossingSign(a0, a1, b0, b1) == Cross {
+ x := Intersection(a0, a1, b0, b1)
+ return x, x
+ }
+ // We save some work by first determining which vertex/edge pair achieves
+ // the minimum distance, and then computing the closest point on that edge.
+ var minDist s1.ChordAngle
+ var ok bool
+
+ minDist, ok = updateMinDistance(a0, b0, b1, minDist, true)
+ closestVertex := 0
+ if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok {
+ closestVertex = 1
+ }
+ if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok {
+ closestVertex = 2
+ }
+ if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok {
+ closestVertex = 3
+ }
+ switch closestVertex {
+ case 0:
+ return a0, Project(a0, b0, b1)
+ case 1:
+ return a1, Project(a1, b0, b1)
+ case 2:
+ return Project(b0, a0, a1), b0
+ case 3:
+ return Project(b1, a0, a1), b1
+ default:
+ panic("illegal case reached")
+ }
+}
diff --git a/vendor/github.com/golang/geo/s2/edge_query.go b/vendor/github.com/golang/geo/s2/edge_query.go
new file mode 100644
index 000000000..3942c2bc5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_query.go
@@ -0,0 +1,512 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "sort"
+
+ "github.com/golang/geo/s1"
+)
+
+// EdgeQueryOptions holds the options for controlling how EdgeQuery operates.
+//
+// Options can be chained together builder-style:
+//
+// opts = NewClosestEdgeQueryOptions().
+// MaxResults(1).
+// DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)).
+// MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree))
+// query = NewClosestEdgeQuery(index, opts)
+//
+// or set individually:
+//
+// opts = NewClosestEdgeQueryOptions()
+// opts.IncludeInteriors(true)
+//
+// or just inline:
+//
+// query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3))
+//
+// If you pass nil as the options, the default values are used.
+type EdgeQueryOptions struct {
+ common *queryOptions
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// less than this limit should be returned. Edges whose distance is exactly
+// equal to the limit are not returned. To include values that are equal,
+// specify the limit as the next largest representable distance, i.e.
+// limit.Successor().
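+//
+// For example, to make the limit inclusive (a sketch; d is a hypothetical
+// s1.ChordAngle):
+//
+//  opts = opts.DistanceLimit(d.Successor())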
+func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions {
+ e.common = e.common.DistanceLimit(limit)
+ return e
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions {
+ e.common = e.common.IncludeInteriors(x)
+ return e
+}
+
+// UseBruteForce enables or disables the use of the brute force algorithm in a query.
+func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions {
+ e.common = e.common.UseBruteForce(x)
+ return e
+}
+
+// MaxError specifies that edges up to dist farther away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than the DistanceLimit will always be returned.
+func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions {
+ e.common = e.common.MaxError(dist)
+ return e
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions {
+ e.common = e.common.MaxResults(n)
+ return e
+}
+
+// NewClosestEdgeQueryOptions returns a set of edge query options suitable
+// for performing closest edge queries.
+func NewClosestEdgeQueryOptions() *EdgeQueryOptions {
+ return &EdgeQueryOptions{
+ common: newQueryOptions(minDistance(0)),
+ }
+}
+
+// NewFurthestEdgeQueryOptions returns a set of edge query options suitable
+// for performing furthest edge queries.
+func NewFurthestEdgeQueryOptions() *EdgeQueryOptions {
+ return &EdgeQueryOptions{
+ common: newQueryOptions(maxDistance(0)),
+ }
+}
+
+// EdgeQueryResult represents an edge that meets the target criteria for the
+// query. Note the following special cases:
+//
+// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape.
+// Such results may be returned when the option IncludeInteriors is true.
+//
+// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge
+// satisfies the requested query options.
+type EdgeQueryResult struct {
+ distance distance
+ shapeID int32
+ edgeID int32
+}
+
+// Distance reports the distance to the edge in this result that satisfied
+// the query's parameters.
+func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() }
+
+// ShapeID reports the ID of the Shape this result is for.
+func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID }
+
+// EdgeID reports the ID of the edge in the result's Shape.
+func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID }
+
+// newEdgeQueryResult returns a result instance with default values.
+func newEdgeQueryResult(target distanceTarget) EdgeQueryResult {
+ return EdgeQueryResult{
+ distance: target.distance().infinity(),
+ shapeID: -1,
+ edgeID: -1,
+ }
+}
+
+// IsInterior reports if this result represents the interior of a Shape.
+func (e EdgeQueryResult) IsInterior() bool {
+ return e.shapeID >= 0 && e.edgeID < 0
+}
+
+// IsEmpty reports if this result has no edge that satisfies the given edge
+// query options. This result is only returned in one special case, namely
+// when findEdge does not find any suitable edges.
+func (e EdgeQueryResult) IsEmpty() bool {
+ return e.shapeID < 0
+}
+
+// Less reports if this result is less than the other, first by distance,
+// then by (shapeID, edgeID). This is used for sorting.
+func (e EdgeQueryResult) Less(other EdgeQueryResult) bool {
+ if e.distance.less(other.distance) {
+ return true
+ }
+ if other.distance.less(e.distance) {
+ return false
+ }
+ if e.shapeID < other.shapeID {
+ return true
+ }
+ if other.shapeID < e.shapeID {
+ return false
+ }
+ return e.edgeID < other.edgeID
+}
+
+// EdgeQuery is used to find the edge(s) between two geometries that match a
+// given set of options. It is flexible enough so that it can be adapted to
+// compute maximum distances and even potentially Hausdorff distances.
+//
+// By using the appropriate options, this type can answer questions such as:
+//
+// - Find the minimum distance between two geometries A and B.
+// - Find all edges of geometry A that are within a distance D of geometry B.
+// - Find the k edges of geometry A that are closest to a given point P.
+//
+// You can also specify whether polygons should include their interiors (i.e.,
+// if a point is contained by a polygon, should the distance be zero or should
+// it be measured to the polygon boundary?).
+//
+// The input geometries may consist of any number of points, polylines, and
+// polygons (collectively referred to as "shapes"). Shapes do not need to be
+// disjoint; they may overlap or intersect arbitrarily. The implementation is
+// designed to be fast for both simple and complex geometries.
+type EdgeQuery struct {
+ index *ShapeIndex
+ opts *queryOptions
+ target distanceTarget
+
+ // True if opts.maxError must be subtracted from ShapeIndex cell distances
+ // in order to ensure that such distances are measured conservatively. This
+ // is true only if the target takes advantage of maxError in order to
+ // return faster results, and 0 < maxError < distanceLimit.
+ useConservativeCellDistance bool
+
+ // The decision about whether to use the brute force algorithm is based on
+ // counting the total number of edges in the index. However if the index
+ // contains a large number of shapes, this in itself might take too long.
+ // So instead we only count edges up to (maxBruteForceIndexSize() + 1)
+ // for the current target type (stored as indexNumEdgesLimit).
+ indexNumEdges int
+ indexNumEdgesLimit int
+
+ // The distance beyond which we can safely ignore further candidate edges.
+ // (Candidates that are exactly at the limit are ignored; this is more
+ // efficient for UpdateMinDistance and should not affect clients since
+ // distance measurements have a small amount of error anyway.)
+ //
+ // Initially this is the same as the maximum distance specified by the user,
+ // but it can also be updated by the algorithm (see maybeAddResult).
+ distanceLimit distance
+
+ // The current set of results of the query.
+ results []EdgeQueryResult
+
+ // This field is true when duplicates must be avoided explicitly. This
+ // is achieved by maintaining a separate set keyed by (shapeID, edgeID)
+ // only, and checking whether each edge is in that set before computing the
+ // distance to it.
+ avoidDuplicates bool
+
+ // testedEdges tracks the set of shape and edges that have already been tested.
+ testedEdges map[ShapeEdgeID]uint32
+}
+
+// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the
+// closest edge(s) to a given Point, Edge, Cell, or geometry collection.
+//
+// You can find either the k closest edges, or all edges within a given
+// radius, or both (i.e., the k closest edges up to a given maximum radius).
+// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in
+// the options.
+//
+// By default *all* edges are returned, so you should always specify either
+// MaxResults or DistanceLimit options or both.
+//
+// Note that by default, distances are measured to the boundary and interior
+// of polygons. For example, if a point is inside a polygon then its distance
+// is zero. To change this behavior, set the IncludeInteriors option to false.
+//
+// If you only need to test whether the distance is above or below a given
+// threshold (e.g., 10 km), you can use the IsDistanceLess() method. This is
+// much faster than actually calculating the distance with FindEdge,
+// since the implementation can stop as soon as it can prove that the minimum
+// distance is either above or below the threshold.
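+//
+// A sketch of the 5 km example above (assuming a point target constructed
+// via NewMinDistanceToPointTarget from min_distance_targets.go; index, pt,
+// and the earth radius constant are illustrative):
+//
+//  const earthRadiusMeters = 6371000.0
+//  limit := s1.ChordAngleFromAngle(s1.Angle(5000.0 / earthRadiusMeters))
+//  query := NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().DistanceLimit(limit))
+//  results := query.FindEdges(NewMinDistanceToPointTarget(pt))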
+func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+ if opts == nil {
+ opts = NewClosestEdgeQueryOptions()
+ }
+ return &EdgeQuery{
+ testedEdges: make(map[ShapeEdgeID]uint32),
+ index: index,
+ opts: opts.common,
+ }
+}
+
+// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the
+// furthest edge(s) from a given Point, Edge, Cell, or geometry collection.
+//
+// The furthest edge is defined as the one which maximizes the
+// distance from any point on that edge to any point on the target geometry.
+//
+// Similar to the example in NewClosestEdgeQuery, to find the 5 furthest edges
+// from a given Point, set MaxResults in the options. A sketch (assuming a
+// point target from max_distance_targets.go; index and pt are hypothetical):
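+//
+//  opts := NewFurthestEdgeQueryOptions().MaxResults(5)
+//  query := NewFurthestEdgeQuery(index, opts)
+//  results := query.FindEdges(NewMaxDistanceToPointTarget(pt))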
+func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery {
+ if opts == nil {
+ opts = NewFurthestEdgeQueryOptions()
+ }
+ return &EdgeQuery{
+ testedEdges: make(map[ShapeEdgeID]uint32),
+ index: index,
+ opts: opts.common,
+ }
+}
+
+// FindEdges returns the edges for the given target that satisfy the current options.
+//
+// Note that if opts.IncludeInteriors is true, the results may include some
+// entries with EdgeID == -1. This indicates that the target intersects
+// the indexed polygon with the given ShapeID.
+func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult {
+ return e.findEdges(target, e.opts)
+}
+
+// Distance reports the distance to the target. If the index or target is empty,
+// returns the EdgeQuery's maximal sentinel.
+//
+// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the
+// distance against a threshold value, since it is often much faster.
+func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle {
+ return e.findEdge(target, e.opts).Distance()
+}
+
+// IsDistanceLess reports if the distance to target is less than the given limit.
+//
+// This method is usually much faster than Distance(), since it is much
+// less work to determine whether the minimum distance is above or below a
+// threshold than it is to calculate the actual minimum distance.
+//
+// If you wish to check if the distance is less than or equal to the limit, use:
+//
+// query.IsDistanceLess(target, limit.Successor())
+//
+func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool {
+ opts := e.opts
+ opts = opts.MaxResults(1).
+ DistanceLimit(limit).
+ MaxError(s1.StraightChordAngle)
+ return !e.findEdge(target, opts).IsEmpty()
+}
+
+// IsDistanceGreater reports if the distance to target is greater than limit.
+//
+// This method is usually much faster than Distance, since it is much
+// less work to determine whether the maximum distance is above or below a
+// threshold than it is to calculate the actual maximum distance.
+// If you wish to check if the distance is greater than or equal to the limit, use:
+//
+// query.IsDistanceGreater(target, limit.Predecessor())
+//
+func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceLess(target, limit)
+}
+
+// IsConservativeDistanceLessOrEqual reports if the distance to target is less
+// or equal to the limit, where the limit has been expanded by the maximum error
+// for the distance calculation.
+//
+// For example, suppose that we want to test whether two geometries might
+// intersect each other after they are snapped together using Builder
+// (using the IdentitySnapFunction with a given "snap radius"). Since
+// Builder uses exact distance predicates (s2predicates), we need to
+// measure the distance between the two geometries conservatively. If the
+// distance is definitely greater than "snap radius", then the geometries
+// are guaranteed to not intersect after snapping.
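+//
+// A sketch of that test (snapRadius is a hypothetical s1.ChordAngle):
+//
+//  if !query.IsConservativeDistanceLessOrEqual(target, snapRadius) {
+//  // Definitely farther apart than snapRadius; cannot intersect after snapping.
+//  }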
+func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit)))
+}
+
+// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is
+// greater than or equal to the given limit, where the limit has been contracted
+// by the maximum error for the distance calculation.
+func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool {
+ return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit)))
+}
+
+// findEdges returns the closest edges to the given target that satisfy the given options.
+//
+// Note that if opts.includeInteriors is true, the results may include some
+// entries with edgeID == -1. This indicates that the target intersects the
+// indexed polygon with the given shapeID.
+func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult {
+ e.findEdgesInternal(target, opts)
+ // TODO(roberts): Revisit this if there is a heap or other sorted and
+ // uniquing data structure we can use instead of just a slice.
+ e.results = sortAndUniqueResults(e.results)
+ if len(e.results) > e.opts.maxResults {
+ e.results = e.results[:e.opts.maxResults]
+ }
+ return e.results
+}
+
+func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult {
+ if len(results) <= 1 {
+ return results
+ }
+ sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) })
+ j := 0
+ for i := 1; i < len(results); i++ {
+ if results[j] == results[i] {
+ continue
+ }
+ j++
+ results[j] = results[i]
+ }
+ return results[:j+1]
+}
+
+// findEdge is a convenience method that returns exactly one result, and if no
+// edge satisfies the given search criteria, a default result is returned.
+//
+// This exists primarily to simplify a number of the methods in the distance
+// targets and in EdgeQuery.
+func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult {
+ opts.MaxResults(1)
+ e.findEdges(target, opts)
+ if len(e.results) > 0 {
+ return e.results[0]
+ }
+
+ return newEdgeQueryResult(target)
+}
+
+// findEdgesInternal does the actual work of finding the edges that match the given options.
+func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) {
+ e.target = target
+ e.opts = opts
+
+ e.testedEdges = make(map[ShapeEdgeID]uint32)
+ e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit)
+ e.results = make([]EdgeQueryResult, 0)
+
+ if e.distanceLimit == target.distance().zero() {
+ return
+ }
+
+ if opts.includeInteriors {
+ shapeIDs := map[int32]struct{}{}
+ e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool {
+ shapeIDs[e.index.idForShape(containingShape)] = struct{}{}
+ return len(shapeIDs) < opts.maxResults
+ })
+ for shapeID := range shapeIDs {
+ e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1})
+ }
+
+ if e.distanceLimit == target.distance().zero() {
+ return
+ }
+ }
+
+ // If maxError > 0 and the target takes advantage of this, then we may
+ // need to adjust the distance estimates to ShapeIndex cells to ensure
+ // that they are always a lower bound on the true distance. For example,
+ // suppose distanceLimit == 100, maxError == 30, and we compute the distance
+ // to the target from some cell C0 as d(C0) == 80. Then because the target
+ // takes advantage of maxError, the true distance could be as low as 50.
+ // In order not to miss edges contained by such cells, we need to subtract
+ // maxError from the distance estimates. This behavior is controlled by
+ // the useConservativeCellDistance flag.
+ //
+ // However there is one important case where this adjustment is not
+ // necessary, namely when distanceLimit < maxError. This is because
+ // maxError only affects the algorithm once at least maxResults edges
+ // have been found that satisfy the given distance limit. At that point,
+ // maxError is subtracted from distanceLimit in order to ensure that
+ // any further matches are closer by at least that amount. But when
+ // distanceLimit < maxError, this reduces the distance limit to 0,
+ // i.e. all remaining candidate cells and edges can safely be discarded.
+ // (This is how IsDistanceLess() and friends are implemented.)
+ targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() &&
+ e.target.setMaxError(opts.maxError)
+
+ // Note that we can't compare maxError and distanceLimit directly
+ // because one is a Delta and one is a Distance. Instead we subtract them.
+ e.useConservativeCellDistance = targetUsesMaxError &&
+ (e.distanceLimit == target.distance().infinity() ||
+ target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError))))
+
+ // Use the brute force algorithm if the index is small enough. To avoid
+ // spending too much time counting edges when there are many shapes, we stop
+ // counting once there are too many edges. We may need to recount the edges
+ // if we later see a target with a larger brute force edge threshold.
+ minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1
+ if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit {
+ e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges)
+ e.indexNumEdgesLimit = minOptimizedEdges
+ }
+
+ if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges {
+ // The brute force algorithm already considers each edge exactly once.
+ e.avoidDuplicates = false
+ e.findEdgesBruteForce()
+ } else {
+ // If the target takes advantage of maxError then we need to avoid
+ // duplicate edges explicitly. (Otherwise it happens automatically.)
+ e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1
+
+ // TODO(roberts): Uncomment when optimized is completed.
+ e.findEdgesBruteForce()
+ //e.findEdgesOptimized()
+ }
+}
+
+func (e *EdgeQuery) addResult(r EdgeQueryResult) {
+ e.results = append(e.results, r)
+ if e.opts.maxResults == 1 {
+ // Optimization for the common case where only the closest edge is wanted.
+ e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError))
+ }
+ // TODO(roberts): Add the other if/else cases when a different data structure
+ // is used for the results.
+}
+
+func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) {
+ if e.avoidDuplicates {
+ id := ShapeEdgeID{e.index.idForShape(shape), edgeID}
+ if _, ok := e.testedEdges[id]; ok {
+ // This edge has already been tested; skip the duplicate.
+ return
+ }
+ e.testedEdges[id] = 1
+ }
+ edge := shape.Edge(int(edgeID))
+ dist := e.distanceLimit
+
+ if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok {
+ e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID})
+ }
+}
+
+func (e *EdgeQuery) findEdgesBruteForce() {
+ // Range over all shapes in the index. Does order matter here? If so,
+ // switch to "for i := 0; i < n; i++".
+ for _, shape := range e.index.shapes {
+ // TODO(roberts): can this happen if we are only ranging over current entries?
+ if shape == nil {
+ continue
+ }
+ for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ {
+ e.maybeAddResult(shape, edgeID)
+ }
+ }
+}
+
+// TODO(roberts): Remaining pieces
+// Add clear/reset/re-init method to empty out the state of the query.
+// findEdgesOptimized and related methods.
+// GetEdge
+// Project
diff --git a/vendor/github.com/golang/geo/s2/edge_tessellator.go b/vendor/github.com/golang/geo/s2/edge_tessellator.go
new file mode 100644
index 000000000..5ad63bea2
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edge_tessellator.go
@@ -0,0 +1,167 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ // MinTessellationTolerance is the minimum supported tolerance (which
+ // corresponds to a distance less than 1 micrometer on the Earth's
+ // surface, but is still much larger than the expected projection and
+ // interpolation errors).
+ MinTessellationTolerance s1.Angle = 1e-13
+)
+
+// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into
+// a chain of spherical geodesic edges such that the maximum distance between
+// the original edge and the geodesic edge chain is at most the requested
+// tolerance. Similarly, it can convert a spherical geodesic edge into a chain
+// of edges in a given 2D projection such that the maximum distance between the
+// geodesic edge and the chain of projected edges is at most the requested tolerance.
+//
+// Method | Input | Output
+// ------------|------------------------|-----------------------
+// Projected | S2 geodesics | Planar projected edges
+// Unprojected | Planar projected edges | S2 geodesics
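+//
+// A usage sketch (assuming the PlateCarreeProjection constructor from
+// projections.go; the tolerance is illustrative):
+//
+//  proj := NewPlateCarreeProjection(180)
+//  tess := NewEdgeTessellator(proj, s1.Angle(1e-9))
+//  var verts []r2.Point
+//  a := PointFromLatLng(LatLngFromDegrees(0, 170))
+//  b := PointFromLatLng(LatLngFromDegrees(0, -170))
+//  verts = tess.AppendProjected(a, b, verts)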
+type EdgeTessellator struct {
+ projection Projection
+ tolerance s1.ChordAngle
+ wrapDistance r2.Point
+}
+
+// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance.
+func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator {
+ return &EdgeTessellator{
+ projection: p,
+ tolerance: s1.ChordAngleFromAngle(maxAngle(tolerance, MinTessellationTolerance)),
+ wrapDistance: p.WrapDistance(),
+ }
+}
+
+// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges
+// in the given projection and returns the corresponding vertices.
+//
+// If the given projection has one or more coordinate axes that wrap, then
+// every vertex's coordinates will be as close as possible to the previous
+// vertex's coordinates. Note that this may yield vertices whose
+// coordinates are outside the usual range. For example, tessellating the
+// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190).
+func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point {
+ pa := e.projection.Project(a)
+ if len(vertices) == 0 {
+ vertices = []r2.Point{pa}
+ } else {
+ pa = e.wrapDestination(vertices[len(vertices)-1], pa)
+ }
+
+ pb := e.wrapDestination(pa, e.projection.Project(b))
+ return e.appendProjected(pa, a, pb, b, vertices)
+}
+
+// appendProjected splits a geodesic edge AB as necessary and returns the
+// projected vertices appended to the given vertices.
+//
+// The maximum recursion depth is log2(math.Pi / MinTessellationTolerance) < 45.
+func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []r2.Point) []r2.Point {
+ // It's impossible to robustly test whether a projected edge is close enough
+ // to a geodesic edge without knowing the details of the projection
+ // function, but the following heuristic works well for a wide range of map
+ // projections. The idea is simply to test whether the midpoint of the
+ // projected edge is close enough to the midpoint of the geodesic edge.
+ //
+ // This measures the distance between the two edges by treating them as
+ // parametric curves rather than geometric ones. The problem with
+ // measuring, say, the minimum distance from the projected midpoint to the
+ // geodesic edge is that this is a lower bound on the value we want, because
+ // the maximum separation between the two curves is generally not attained
+ // at the midpoint of the projected edge. The distance between the curve
+ // midpoints is at least an upper bound on the distance from either midpoint
+ // to opposite curve. It's not necessarily an upper bound on the maximum
+ // distance between the two curves, but it is a powerful requirement because
+ // it demands that the two curves stay parametrically close together. This
+ // turns out to be much more robust with respect to projections with
+ // singularities (e.g., the North and South poles in the rectangular and
+ // Mercator projections) because the curve parameterization speed changes
+ // rapidly near such singularities.
+ mid := Point{a.Add(b.Vector).Normalize()}
+ testMid := e.projection.Unproject(e.projection.Interpolate(0.5, pa, pb))
+
+ if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
+ return append(vertices, pb)
+ }
+
+ pmid := e.wrapDestination(pa, e.projection.Project(mid))
+ vertices = e.appendProjected(pa, a, pmid, mid, vertices)
+ return e.appendProjected(pmid, mid, pb, b, vertices)
+}
+
+// AppendUnprojected converts the planar edge AB in the given projection to a chain of
+// spherical geodesic edges and returns the vertices.
+//
+// Note that to construct a Loop, you must eliminate the duplicate first and last
+// vertex. Note also that if the given projection involves coordinate wrapping
+// (e.g. across the 180 degree meridian) then the first and last vertices may not
+// be exactly the same.
+func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point {
+ pb2 := e.wrapDestination(pa, pb)
+ a := e.projection.Unproject(pa)
+ b := e.projection.Unproject(pb)
+
+ if len(vertices) == 0 {
+ vertices = []Point{a}
+ }
+
+ // Note that coordinate wrapping can create a small amount of error. For
+ // example in the edge chain "0:-175, 0:179, 0:-177", the first edge is
+ // transformed into "0:-175, 0:-181" while the second is transformed into
+ // "0:179, 0:183". The two coordinate pairs for the middle vertex
+ // ("0:-181" and "0:179") may not yield exactly the same S2Point.
+ return e.appendUnprojected(pa, a, pb2, b, vertices)
+}
+
+// appendUnprojected interpolates a projected edge and appends the corresponding
+// points on the sphere.
+func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pb r2.Point, b Point, vertices []Point) []Point {
+ pmid := e.projection.Interpolate(0.5, pa, pb)
+ mid := e.projection.Unproject(pmid)
+ testMid := Point{a.Add(b.Vector).Normalize()}
+
+ if ChordAngleBetweenPoints(mid, testMid) < e.tolerance {
+ return append(vertices, b)
+ }
+
+ vertices = e.appendUnprojected(pa, a, pmid, mid, vertices)
+ return e.appendUnprojected(pmid, mid, pb, b, vertices)
+}
+
+// wrapDestination returns the coordinates of the edge destination wrapped if
+// necessary to obtain the shortest edge.
+func (e *EdgeTessellator) wrapDestination(pa, pb r2.Point) r2.Point {
+ x := pb.X
+ y := pb.Y
+ // The code below ensures that pb is unmodified unless wrapping is required.
+ if e.wrapDistance.X > 0 && math.Abs(x-pa.X) > 0.5*e.wrapDistance.X {
+ x = pa.X + math.Remainder(x-pa.X, e.wrapDistance.X)
+ }
+ if e.wrapDistance.Y > 0 && math.Abs(y-pa.Y) > 0.5*e.wrapDistance.Y {
+ y = pa.Y + math.Remainder(y-pa.Y, e.wrapDistance.Y)
+ }
+ return r2.Point{x, y}
+}
diff --git a/vendor/github.com/golang/geo/s2/encode.go b/vendor/github.com/golang/geo/s2/encode.go
new file mode 100644
index 000000000..49ef364af
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/encode.go
@@ -0,0 +1,237 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+const (
+ // encodingVersion is the current version of the encoding
+ // format that is compatible with C++ and other S2 libraries.
+ encodingVersion = int8(1)
+
+ // encodingCompressedVersion is the current version of the
+ // compressed format.
+ encodingCompressedVersion = int8(4)
+)
+
+// encoder handles the specifics of encoding for S2 types.
+type encoder struct {
+ w io.Writer // the real writer passed to Encode
+ err error
+}
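+
+// The encoder latches the first write error: once err is set, every
+// subsequent write becomes a no-op, so callers can issue a sequence of
+// writes and check err once at the end. A sketch (buf is any io.Writer):
+//
+//  e := &encoder{w: buf}
+//  e.writeInt8(encodingVersion)
+//  e.writeUvarint(42)
+//  if e.err != nil {
+//  // handle the first error encountered
+//  }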
+
+func (e *encoder) writeUvarint(x uint64) {
+ if e.err != nil {
+ return
+ }
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, e.err = e.w.Write(buf[:n])
+}
+
+func (e *encoder) writeBool(x bool) {
+ if e.err != nil {
+ return
+ }
+ var val int8
+ if x {
+ val = 1
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, val)
+}
+
+func (e *encoder) writeInt8(x int8) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt16(x int16) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt32(x int32) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeInt64(x int64) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeUint8(x uint8) {
+ if e.err != nil {
+ return
+ }
+ _, e.err = e.w.Write([]byte{x})
+}
+
+func (e *encoder) writeUint32(x uint32) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeUint64(x uint64) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeFloat32(x float32) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+func (e *encoder) writeFloat64(x float64) {
+ if e.err != nil {
+ return
+ }
+ e.err = binary.Write(e.w, binary.LittleEndian, x)
+}
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// byteReaderAdapter embellishes an io.Reader with a ReadByte method,
+// so that it implements the io.ByteReader interface.
+type byteReaderAdapter struct {
+ io.Reader
+}
+
+func (b byteReaderAdapter) ReadByte() (byte, error) {
+ buf := []byte{0}
+ _, err := io.ReadFull(b, buf)
+ return buf[0], err
+}
+
+func asByteReader(r io.Reader) byteReader {
+ if br, ok := r.(byteReader); ok {
+ return br
+ }
+ return byteReaderAdapter{r}
+}
+
+type decoder struct {
+ r byteReader // the real reader passed to Decode
+ err error
+}
+
+func (d *decoder) readBool() (x bool) {
+ if d.err != nil {
+ return
+ }
+ var val int8
+ d.err = binary.Read(d.r, binary.LittleEndian, &val)
+ return val == 1
+}
+
+func (d *decoder) readInt8() (x int8) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readInt16() (x int16) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readInt32() (x int32) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readInt64() (x int64) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readUint8() (x uint8) {
+ if d.err != nil {
+ return
+ }
+ x, d.err = d.r.ReadByte()
+ return
+}
+
+func (d *decoder) readUint32() (x uint32) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readUint64() (x uint64) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readFloat32() (x float32) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readFloat64() (x float64) {
+ if d.err != nil {
+ return
+ }
+ d.err = binary.Read(d.r, binary.LittleEndian, &x)
+ return
+}
+
+func (d *decoder) readUvarint() (x uint64) {
+ if d.err != nil {
+ return
+ }
+ x, d.err = binary.ReadUvarint(d.r)
+ return
+}
diff --git a/vendor/github.com/golang/geo/s2/interleave.go b/vendor/github.com/golang/geo/s2/interleave.go
new file mode 100644
index 000000000..6ac6ef58d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/interleave.go
@@ -0,0 +1,143 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+/*
+The lookup table below can convert a sequence of interleaved 8 bits into
+non-interleaved 4 bits. The table can convert both odd and even bits at the
+same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6),
+while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7).
+
+The lookup table below was generated using the following python code:
+
+ def deinterleave(bits):
+ if bits == 0: return 0
+ if bits < 4: return 1
+ return deinterleave(bits / 4) * 2 + deinterleave(bits & 3)
+
+ for i in range(256): print "0x%x," % deinterleave(i),
+*/
+var deinterleaveLookup = [256]uint32{
+ 0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3,
+ 0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3,
+ 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+ 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+ 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+ 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+ 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
+ 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
+
+ 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+ 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+
+ 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+ 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+
+ 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
+ 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+ 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
+ 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
+}
+
+// deinterleaveUint32 decodes an interleaved 64-bit code into its even bits
+// (returned first) and its odd bits (returned second).
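+//
+// For example, deinterleaveUint32(0x1b) returns (5, 3), inverting
+// interleaveUint32(5, 3) == 0x1b.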
+func deinterleaveUint32(code uint64) (uint32, uint32) {
+ x := (deinterleaveLookup[code&0x55]) |
+ (deinterleaveLookup[(code>>8)&0x55] << 4) |
+ (deinterleaveLookup[(code>>16)&0x55] << 8) |
+ (deinterleaveLookup[(code>>24)&0x55] << 12) |
+ (deinterleaveLookup[(code>>32)&0x55] << 16) |
+ (deinterleaveLookup[(code>>40)&0x55] << 20) |
+ (deinterleaveLookup[(code>>48)&0x55] << 24) |
+ (deinterleaveLookup[(code>>56)&0x55] << 28)
+ y := (deinterleaveLookup[code&0xaa]) |
+ (deinterleaveLookup[(code>>8)&0xaa] << 4) |
+ (deinterleaveLookup[(code>>16)&0xaa] << 8) |
+ (deinterleaveLookup[(code>>24)&0xaa] << 12) |
+ (deinterleaveLookup[(code>>32)&0xaa] << 16) |
+ (deinterleaveLookup[(code>>40)&0xaa] << 20) |
+ (deinterleaveLookup[(code>>48)&0xaa] << 24) |
+ (deinterleaveLookup[(code>>56)&0xaa] << 28)
+ return x, y
+}
+
+var interleaveLookup = [256]uint64{
+ 0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
+ 0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
+ 0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
+ 0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
+ 0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
+ 0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
+ 0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
+ 0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
+
+ 0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
+ 0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
+ 0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
+ 0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
+ 0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
+ 0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
+ 0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
+ 0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
+
+ 0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
+ 0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
+ 0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
+ 0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
+ 0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
+ 0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
+ 0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
+ 0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
+
+ 0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
+ 0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
+ 0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
+ 0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
+ 0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
+ 0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
+ 0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
+ 0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555,
+}
+
+// interleaveUint32 interleaves the bits of x and y into the return value.
+//
+// The 0-bit of x will be the 0-bit of the return value.
+// The 0-bit of y will be the 1-bit of the return value.
+// The 1-bit of x will be the 2-bit of the return value, and so on.
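+//
+// For example, interleaveUint32(0b101, 0b011) == 0b011011: the bits of x
+// occupy the even positions and the bits of y occupy the odd positions.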
+func interleaveUint32(x, y uint32) uint64 {
+ return (interleaveLookup[x&0xff]) |
+ (interleaveLookup[(x>>8)&0xff] << 16) |
+ (interleaveLookup[(x>>16)&0xff] << 32) |
+ (interleaveLookup[x>>24] << 48) |
+ (interleaveLookup[y&0xff] << 1) |
+ (interleaveLookup[(y>>8)&0xff] << 17) |
+ (interleaveLookup[(y>>16)&0xff] << 33) |
+ (interleaveLookup[y>>24] << 49)
+}
diff --git a/vendor/github.com/golang/geo/s2/latlng.go b/vendor/github.com/golang/geo/s2/latlng.go
new file mode 100644
index 000000000..a750304ab
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/latlng.go
@@ -0,0 +1,101 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ northPoleLat = s1.Angle(math.Pi/2) * s1.Radian
+ southPoleLat = -northPoleLat
+)
+
+// LatLng represents a point on the unit sphere as a pair of angles.
+type LatLng struct {
+ Lat, Lng s1.Angle
+}
+
+// LatLngFromDegrees returns a LatLng for the coordinates given in degrees.
+func LatLngFromDegrees(lat, lng float64) LatLng {
+ return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree}
+}
+
+// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π].
+func (ll LatLng) IsValid() bool {
+ return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi
+}
+
+// Normalized returns the normalized version of the LatLng,
+// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π].
+func (ll LatLng) Normalized() LatLng {
+ lat := ll.Lat
+ if lat > northPoleLat {
+ lat = northPoleLat
+ } else if lat < southPoleLat {
+ lat = southPoleLat
+ }
+ lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian
+ return LatLng{lat, lng}
+}
+
+func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) }
+
+// Distance returns the angle between two LatLngs.
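+//
+// For example (a sketch):
+//
+//  d := LatLngFromDegrees(0, 0).Distance(LatLngFromDegrees(0, 90))  // ≈ π/2 radians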
+func (ll LatLng) Distance(ll2 LatLng) s1.Angle {
+ // Haversine formula, as used in C++ S2LatLng::GetDistance.
+ lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians()
+ lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians()
+ dlat := math.Sin(0.5 * (lat2 - lat1))
+ dlng := math.Sin(0.5 * (lng2 - lng1))
+ x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2)
+ return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian
+}
+
+// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude
+// functions. Let's see if that's really necessary before exposing the same functionality.
+
+func latitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian
+}
+
+func longitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
+}
+
+// PointFromLatLng returns a Point for the given LatLng.
+// The maximum error in the result is 1.5 * dblEpsilon. (This does not
+// include the error of converting degrees, E5, E6, or E7 into radians.)
+func PointFromLatLng(ll LatLng) Point {
+ phi := ll.Lat.Radians()
+ theta := ll.Lng.Radians()
+ cosphi := math.Cos(phi)
+ return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}}
+}
+
+// LatLngFromPoint returns a LatLng for a given Point.
+func LatLngFromPoint(p Point) LatLng {
+ return LatLng{latitude(p), longitude(p)}
+}
+
+// ApproxEqual reports whether the latitude and longitude of the two LatLngs
+// are the same up to a small tolerance.
+func (ll LatLng) ApproxEqual(other LatLng) bool {
+ return ll.Lat.ApproxEqual(other.Lat) && ll.Lng.ApproxEqual(other.Lng)
+}
diff --git a/vendor/github.com/golang/geo/s2/lexicon.go b/vendor/github.com/golang/geo/s2/lexicon.go
new file mode 100644
index 000000000..41cbffdc2
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/lexicon.go
@@ -0,0 +1,175 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "encoding/binary"
+ "hash/adler32"
+ "math"
+ "sort"
+)
+
+// TODO(roberts): If any of these are worth making public, change the
+// method signatures and type names.
+
+// emptySetID represents the last ID that will ever be generated.
+// (Non-negative IDs are reserved for singleton sets.)
+var emptySetID = int32(math.MinInt32)
+
+// idSetLexicon compactly represents a set of non-negative
+// integers such as array indices ("ID sets"). It is especially suitable when
+// either (1) there are many duplicate sets, or (2) there are many singleton
+// or empty sets. See also sequenceLexicon.
+//
+// Each distinct ID set is mapped to a 32-bit integer. Empty and singleton
+// sets take up no additional space; the set itself is represented
+// by the unique ID assigned to the set. Duplicate sets are automatically
+// eliminated. Note also that ID sets are referred to using 32-bit integers
+// rather than pointers.
+type idSetLexicon struct {
+ idSets *sequenceLexicon
+}
+
+func newIDSetLexicon() *idSetLexicon {
+ return &idSetLexicon{
+ idSets: newSequenceLexicon(),
+ }
+}
+
+// add adds the given set of integers to the lexicon if it is not already
+// present, and returns the unique ID for this set. The values are
+// automatically sorted and duplicates are removed.
+//
+// The primary differences between this and sequenceLexicon are:
+// 1. Empty and singleton sets are represented implicitly; they use no space.
+// 2. Sets are represented rather than sequences; the ordering of values is
+// not important and duplicates are removed.
+// 3. The values must be 32-bit non-negative integers only.
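+//
+// A sketch:
+//
+//  lex := newIDSetLexicon()
+//  id := lex.add(3, 1, 3) // set {1, 3}; returns a (negative) set ID
+//  lex.idSet(id)          // returns []int32{1, 3}
+//  lex.add(7)             // singleton; returns 7 itself
+//  lex.add()              // empty set; returns emptySetID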
+func (l *idSetLexicon) add(ids ...int32) int32 {
+ // Empty sets have a special ID chosen not to conflict with other IDs.
+ if len(ids) == 0 {
+ return emptySetID
+ }
+
+ // Singleton sets are represented by their element.
+ if len(ids) == 1 {
+ return ids[0]
+ }
+
+ // Canonicalize the set by sorting and removing duplicates.
+ //
+ // Creates a new slice in order to not alter the supplied values.
+ set := uniqueInt32s(ids)
+
+ // Non-singleton sets are represented by the bitwise complement of the ID
+ // returned by the sequenceLexicon
+ return ^l.idSets.add(set)
+}
+
+// idSet returns the set of integers corresponding to an ID returned by add.
+func (l *idSetLexicon) idSet(setID int32) []int32 {
+ if setID >= 0 {
+ return []int32{setID}
+ }
+ if setID == emptySetID {
+ return []int32{}
+ }
+
+ return l.idSets.sequence(^setID)
+}
+
+func (l *idSetLexicon) clear() {
+ l.idSets.clear()
+}
+
+// sequenceLexicon compactly represents a sequence of values (e.g., tuples).
+// It automatically eliminates duplicate sequences, and maps the remaining
+// sequences to sequentially increasing integer IDs. See also idSetLexicon.
+//
+// Each distinct sequence is mapped to a 32-bit integer.
+type sequenceLexicon struct {
+ values []int32
+ begins []uint32
+
+ // idSet is a mapping of a sequence hash to sequence index in the lexicon.
+ idSet map[uint32]int32
+}
+
+func newSequenceLexicon() *sequenceLexicon {
+ return &sequenceLexicon{
+ begins: []uint32{0},
+ idSet: make(map[uint32]int32),
+ }
+}
+
+// clear removes all data from the lexicon.
+func (l *sequenceLexicon) clear() {
+ l.values = nil
+ l.begins = []uint32{0}
+ l.idSet = make(map[uint32]int32)
+}
+
+// add adds the given sequence of values to the lexicon if it is not already
+// present, and returns its ID. IDs are assigned sequentially starting from zero.
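+//
+// A sketch:
+//
+//  lex := newSequenceLexicon()
+//  id := lex.add([]int32{1, 2, 3}) // 0
+//  lex.add([]int32{4, 5})          // 1
+//  lex.add([]int32{1, 2, 3})       // 0 again; duplicates share an ID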
+func (l *sequenceLexicon) add(ids []int32) int32 {
+ if id, ok := l.idSet[hashSet(ids)]; ok {
+ return id
+ }
+ for _, v := range ids {
+ l.values = append(l.values, v)
+ }
+ l.begins = append(l.begins, uint32(len(l.values)))
+
+ id := int32(len(l.begins)) - 2
+ l.idSet[hashSet(ids)] = id
+
+ return id
+}
+
+// sequence returns the original sequence of values for the given ID.
+func (l *sequenceLexicon) sequence(id int32) []int32 {
+ return l.values[l.begins[id]:l.begins[id+1]]
+}
+
+// size reports the number of value sequences in the lexicon.
+func (l *sequenceLexicon) size() int {
+ // Subtract one because the list of begins starts out with the first element set to 0.
+ return len(l.begins) - 1
+}
+
+// hash returns a hash of this sequence of int32s.
+func hashSet(s []int32) uint32 {
+ // TODO(roberts): We just need a way to nicely hash all the values down to
+ // a 32-bit value. To ensure no unnecessary dependencies we use the core
+ // library types available to do this. Is there a better option?
+ a := adler32.New()
+ binary.Write(a, binary.LittleEndian, s)
+ return a.Sum32()
+}
+
+// uniqueInt32s returns the sorted and uniqued set of int32s from the input.
+func uniqueInt32s(in []int32) []int32 {
+ var vals []int32
+ m := make(map[int32]bool)
+ for _, i := range in {
+ if m[i] {
+ continue
+ }
+ m[i] = true
+ vals = append(vals, i)
+ }
+ sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] })
+ return vals
+}
diff --git a/vendor/github.com/golang/geo/s2/loop.go b/vendor/github.com/golang/geo/s2/loop.go
new file mode 100644
index 000000000..882d8583c
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/loop.go
@@ -0,0 +1,1816 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Loop represents a simple spherical polygon. It consists of a sequence
+// of vertices where the first vertex is implicitly connected to the
+// last. All loops are defined to have a CCW orientation, i.e. the interior of
+// the loop is on the left side of the edges. This implies that a clockwise
+// loop enclosing a small area is interpreted to be a CCW loop enclosing a
+// very large area.
+//
+// Loops are not allowed to have any duplicate vertices (whether adjacent or
+// not). Non-adjacent edges are not allowed to intersect, and furthermore edges
+// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be
+// antipodal). Loops must have at least 3 vertices (except for the "empty" and
+// "full" loops discussed below).
+//
+// There are two special loops: the "empty" loop contains no points and the
+// "full" loop contains all points. These loops do not have any edges, but to
+// preserve the invariant that every loop can be represented as a vertex
+// chain, they are defined as having exactly one vertex each (see EmptyLoop
+// and FullLoop).
+type Loop struct {
+ vertices []Point
+
+ // originInside keeps a precomputed value whether this loop contains the origin
+ // versus computing from the set of vertices every time.
+ originInside bool
+
+ // depth is the nesting depth of this Loop if it is contained by a Polygon
+ // or other shape and is used to determine if this loop represents a hole
+ // or a filled in portion.
+ depth int
+
+ // bound is a conservative bound on all points contained by this loop.
+ // If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
+ bound Rect
+
+ // Since bound is not exact, it is possible that a loop A contains
+ // another loop B whose bounds are slightly larger. subregionBound
+ // has been expanded sufficiently to account for this error, i.e.
+ // if A.Contains(B), then A.subregionBound.Contains(B.bound).
+ subregionBound Rect
+
+ // index is the spatial index for this Loop.
+ index *ShapeIndex
+}
+
+// LoopFromPoints constructs a loop from the given points.
+func LoopFromPoints(pts []Point) *Loop {
+ l := &Loop{
+ vertices: pts,
+ }
+
+ l.initOriginAndBound()
+ return l
+}
+
+// LoopFromCell constructs a loop corresponding to the given cell.
+//
+// Note that the loop and cell *do not* contain exactly the same set of
+// points, because Loop and Cell have slightly different definitions of
+// point containment. For example, a Cell vertex is contained by all
+// four neighboring Cells, but it is contained by exactly one of four
+// Loops constructed from those cells. As another example, the cell
+// coverings of cell and LoopFromCell(cell) will be different, because the
+// loop contains points on its boundary that actually belong to other cells
+// (i.e., the covering will include a layer of neighboring cells).
+func LoopFromCell(c Cell) *Loop {
+ l := &Loop{
+ vertices: []Point{
+ c.Vertex(0),
+ c.Vertex(1),
+ c.Vertex(2),
+ c.Vertex(3),
+ },
+ }
+
+ l.initOriginAndBound()
+ return l
+}
+
+// These two points are used for the special Empty and Full loops.
+var (
+ emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}}
+ fullLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: -1}}
+)
+
+// EmptyLoop returns a special "empty" loop.
+func EmptyLoop() *Loop {
+ return LoopFromPoints([]Point{emptyLoopPoint})
+}
+
+// FullLoop returns a special "full" loop.
+func FullLoop() *Loop {
+ return LoopFromPoints([]Point{fullLoopPoint})
+}
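+
+// Both special loops have exactly one vertex and no edges. For example:
+//
+//	EmptyLoop().IsEmpty()  // == true; the loop contains no points
+//	FullLoop().IsFull()    // == true; the loop contains all points
+//	EmptyLoop().NumEdges() // == 0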
+
+// initOriginAndBound sets the origin containment for this loop and then calls
+// the initialization for the bounds objects and the internal index.
+func (l *Loop) initOriginAndBound() {
+ if len(l.vertices) < 3 {
+ // Check for the special "empty" and "full" loops (which have one vertex).
+ if !l.isEmptyOrFull() {
+ l.originInside = false
+ return
+ }
+
+ // This is the special empty or full loop, so the origin containment depends
+ // on whether the vertex is in the southern hemisphere.
+ l.originInside = l.vertices[0].Z < 0
+ } else {
+ // Point containment testing is done by counting edge crossings starting
+ // at a fixed point on the sphere (OriginPoint). We need to know whether
+ // the reference point (OriginPoint) is inside or outside the loop before
+ // we can construct the ShapeIndex. We do this by first guessing that
+ // it is outside, and then seeing whether we get the correct containment
+ // result for vertex 1. If the result is incorrect, the origin must be
+ // inside the loop.
+ //
+ // A loop with consecutive vertices A,B,C contains vertex B if and only if
+ // the fixed vector R = B.Ortho is contained by the wedge ABC. The
+ // wedge is closed at A and open at C, i.e. the point B is inside the loop
+ // if A = R but not if C = R. This convention is required for compatibility
+ // with VertexCrossing. (Note that we can't use OriginPoint
+ // as the fixed vector because of the possibility that B == OriginPoint.)
+ l.originInside = false
+ v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1])
+ if v1Inside != l.ContainsPoint(l.vertices[1]) {
+ l.originInside = true
+ }
+ }
+
+ // We *must* call initBound before initializing the index, because
+ // initBound calls ContainsPoint which does a bounds check before using
+ // the index.
+ l.initBound()
+
+ // Create a new index and add us to it.
+ l.index = NewShapeIndex()
+ l.index.Add(l)
+}
+
+// initBound sets up the approximate bounding Rects for this loop.
+func (l *Loop) initBound() {
+ // Check for the special "empty" and "full" loops.
+ if l.isEmptyOrFull() {
+ if l.IsEmpty() {
+ l.bound = EmptyRect()
+ } else {
+ l.bound = FullRect()
+ }
+ l.subregionBound = l.bound
+ return
+ }
+
+ // The bounding rectangle of a loop is not necessarily the same as the
+ // bounding rectangle of its vertices. First, the maximal latitude may be
+ // attained along the interior of an edge. Second, the loop may wrap
+ // entirely around the sphere (e.g. a loop that defines two revolutions of a
+ // candy-cane stripe). Third, the loop may include one or both poles.
+ // Note that a small clockwise loop near the equator contains both poles.
+ bounder := NewRectBounder()
+ for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice
+ bounder.AddPoint(l.Vertex(i))
+ }
+ b := bounder.RectBound()
+
+ if l.ContainsPoint(Point{r3.Vector{X: 0, Y: 0, Z: 1}}) {
+ b = Rect{Lat: r1.Interval{Lo: b.Lat.Lo, Hi: math.Pi / 2}, Lng: s1.FullInterval()}
+ }
+ // If a loop contains the south pole, then either it wraps entirely
+ // around the sphere (full longitude range), or it also contains the
+ // north pole in which case b.Lng.IsFull() due to the test above.
+ // Either way, we only need to do the south pole containment test if
+ // b.Lng.IsFull().
+ if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{X: 0, Y: 0, Z: -1}}) {
+ b.Lat.Lo = -math.Pi / 2
+ }
+ l.bound = b
+ l.subregionBound = ExpandForSubregions(l.bound)
+}
+
+// Validate checks whether this is a valid loop.
+func (l *Loop) Validate() error {
+ if err := l.findValidationErrorNoIndex(); err != nil {
+ return err
+ }
+
+ // Check for intersections between non-adjacent edges (including at vertices)
+ // TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this.
+ // return findAnyCrossing(l.index)
+
+ return nil
+}
+
+// findValidationErrorNoIndex returns an error if this is not a valid loop, but
+// skips checks that would require a ShapeIndex to be built for the loop. This
+// is primarily used by Polygon to do validation so it doesn't trigger the
+// creation of unneeded ShapeIndices.
+func (l *Loop) findValidationErrorNoIndex() error {
+ // All vertices must be unit length.
+ for i, v := range l.vertices {
+ if !v.IsUnit() {
+ return fmt.Errorf("vertex %d is not unit length", i)
+ }
+ }
+
+ // Loops must have at least 3 vertices (except for empty and full).
+ if len(l.vertices) < 3 {
+ if l.isEmptyOrFull() {
+ return nil // Skip remaining tests.
+ }
+ return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices")
+ }
+
+ // Loops are not allowed to have any duplicate vertices or edge crossings.
+ // We split this check into two parts. First we check that no edge is
+ // degenerate (identical endpoints). Then we check that there are no
+ // intersections between non-adjacent edges (including at vertices). The
+ // second check needs the ShapeIndex, so it does not fall within the scope
+ // of this method.
+ for i, v := range l.vertices {
+ if v == l.Vertex(i+1) {
+ return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i)
+ }
+
+ // Antipodal vertices are not allowed.
+ if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other {
+ return fmt.Errorf("vertices %d and %d are antipodal", i,
+ (i+1)%len(l.vertices))
+ }
+ }
+
+ return nil
+}
+
+// Contains reports whether the region contained by this loop is a superset of the
+// region contained by the given other loop.
+func (l *Loop) Contains(o *Loop) bool {
+ // For a loop A to contain the loop B, all of the following must
+ // be true:
+ //
+ // (1) There are no edge crossings between A and B except at vertices.
+ //
+ // (2) At every vertex that is shared between A and B, the local edge
+ // ordering implies that A contains B.
+ //
+ // (3) If there are no shared vertices, then A must contain a vertex of B
+ // and B must not contain a vertex of A. (An arbitrary vertex may be
+ // chosen in each case.)
+ //
+ // The second part of (3) is necessary to detect the case of two loops whose
+ // union is the entire sphere, i.e. two loops that contain each other's
+ // boundaries but not each other's interiors.
+ if !l.subregionBound.Contains(o.bound) {
+ return false
+ }
+
+ // Special cases to handle either loop being empty or full.
+ if l.isEmptyOrFull() || o.isEmptyOrFull() {
+ return l.IsFull() || o.IsEmpty()
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := &containsRelation{}
+ if hasCrossingRelation(l, o, relation) {
+ return false
+ }
+
+ // There are no crossings, and if there are any shared vertices then A
+ // contains B locally at each shared vertex.
+ if relation.foundSharedVertex {
+ return true
+ }
+
+ // Since there are no edge intersections or shared vertices, we just need to
+ // test condition (3) above. We can skip this test if we discovered that A
+ // contains at least one point of B while checking for edge crossings.
+ if !l.ContainsPoint(o.Vertex(0)) {
+ return false
+ }
+
+ // We still need to check whether (A union B) is the entire sphere.
+ // Normally this check is very cheap due to the bounding box precondition.
+ if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) &&
+ o.ContainsPoint(l.Vertex(0)) {
+ return false
+ }
+ return true
+}
+
+// Intersects reports whether the region contained by this loop intersects the region
+// contained by the other loop.
+func (l *Loop) Intersects(o *Loop) bool {
+ // Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B).
+ //
+ // This code is similar to Contains, but is optimized for the case
+ // where both loops enclose less than half of the sphere.
+ if !l.bound.Intersects(o.bound) {
+ return false
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := &intersectsRelation{}
+ if hasCrossingRelation(l, o, relation) {
+ return true
+ }
+ if relation.foundSharedVertex {
+ return false
+ }
+
+ // Since there are no edge intersections or shared vertices, the loops
+ // intersect only if A contains B, B contains A, or the two loops contain
+ // each other's boundaries. These checks are usually cheap because of the
+ // bounding box preconditions. Note that neither loop is empty (because of
+ // the bounding box check above), so it is safe to access vertex(0).
+
+ // Check whether A contains B, or A and B contain each other's boundaries.
+ // (Note that A contains all the vertices of B in either case.)
+ if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() {
+ if l.ContainsPoint(o.Vertex(0)) {
+ return true
+ }
+ }
+ // Check whether B contains A.
+ if o.subregionBound.Contains(l.bound) {
+ if o.ContainsPoint(l.Vertex(0)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Equal reports whether two loops have the same vertices in the same linear order
+// (i.e., cyclic rotations are not allowed).
+func (l *Loop) Equal(other *Loop) bool {
+ if len(l.vertices) != len(other.vertices) {
+ return false
+ }
+
+ for i, v := range l.vertices {
+ if v != other.Vertex(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// BoundaryEqual reports whether the two loops have the same boundary. This is
+// true if and only if the loops have the same vertices in the same cyclic order
+// (i.e., the vertices may be cyclically rotated). The empty and full loops are
+// considered to have different boundaries.
+func (l *Loop) BoundaryEqual(o *Loop) bool {
+ if len(l.vertices) != len(o.vertices) {
+ return false
+ }
+
+ // Special case to handle empty or full loops. Since they have the same
+ // number of vertices, if one loop is empty/full then so is the other.
+ if l.isEmptyOrFull() {
+ return l.IsEmpty() == o.IsEmpty()
+ }
+
+ // Loop through the vertices to find the first of ours that matches the
+ // starting vertex of the other loop. Use that offset to then 'align' the
+ // vertices for comparison.
+ for offset, vertex := range l.vertices {
+ if vertex == o.Vertex(0) {
+ // There is at most one starting offset since loop vertices are unique.
+ for i := 0; i < len(l.vertices); i++ {
+ if l.Vertex(i+offset) != o.Vertex(i) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// compareBoundary returns +1 if this loop contains the boundary of the other loop,
+// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two
+// loops cross. Shared edges are handled as follows:
+//
+// If XY is a shared edge, define Reversed(XY) to be true if XY
+// appears in opposite directions in the two loops.
+// Then this loop contains XY if and only if Reversed(XY) == o.IsHole().
+// (Intuitively, this checks whether this loop contains a vanishingly small region
+// extending from the boundary of the other toward the interior of the polygon to
+// which the other belongs.)
+//
+// This function is used for testing containment and intersection of
+// multi-loop polygons. Note that this method is not symmetric, since the
+// result depends on the direction of this loop but not on the direction of
+// the other loop (in the absence of shared edges).
+//
+// This requires that neither loop is empty, and that if the other loop IsFull,
+// it is not a hole.
+func (l *Loop) compareBoundary(o *Loop) int {
+ // The bounds must intersect for containment or crossing.
+ if !l.bound.Intersects(o.bound) {
+ return -1
+ }
+
+ // Full loops are handled as though the loop surrounded the entire sphere.
+ if l.IsFull() {
+ return 1
+ }
+ if o.IsFull() {
+ return -1
+ }
+
+ // Check whether there are any edge crossings, and also check the loop
+ // relationship at any shared vertices.
+ relation := newCompareBoundaryRelation(o.IsHole())
+ if hasCrossingRelation(l, o, relation) {
+ return 0
+ }
+ if relation.foundSharedVertex {
+ if relation.containsEdge {
+ return 1
+ }
+ return -1
+ }
+
+ // There are no edge intersections or shared vertices, so we can check
+ // whether A contains an arbitrary vertex of B.
+ if l.ContainsPoint(o.Vertex(0)) {
+ return 1
+ }
+ return -1
+}
+
+// ContainsOrigin reports true if this loop contains s2.OriginPoint().
+func (l *Loop) ContainsOrigin() bool {
+ return l.originInside
+}
+
+// ReferencePoint returns the reference point for this loop.
+func (l *Loop) ReferencePoint() ReferencePoint {
+ return OriginReferencePoint(l.originInside)
+}
+
+// NumEdges returns the number of edges in this shape.
+func (l *Loop) NumEdges() int {
+ if l.isEmptyOrFull() {
+ return 0
+ }
+ return len(l.vertices)
+}
+
+// Edge returns the endpoints for the given edge index.
+func (l *Loop) Edge(i int) Edge {
+ return Edge{l.Vertex(i), l.Vertex(i + 1)}
+}
+
+// NumChains reports the number of contiguous edge chains in the Loop.
+func (l *Loop) NumChains() int {
+ if l.IsEmpty() {
+ return 0
+ }
+ return 1
+}
+
+// Chain returns the i-th edge chain in the Shape.
+func (l *Loop) Chain(chainID int) Chain {
+ return Chain{0, l.NumEdges()}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge chain.
+func (l *Loop) ChainEdge(chainID, offset int) Edge {
+ return Edge{l.Vertex(offset), l.Vertex(offset + 1)}
+}
+
+// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the
+// j-th edge of the Loop.
+func (l *Loop) ChainPosition(edgeID int) ChainPosition {
+ return ChainPosition{0, edgeID}
+}
+
+// Dimension returns the dimension of the geometry represented by this Loop.
+func (l *Loop) Dimension() int { return 2 }
+
+func (l *Loop) typeTag() typeTag { return typeTagNone }
+
+func (l *Loop) privateInterface() {}
+
+// IsEmpty reports true if this is the special empty loop that contains no points.
+func (l *Loop) IsEmpty() bool {
+ return l.isEmptyOrFull() && !l.ContainsOrigin()
+}
+
+// IsFull reports true if this is the special full loop that contains all points.
+func (l *Loop) IsFull() bool {
+ return l.isEmptyOrFull() && l.ContainsOrigin()
+}
+
+// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops.
+func (l *Loop) isEmptyOrFull() bool {
+ return len(l.vertices) == 1
+}
+
+// Vertices returns the vertices in the loop.
+func (l *Loop) Vertices() []Point {
+ return l.vertices
+}
+
+// RectBound returns a tight bounding rectangle. If the loop contains the point,
+// the bound also contains it.
+func (l *Loop) RectBound() Rect {
+ return l.bound
+}
+
+// CapBound returns a bounding cap that may have more padding than the corresponding
+// RectBound. The bound is conservative such that if the loop contains a point P,
+// the bound also contains it.
+func (l *Loop) CapBound() Cap {
+ return l.bound.CapBound()
+}
+
+// Vertex returns the vertex for the given index. For convenience, the vertex indices
+// wrap automatically for methods that do index math such as Edge.
+// i.e., Vertex(NumEdges() + n) is the same as Vertex(n).
+func (l *Loop) Vertex(i int) Point {
+ return l.vertices[i%len(l.vertices)]
+}
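+
+// For example, in a loop with 4 vertices:
+//
+//	l.Vertex(5) == l.Vertex(1)
+//	l.Edge(3)   == Edge{l.Vertex(3), l.Vertex(0)}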
+
+// OrientedVertex returns the vertex in reverse order if the loop represents a polygon
+// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where
+// n == len(vertices). This ensures that the interior of the polygon is always to
+// the left of the vertex chain.
+//
+// This requires: 0 <= i < 2 * len(vertices)
+func (l *Loop) OrientedVertex(i int) Point {
+ j := i - len(l.vertices)
+ if j < 0 {
+ j = i
+ }
+ if l.IsHole() {
+ j = len(l.vertices) - 1 - j
+ }
+ return l.Vertex(j)
+}
+
+// NumVertices returns the number of vertices in this loop.
+func (l *Loop) NumVertices() int {
+ return len(l.vertices)
+}
+
+// bruteForceContainsPoint reports if the given point is contained by this loop.
+// This method does not use the ShapeIndex, so it is only preferable below a certain
+// size of loop.
+func (l *Loop) bruteForceContainsPoint(p Point) bool {
+ origin := OriginPoint()
+ inside := l.originInside
+ crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0))
+ for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice
+ inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i))
+ }
+ return inside
+}
+
+// ContainsPoint returns true if the loop contains the point.
+func (l *Loop) ContainsPoint(p Point) bool {
+ // Empty and full loops don't need a special case, but invalid loops with
+ // zero vertices do, so we might as well handle them all at once.
+ if len(l.vertices) < 3 {
+ return l.originInside
+ }
+
+ // For small loops, and during initial construction, it is faster to just
+ // check all the crossings.
+ const maxBruteForceVertices = 32
+ if len(l.vertices) < maxBruteForceVertices || l.index == nil {
+ return l.bruteForceContainsPoint(p)
+ }
+
+ // Otherwise, look up the point in the index.
+ it := l.index.Iterator()
+ if !it.LocatePoint(p) {
+ return false
+ }
+ return l.iteratorContainsPoint(it, p)
+}
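+
+// For example, a small CCW triangle and a containment test (a sketch using
+// PointFromLatLng and LatLngFromDegrees, defined elsewhere in this package):
+//
+//	loop := LoopFromPoints([]Point{
+//		PointFromLatLng(LatLngFromDegrees(0, 0)),
+//		PointFromLatLng(LatLngFromDegrees(0, 1)),
+//		PointFromLatLng(LatLngFromDegrees(1, 0)),
+//	})
+//	loop.ContainsPoint(PointFromLatLng(LatLngFromDegrees(0.3, 0.3))) // == true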
+
+// ContainsCell reports whether the given Cell is contained by this Loop.
+func (l *Loop) ContainsCell(target Cell) bool {
+ it := l.index.Iterator()
+ relation := it.LocateCellID(target.ID())
+
+ // If "target" is disjoint from all index cells, it is not contained.
+ // Similarly, if "target" is subdivided into one or more index cells then it
+ // is not contained, since index cells are subdivided only if they (nearly)
+ // intersect a sufficient number of edges. (But note that if "target" itself
+ // is an index cell then it may be contained, since it could be a cell with
+ // no edges in the loop interior.)
+ if relation != Indexed {
+ return false
+ }
+
+ // Otherwise check if any edges intersect "target".
+ if l.boundaryApproxIntersects(it, target) {
+ return false
+ }
+
+ // Otherwise check if the loop contains the center of "target".
+ return l.iteratorContainsPoint(it, target.Center())
+}
+
+// IntersectsCell reports whether this Loop intersects the given cell.
+func (l *Loop) IntersectsCell(target Cell) bool {
+ it := l.index.Iterator()
+ relation := it.LocateCellID(target.ID())
+
+ // If target does not overlap any index cell, there is no intersection.
+ if relation == Disjoint {
+ return false
+ }
+ // If target is subdivided into one or more index cells, there is an
+ // intersection to within the ShapeIndex error bound (see Contains).
+ if relation == Subdivided {
+ return true
+ }
+ // If target is an index cell, there is an intersection because index cells
+ // are created only if they have at least one edge or they are entirely
+ // contained by the loop.
+ if it.CellID() == target.id {
+ return true
+ }
+ // Otherwise check if any edges intersect target.
+ if l.boundaryApproxIntersects(it, target) {
+ return true
+ }
+ // Otherwise check if the loop contains the center of target.
+ return l.iteratorContainsPoint(it, target.Center())
+}
+
+// CellUnionBound computes a covering of the Loop.
+func (l *Loop) CellUnionBound() []CellID {
+ return l.CapBound().CellUnionBound()
+}
+
+// boundaryApproxIntersects reports if the loop's boundary intersects target.
+// It may also return true when the loop boundary does not intersect target but
+// some edge comes within the worst-case error tolerance.
+//
+// This requires that it.Locate(target) returned Indexed.
+func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool {
+ aClipped := it.IndexCell().findByShapeID(0)
+
+ // If there are no edges, there is no intersection.
+ if len(aClipped.edges) == 0 {
+ return false
+ }
+
+ // We can save some work if target is the index cell itself.
+ if it.CellID() == target.ID() {
+ return true
+ }
+
+ // Otherwise check whether any of the edges intersect target.
+ maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist)
+ bound := target.BoundUV().ExpandedByMargin(maxError)
+ for _, ai := range aClipped.edges {
+ v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError)
+ if ok && edgeIntersectsRect(v0, v1, bound) {
+ return true
+ }
+ }
+ return false
+}
+
+// iteratorContainsPoint reports whether the iterator, positioned at a
+// ShapeIndexCell that may contain p, contains the point p.
+func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool {
+ // Test containment by drawing a line segment from the cell center to the
+ // given point and counting edge crossings.
+ aClipped := it.IndexCell().findByShapeID(0)
+ inside := aClipped.containsCenter
+ if len(aClipped.edges) > 0 {
+ center := it.Center()
+ crosser := NewEdgeCrosser(center, p)
+ aiPrev := -2
+ for _, ai := range aClipped.edges {
+ if ai != aiPrev+1 {
+ crosser.RestartAt(l.Vertex(ai))
+ }
+ aiPrev = ai
+ inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1))
+ }
+ }
+ return inside
+}
+
+// RegularLoop creates a loop with the given number of vertices, all
+// located on a circle of the specified radius around the given center.
+func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop {
+ return RegularLoopForFrame(getFrame(center), radius, numVertices)
+}
+
+// RegularLoopForFrame creates a loop centered around the z-axis of the given
+// coordinate frame, with the first vertex in the direction of the positive x-axis.
+func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop {
+ return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices))
+}
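+
+// For example, an approximately circular 16-gon with an angular radius of two
+// degrees (a sketch using PointFromLatLng and LatLngFromDegrees, defined
+// elsewhere in this package):
+//
+//	center := PointFromLatLng(LatLngFromDegrees(40.7, -74.0))
+//	loop := RegularLoop(center, 2*s1.Degree, 16)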
+
+// CanonicalFirstVertex returns a first index and a direction (either +1 or -1)
+// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does
+// not change when the loop vertex order is rotated or inverted. This allows the
+// loop vertices to be traversed in a canonical order. The return values are
+// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as
+// expected by the Vertex method.
+func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) {
+ firstIdx = 0
+ n := len(l.vertices)
+ for i := 1; i < n; i++ {
+ if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 {
+ firstIdx = i
+ }
+ }
+
+ // 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1.
+ if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 {
+ return firstIdx, 1
+ }
+
+ // n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0.
+ firstIdx += n
+ return firstIdx, -1
+}
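+
+// For example, to visit all vertices in canonical order:
+//
+//	first, dir := loop.CanonicalFirstVertex()
+//	for i := 0; i < loop.NumVertices(); i++ {
+//		v := loop.Vertex(first + i*dir)
+//		// process v
+//	}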
+
+// TurningAngle returns the sum of the turning angles at each vertex. The return
+// value is positive if the loop is counter-clockwise, negative if the loop is
+// clockwise, and zero if the loop is a great circle. Degenerate and
+// nearly-degenerate loops are handled consistently with Sign. So for example,
+// if a loop has zero area (i.e., it is a very small CCW loop) then the turning
+// angle will always be positive.
+//
+// This quantity is also called the "geodesic curvature" of the loop.
+func (l *Loop) TurningAngle() float64 {
+ // For empty and full loops, we return the limit value as the loop area
+ // approaches 0 or 4*Pi respectively.
+ if l.isEmptyOrFull() {
+ if l.ContainsOrigin() {
+ return -2 * math.Pi
+ }
+ return 2 * math.Pi
+ }
+
+ // Don't crash even if the loop is not well-defined.
+ if len(l.vertices) < 3 {
+ return 0
+ }
+
+ // To ensure that we get the same result when the vertex order is rotated,
+ // and that the result is negated when the vertex order is reversed, we need
+ // to add up the individual turn angles in a consistent order. (In general,
+ // adding up a set of numbers in a different order can change the sum due to
+ // rounding errors.)
+ //
+ // Furthermore, if we just accumulate an ordinary sum then the worst-case
+ // error is quadratic in the number of vertices. (This can happen with
+ // spiral shapes, where the partial sum of the turning angles can be linear
+ // in the number of vertices.) To avoid this we use the Kahan summation
+ // algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm).
+ n := len(l.vertices)
+ i, dir := l.CanonicalFirstVertex()
+ sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n))
+
+ compensation := s1.Angle(0)
+ for n-1 > 0 {
+ i += dir
+ angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir))
+ oldSum := sum
+ angle += compensation
+ sum += angle
+ compensation = (oldSum - sum) + angle
+ n--
+ }
+ return float64(dir) * float64(sum+compensation)
+}
+
+// turningAngleMaxError returns the maximum error in TurningAngle. The value is not
+// constant; it depends on the loop.
+func (l *Loop) turningAngleMaxError() float64 {
+ // The maximum error can be bounded as follows:
+ // 2.24 * dblEpsilon for RobustCrossProd(b, a)
+ // 2.24 * dblEpsilon for RobustCrossProd(c, b)
+ // 3.25 * dblEpsilon for Angle()
+ // 2.00 * dblEpsilon for each addition in the Kahan summation
+ // ------------------
+ // 9.73 * dblEpsilon
+ maxErrorPerVertex := 9.73 * dblEpsilon
+ return maxErrorPerVertex * float64(len(l.vertices))
+}
+
+// IsHole reports whether this loop represents a hole in its containing polygon.
+func (l *Loop) IsHole() bool { return l.depth&1 != 0 }
+
+// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise.
+func (l *Loop) Sign() int {
+ if l.IsHole() {
+ return -1
+ }
+ return 1
+}
+
+// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are
+// handled consistently with Sign, i.e., if a loop can be
+// expressed as the union of degenerate or nearly-degenerate CCW triangles,
+// then it will always be considered normalized.
+func (l *Loop) IsNormalized() bool {
+ // Optimization: if the longitude span is less than 180 degrees, then the
+ // loop covers less than half the sphere and is therefore normalized.
+ if l.bound.Lng.Length() < math.Pi {
+ return true
+ }
+
+ // We allow some error so that hemispheres are always considered normalized.
+ // TODO(roberts): This is no longer required by the Polygon implementation,
+ // so alternatively we could create the invariant that a loop is normalized
+ // if and only if its complement is not normalized.
+ return l.TurningAngle() >= -l.turningAngleMaxError()
+}
+
+// Normalize inverts the loop if necessary so that the area enclosed by the loop
+// is at most 2*pi.
+func (l *Loop) Normalize() {
+ if !l.IsNormalized() {
+ l.Invert()
+ }
+}
+
+// Invert reverses the order of the loop vertices, effectively complementing the
+// region represented by the loop. For example, the loop ABCD (with edges
+// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD).
+// Notice that the last edge is the same in both cases except that its
+// direction has been reversed.
+func (l *Loop) Invert() {
+ l.index.Reset()
+ if l.isEmptyOrFull() {
+ if l.IsFull() {
+ l.vertices[0] = emptyLoopPoint
+ } else {
+ l.vertices[0] = fullLoopPoint
+ }
+ } else {
+ // For non-special loops, reverse the slice of vertices.
+ for i := len(l.vertices)/2 - 1; i >= 0; i-- {
+ opp := len(l.vertices) - 1 - i
+ l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i]
+ }
+ }
+
+ // originInside must be set correctly before building the ShapeIndex.
+ l.originInside = !l.originInside
+ if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 {
+ // The complement of this loop contains both poles.
+ l.bound = FullRect()
+ l.subregionBound = l.bound
+ } else {
+ l.initBound()
+ }
+ l.index.Add(l)
+}
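+
+// Since Invert complements the region, the enclosed area (see Area below)
+// becomes its complement as well:
+//
+//	area := loop.Area()
+//	loop.Invert()
+//	// loop.Area() is now approximately 4*math.Pi - area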
+
+// findVertex returns the index of the vertex at the given Point in the range
+// 1..numVertices, and a boolean indicating if a vertex was found.
+func (l *Loop) findVertex(p Point) (index int, ok bool) {
+ const notFound = 0
+ if len(l.vertices) < 10 {
+ // Exhaustive search for loops below a small threshold.
+ for i := 1; i <= len(l.vertices); i++ {
+ if l.Vertex(i) == p {
+ return i, true
+ }
+ }
+ return notFound, false
+ }
+
+ it := l.index.Iterator()
+ if !it.LocatePoint(p) {
+ return notFound, false
+ }
+
+ aClipped := it.IndexCell().findByShapeID(0)
+ for i := aClipped.numEdges() - 1; i >= 0; i-- {
+ ai := aClipped.edges[i]
+ if l.Vertex(ai) == p {
+ if ai == 0 {
+ return len(l.vertices), true
+ }
+ return ai, true
+ }
+
+ if l.Vertex(ai+1) == p {
+ return ai + 1, true
+ }
+ }
+ return notFound, false
+}
+
+// ContainsNested reports whether the given loop is contained within this loop.
+// This function does not test for edge intersections. The two loops must meet
+// all of the Polygon requirements; for example this implies that their
+// boundaries may not cross or have any shared edges (although they may have
+// shared vertices).
+func (l *Loop) ContainsNested(other *Loop) bool {
+ if !l.subregionBound.Contains(other.bound) {
+ return false
+ }
+
+ // Special cases to handle either loop being empty or full. Also bail out
+ // when B has no vertices to avoid a panic on the Vertex(1) call
+ // below. (This method is called during polygon initialization before the
+ // client has an opportunity to call IsValid().)
+ if l.isEmptyOrFull() || other.NumVertices() < 2 {
+ return l.IsFull() || other.IsEmpty()
+ }
+
+ // We are given that A and B do not share any edges, and that either one
+ // loop contains the other or they do not intersect.
+ m, ok := l.findVertex(other.Vertex(1))
+ if !ok {
+ // Since other.vertex(1) is not shared, we can check whether A contains it.
+ return l.ContainsPoint(other.Vertex(1))
+ }
+
+ // Check whether the edge order around other.Vertex(1) is compatible with
+ // A containing B.
+ return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2))
+}
+
+// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x)
+// over the loop interior, given a function f(A,B,C) that returns the
+// corresponding integral over the spherical triangle ABC. Here "oriented
+// surface integral" means:
+//
+// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise,
+// and the integral of -f if ABC is clockwise.
+//
+// (2) The result of this function is *either* the integral of f over the
+// loop interior, or the integral of (-f) over the loop exterior.
+//
+// Note that there are at least two common situations where it is easy to work
+// around property (2) above:
+//
+// - If the integral of f over the entire sphere is zero, then it doesn't
+// matter which case is returned because they are always equal.
+//
+// - If f is non-negative, then it is easy to detect when the integral over
+// the loop exterior has been returned, and the integral over the loop
+// interior can be obtained by adding the integral of f over the entire
+// unit sphere (a constant) to the result.
+//
+// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well.
+func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 {
+ // We sum f over a collection T of oriented triangles, possibly
+ // overlapping. Let the sign of a triangle be +1 if it is CCW and -1
+ // otherwise, and let the sign of a point x be the sum of the signs of the
+ // triangles containing x. Then the collection of triangles T is chosen
+ // such that either:
+ //
+ // (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or
+ // (2) Each point in the loop exterior has sign -1, and sign 0 otherwise.
+ //
+ // The triangles basically consist of a fan from vertex 0 to every loop
+ // edge that does not include vertex 0. These triangles will always satisfy
+ // either (1) or (2). However, what makes this a bit tricky is that
+ // spherical edges become numerically unstable as their length approaches
+ // 180 degrees. Of course there is not much we can do if the loop itself
+ // contains such edges, but we would like to make sure that all the triangle
+ // edges under our control (i.e., the non-loop edges) are stable. For
+ // example, consider a loop around the equator consisting of four equally
+ // spaced points. This is a well-defined loop, but we cannot just split it
+ // into two triangles by connecting vertex 0 to vertex 2.
+ //
+ // We handle this type of situation by moving the origin of the triangle fan
+ // whenever we are about to create an unstable edge. We choose a new
+ // location for the origin such that all relevant edges are stable. We also
+ // create extra triangles with the appropriate orientation so that the sum
+ // of the triangle signs is still correct at every point.
+
+ // The maximum length of an edge for it to be considered numerically stable.
+ // The exact value is fairly arbitrary since it depends on the stability of
+ // the function f. The value below is quite conservative but could be
+ // reduced further if desired.
+ const maxLength = math.Pi - 1e-5
+
+ var sum float64
+ origin := l.Vertex(0)
+ for i := 1; i+1 < len(l.vertices); i++ {
+ // Let V_i be vertex(i), let O be the current origin, and let length(A,B)
+ // be the length of edge (A,B). At the start of each loop iteration, the
+ // "leading edge" of the triangle fan is (O,V_i), and we want to extend
+ // the triangle fan so that the leading edge is (O,V_i+1).
+ //
+ // Invariants:
+ // 1. length(O,V_i) < maxLength for all (i > 1).
+ // 2. Either O == V_0, or O is approximately perpendicular to V_0.
+ // 3. "sum" is the oriented integral of f over the area defined by
+ // (O, V_0, V_1, ..., V_i).
+ if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
+ // We are about to create an unstable edge, so choose a new origin O'
+ // for the triangle fan.
+ oldOrigin := origin
+ if origin == l.Vertex(0) {
+ // The following point is well-separated from V_i and V_0 (and
+ // therefore V_i+1 as well).
+ origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
+ } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
+ // All edges of the triangle (O, V_0, V_i) are stable, so we can
+ // revert to using V_0 as the origin.
+ origin = l.Vertex(0)
+ } else {
+ // (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are
+ // perpendicular. Therefore V_0.CrossProd(O) is approximately
+ // perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose
+ // this point O' as the new origin.
+ origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}
+
+ // Advance the edge (V_0,O) to (V_0,O').
+ sum += f(l.Vertex(0), oldOrigin, origin)
+ }
+ // Advance the edge (O,V_i) to (O',V_i).
+ sum += f(oldOrigin, l.Vertex(i), origin)
+ }
+ // Advance the edge (O,V_i) to (O,V_i+1).
+ sum += f(origin, l.Vertex(i), l.Vertex(i+1))
+ }
+ // If the origin is not V_0, we need to sum one more triangle.
+ if origin != l.Vertex(0) {
+ // Advance the edge (O,V_n-1) to (O,V_0).
+ sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0))
+ }
+ return sum
+}
+
+// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points;
+// see that method for commentary. The C++ version uses a templated method.
+// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well.
+func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point {
+ const maxLength = math.Pi - 1e-5
+ var sum r3.Vector
+
+ origin := l.Vertex(0)
+ for i := 1; i+1 < len(l.vertices); i++ {
+ if l.Vertex(i+1).Angle(origin.Vector) > maxLength {
+ oldOrigin := origin
+ if origin == l.Vertex(0) {
+ origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()}
+ } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength {
+ origin = l.Vertex(0)
+ } else {
+ origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)}
+ sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector)
+ }
+ sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector)
+ }
+ sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector)
+ }
+ if origin != l.Vertex(0) {
+ sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector)
+ }
+ return Point{sum}
+}
+
+// Area returns the area of the loop interior, i.e. the region on the left side of
+// the loop. The return value is between 0 and 4*pi. (Note that the return
+// value is not affected by whether this loop is a "hole" or a "shell".)
+func (l *Loop) Area() float64 {
+ // It is surprisingly difficult to compute the area of a loop robustly. The
+ // main issues are (1) whether degenerate loops are considered to be CCW or
+ // not (i.e., whether their area is close to 0 or 4*pi), and (2) computing
+ // the areas of small loops with good relative accuracy.
+ //
+ // With respect to degeneracies, we would like Area to be consistent
+ // with ContainsPoint in that loops that contain many points
+ // should have large areas, and loops that contain few points should have
+ // small areas. For example, if a degenerate triangle is considered CCW
+ // according to s2predicates Sign, then it will contain very few points and
+ // its area should be approximately zero. On the other hand if it is
+ // considered clockwise, then it will contain virtually all points and so
+ // its area should be approximately 4*pi.
+ //
+ // More precisely, let U be the set of Points for which IsUnitLength
+ // is true, let P(U) be the projection of those points onto the mathematical
+ // unit sphere, and let V(P(U)) be the Voronoi diagram of the projected
+ // points. Then for every loop x, we would like Area to approximately
+ // equal the sum of the areas of the Voronoi regions of the points p for
+ // which x.ContainsPoint(p) is true.
+ //
+ // The second issue is that we want to compute the area of small loops
+ // accurately. This requires having good relative precision rather than
+ // good absolute precision. For example, if the area of a loop is 1e-12 and
+ // the error is 1e-15, then the area only has 3 digits of accuracy. (For
+ // reference, 1e-12 is about 40 square meters on the surface of the earth.)
+ // We would like to have good relative accuracy even for small loops.
+ //
+ // To achieve these goals, we combine two different methods of computing the
+ // area. This first method is based on the Gauss-Bonnet theorem, which says
+ // that the area enclosed by the loop equals 2*pi minus the total geodesic
+ // curvature of the loop (i.e., the sum of the "turning angles" at all the
+ // loop vertices). The big advantage of this method is that as long as we
+ // use Sign to compute the turning angle at each vertex, then
+ // degeneracies are always handled correctly. In other words, if a
+ // degenerate loop is CCW according to the symbolic perturbations used by
+ // Sign, then its turning angle will be approximately 2*pi.
+ //
+ // The disadvantage of the Gauss-Bonnet method is that its absolute error is
+ // about 2e-15 times the number of vertices (see turningAngleMaxError).
+ // So, it cannot compute the area of small loops accurately.
+ //
+ // The second method is based on splitting the loop into triangles and
+ // summing the area of each triangle. To avoid the difficulty and expense
+ // of decomposing the loop into a union of non-overlapping triangles,
+ // instead we compute a signed sum over triangles that may overlap (see the
+ // comments for surfaceIntegral). The advantage of this method
+ // is that the area of each triangle can be computed with much better
+ // relative accuracy (using l'Huilier's theorem). The disadvantage is that
+ // the result is a signed area: CCW loops may yield a small positive value,
+ // while CW loops may yield a small negative value (which is converted to a
+ // positive area by adding 4*pi). This means that small errors in computing
+ // the signed area may translate into a very large error in the result (if
+ // the sign of the sum is incorrect).
+ //
+ // So, our strategy is to combine these two methods as follows. First we
+ // compute the area using the "signed sum over triangles" approach (since it
+ // is generally more accurate). We also estimate the maximum error in this
+ // result. If the signed area is too close to zero (i.e., zero is within
+ // the error bounds), then we double-check the sign of the result using the
+ // Gauss-Bonnet method. (In fact we just call IsNormalized, which is
+ // based on this method.) If the two methods disagree, we return either 0
+ // or 4*pi based on the result of IsNormalized. Otherwise we return the
+ // area that we computed originally.
+ if l.isEmptyOrFull() {
+ if l.ContainsOrigin() {
+ return 4 * math.Pi
+ }
+ return 0
+ }
+ area := l.surfaceIntegralFloat64(SignedArea)
+
+ // TODO(roberts): This error estimate is very approximate. There are two
+ // issues: (1) SignedArea needs some improvements to ensure that its error
+ // is actually never higher than GirardArea, and (2) although the number of
+ // triangles in the sum is typically N-2, in theory it could be as high as
+ // 2*N for pathological inputs. But in other respects this error bound is
+ // very conservative since it assumes that the maximum error is achieved on
+ // every triangle.
+ maxError := l.turningAngleMaxError()
+
+ // The signed area should be between approximately -4*pi and 4*pi.
+ if area < 0 {
+ // We have computed the negative of the area of the loop exterior.
+ area += 4 * math.Pi
+ }
+
+ if area > 4*math.Pi {
+ area = 4 * math.Pi
+ }
+ if area < 0 {
+ area = 0
+ }
+
+ // If the area is close enough to zero or 4*pi so that the loop orientation
+ // is ambiguous, then we compute the loop orientation explicitly.
+ if area < maxError && !l.IsNormalized() {
+ return 4 * math.Pi
+ } else if area > (4*math.Pi-maxError) && l.IsNormalized() {
+ return 0
+ }
+
+ return area
+}
+
+// Centroid returns the true centroid of the loop multiplied by the area of the
+// loop. The result is not unit length, so you may want to normalize it. Also
+// note that in general, the centroid may not be contained by the loop.
+//
+// We prescale by the loop area for two reasons: (1) it is cheaper to
+// compute this way, and (2) it makes it easier to compute the centroid of
+// more complicated shapes (by splitting them into disjoint regions and
+// adding their centroids).
+//
+// Note that the return value is not affected by whether this loop is a
+// "hole" or a "shell".
+func (l *Loop) Centroid() Point {
+ // surfaceIntegralPoint() returns either the integral of position over loop
+ // interior, or the negative of the integral of position over the loop
+ // exterior. But these two values are the same (!), because the integral of
+ // position over the entire sphere is (0, 0, 0).
+ return l.surfaceIntegralPoint(TrueCentroid)
+}
+
+// Encode encodes the Loop.
+func (l Loop) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ l.encode(e)
+ return e.err
+}
+
+func (l Loop) encode(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeUint32(uint32(len(l.vertices)))
+ for _, v := range l.vertices {
+ e.writeFloat64(v.X)
+ e.writeFloat64(v.Y)
+ e.writeFloat64(v.Z)
+ }
+
+ e.writeBool(l.originInside)
+ e.writeInt32(int32(l.depth))
+
+ // Encode the bound.
+ l.bound.encode(e)
+}
+
+// Decode decodes a loop.
+func (l *Loop) Decode(r io.Reader) error {
+ *l = Loop{}
+ d := &decoder{r: asByteReader(r)}
+ l.decode(d)
+ return d.err
+}
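+
+// For example, a round trip through an in-memory buffer (a sketch):
+//
+//	var buf bytes.Buffer
+//	if err := loop.Encode(&buf); err != nil {
+//		// handle the encoding error
+//	}
+//	var decoded Loop
+//	if err := decoded.Decode(&buf); err != nil {
+//		// handle the decoding error
+//	}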
+
+func (l *Loop) decode(d *decoder) {
+ version := int8(d.readUint8())
+ if d.err != nil {
+ return
+ }
+ if version != encodingVersion {
+ d.err = fmt.Errorf("cannot decode version %d", version)
+ return
+ }
+
+ // Empty loops are explicitly allowed here: a newly created loop has zero vertices
+ // and such loops encode and decode properly.
+ nvertices := d.readUint32()
+ if nvertices > maxEncodedVertices {
+ if d.err == nil {
+ d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
+ }
+ return
+ }
+ l.vertices = make([]Point, nvertices)
+ for i := range l.vertices {
+ l.vertices[i].X = d.readFloat64()
+ l.vertices[i].Y = d.readFloat64()
+ l.vertices[i].Z = d.readFloat64()
+ }
+ l.originInside = d.readBool()
+ l.depth = int(d.readUint32())
+ l.bound.decode(d)
+ l.subregionBound = ExpandForSubregions(l.bound)
+
+ l.index = NewShapeIndex()
+ l.index.Add(l)
+}
+
+// Bitmask values for the compressed encoding properties.
+const (
+ originInside = 1 << iota
+ boundEncoded
+)
+
+func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi {
+ ret := make([]xyzFaceSiTi, len(l.vertices))
+ for i, v := range l.vertices {
+ ret[i].xyz = v
+ ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v)
+ }
+ return ret
+}
+
+func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
+ if len(l.vertices) != len(vertices) {
+ panic("encodeCompressed: vertices must be the same length as l.vertices")
+ }
+ if len(vertices) > maxEncodedVertices {
+ if e.err == nil {
+ e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices)
+ }
+ return
+ }
+ e.writeUvarint(uint64(len(vertices)))
+ encodePointsCompressed(e, vertices, snapLevel)
+
+ props := l.compressedEncodingProperties()
+ e.writeUvarint(props)
+ e.writeUvarint(uint64(l.depth))
+ if props&boundEncoded != 0 {
+ l.bound.encode(e)
+ }
+}
+
+func (l *Loop) compressedEncodingProperties() uint64 {
+ var properties uint64
+ if l.originInside {
+ properties |= originInside
+ }
+
+ // Write whether there is a bound so we can change the threshold later.
+ // Recomputing the bound multiplies the decode time taken per vertex
+ // by a factor of about 3.5. Without recomputing the bound, decode
+ // takes approximately 125 ns / vertex. A loop with 63 vertices
+ // encoded without the bound will take ~30us to decode, which is
+ // acceptable. At ~3.5 bytes / vertex without the bound, adding
+ // the bound will increase the size by <15%, which is also acceptable.
+ const minVerticesForBound = 64
+ if len(l.vertices) >= minVerticesForBound {
+ properties |= boundEncoded
+ }
+
+ return properties
+}
+
+func (l *Loop) decodeCompressed(d *decoder, snapLevel int) {
+ nvertices := d.readUvarint()
+ if d.err != nil {
+ return
+ }
+ if nvertices > maxEncodedVertices {
+ d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
+ return
+ }
+ l.vertices = make([]Point, nvertices)
+ decodePointsCompressed(d, snapLevel, l.vertices)
+ properties := d.readUvarint()
+
+ // Make sure values are valid before using.
+ if d.err != nil {
+ return
+ }
+
+ l.originInside = (properties & originInside) != 0
+
+ l.depth = int(d.readUvarint())
+
+ if (properties & boundEncoded) != 0 {
+ l.bound.decode(d)
+ if d.err != nil {
+ return
+ }
+ l.subregionBound = ExpandForSubregions(l.bound)
+ } else {
+ l.initBound()
+ }
+
+ l.index = NewShapeIndex()
+ l.index.Add(l)
+}
+
+// crossingTarget is an enum representing the possible crossing target cases for relations.
+type crossingTarget int
+
+const (
+ crossingTargetDontCare crossingTarget = iota
+ crossingTargetDontCross
+ crossingTargetCross
+)
+
+// loopRelation defines the interface for checking a type of relationship between two loops.
+// Some examples of relations are Contains, Intersects, or CompareBoundary.
+type loopRelation interface {
+ // Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit
+ // condition for the loop relation. If any point P is found such that
+ //
+ // A.ContainsPoint(P) == aCrossingTarget() &&
+ // B.ContainsPoint(P) == bCrossingTarget()
+ //
+ // then the loop relation is assumed to be the same as if a pair of crossing
+ // edges were found. For example, the ContainsPoint relation has
+ //
+ // aCrossingTarget() == crossingTargetDontCross
+ // bCrossingTarget() == crossingTargetCross
+ //
+ // because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true
+ // for any point P, then it is equivalent to finding an edge crossing (in
+ // both cases Contains returns false).
+ //
+ // Loop relations that do not have an early-exit condition of this form
+ // should return crossingTargetDontCare for both crossing targets.
+
+ // aCrossingTarget returns the crossing target for loop A, or
+ // crossingTargetDontCare if the relation has no early-exit condition.
+ aCrossingTarget() crossingTarget
+ // bCrossingTarget returns the crossing target for loop B, or
+ // crossingTargetDontCare if the relation has no early-exit condition.
+ bCrossingTarget() crossingTarget
+
+ // wedgesCross reports if a shared vertex ab1 and the two associated wedges
+ // (a0, ab1, a2) and (b0, ab1, b2) are equivalent to an edge crossing.
+ // The loop relation is also allowed to maintain its own internal state, and
+ // can return true if it observes any sequence of wedges that are equivalent
+ // to an edge crossing.
+ wedgesCross(a0, ab1, a2, b0, b2 Point) bool
+}
+
+// loopCrosser is a helper type for determining whether two loops cross.
+// It is instantiated twice for each pair of loops to be tested, once for the
+// pair (A,B) and once for the pair (B,A), in order to be able to process
+// edges in either loop nesting order.
+type loopCrosser struct {
+ a, b *Loop
+ relation loopRelation
+ swapped bool
+ aCrossingTarget crossingTarget
+ bCrossingTarget crossingTarget
+
+ // state maintained by startEdge and edgeCrossesCell.
+ crosser *EdgeCrosser
+ aj, bjPrev int
+
+ // temporary data declared here to avoid repeated memory allocations.
+ bQuery *CrossingEdgeQuery
+ bCells []*ShapeIndexCell
+}
+
+// newLoopCrosser creates a loopCrosser from the given values. If swapped is true,
+// the loops A and B have been swapped. This affects how arguments are passed to
+// the given loop relation, since for example A.Contains(B) is not the same as
+// B.Contains(A).
+func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser {
+ l := &loopCrosser{
+ a: a,
+ b: b,
+ relation: relation,
+ swapped: swapped,
+ aCrossingTarget: relation.aCrossingTarget(),
+ bCrossingTarget: relation.bCrossingTarget(),
+ bQuery: NewCrossingEdgeQuery(b.index),
+ }
+ if swapped {
+ l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget
+ }
+
+ return l
+}
+
+// startEdge sets the crosser's state for checking the given edge of loop A.
+func (l *loopCrosser) startEdge(aj int) {
+ l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1))
+ l.aj = aj
+ l.bjPrev = -2
+}
+
+// edgeCrossesCell reports whether the current edge of loop A has any crossings with
+// edges of the index cell of loop B.
+func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool {
+ // Test the current edge of A against all edges of bClipped
+ bNumEdges := bClipped.numEdges()
+ for j := 0; j < bNumEdges; j++ {
+ bj := bClipped.edges[j]
+ if bj != l.bjPrev+1 {
+ l.crosser.RestartAt(l.b.Vertex(bj))
+ }
+ l.bjPrev = bj
+ if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross {
+ continue
+ } else if crossing == Cross {
+ return true
+ }
+
+ // We only need to check each shared vertex once, so we only
+ // consider the case where l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1).
+ if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) {
+ if l.swapped {
+ if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) {
+ return true
+ }
+ } else {
+ if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+// cellCrossesCell reports whether there are any edge crossings or wedge crossings
+// within the two given cells.
+func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool {
+ // Test all edges of aClipped against all edges of bClipped.
+ for _, edge := range aClipped.edges {
+ l.startEdge(edge)
+ if l.edgeCrossesCell(bClipped) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// cellCrossesAnySubcell reports whether, given an index cell of A, there are
+// any edge or wedge crossings with any index cell of B contained within bID.
+func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool {
+ // Test all edges of aClipped against all edges of B. The relevant B
+ // edges are guaranteed to be children of bID, which lets us find the
+ // correct index cells more efficiently.
+ bRoot := PaddedCellFromCellID(bID, 0)
+ for _, aj := range aClipped.edges {
+ // Use a CrossingEdgeQuery starting at bRoot to find the index cells
+ // of B that might contain crossing edges.
+ l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot)
+ if len(l.bCells) == 0 {
+ continue
+ }
+ l.startEdge(aj)
+ for c := 0; c < len(l.bCells); c++ {
+ if l.edgeCrossesCell(l.bCells[c].shapes[0]) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// hasCrossing reports whether, given two iterators positioned such that
+// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing
+// anywhere within ai.cellID(). This function advances bi only past ai.cellID().
+func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool {
+ // If ai.CellID() intersects many edges of B, then it is faster to use
+ // CrossingEdgeQuery to narrow down the candidates. But if it intersects
+ // only a few edges, it is faster to check all the crossings directly.
+ // We handle this by advancing bi and keeping track of how many edges we
+ // would need to test.
+ const edgeQueryMinEdges = 20 // Tuned from benchmarks.
+ var totalEdges int
+ l.bCells = nil
+
+ for {
+ if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 {
+ totalEdges += n
+ if totalEdges >= edgeQueryMinEdges {
+ // There are too many edges to test them directly, so use CrossingEdgeQuery.
+ if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) {
+ return true
+ }
+ bi.seekBeyond(ai)
+ return false
+ }
+ l.bCells = append(l.bCells, bi.indexCell())
+ }
+ bi.next()
+ if bi.cellID() > ai.rangeMax {
+ break
+ }
+ }
+
+ // Test all the edge crossings directly.
+ for _, c := range l.bCells {
+ if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// containsCenterMatches reports whether the clipped shape's containsCenter
+// boolean corresponds to the given crossing target type. (This works around
+// C++ allowing implicit conversions and comparisons between bool and int,
+// e.g. false == 0 and true == 1.)
+func containsCenterMatches(a *clippedShape, target crossingTarget) bool {
+ return (!a.containsCenter && target == crossingTargetDontCross) ||
+ (a.containsCenter && target == crossingTargetCross)
+}
+
+// hasCrossingRelation reports whether, given two iterators positioned such that
+// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship
+// anywhere within ai.cellID(). Specifically, this method returns true if there
+// is an edge crossing, a wedge crossing, or a point P that matches both
+// relations' crossing targets. This function advances both iterators past
+// ai.cellID().
+func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool {
+ aClipped := ai.it.IndexCell().shapes[0]
+ if aClipped.numEdges() != 0 {
+ // The current cell of A has at least one edge, so check for crossings.
+ if l.hasCrossing(ai, bi) {
+ return true
+ }
+ ai.next()
+ return false
+ }
+
+ if !containsCenterMatches(aClipped, l.aCrossingTarget) {
+ // The crossing target for A is not satisfied, so we skip over these cells of B.
+ bi.seekBeyond(ai)
+ ai.next()
+ return false
+ }
+
+ // All points within ai.cellID() satisfy the crossing target for A, so it's
+ // worth iterating through the cells of B to see whether any cell
+ // centers also satisfy the crossing target for B.
+ for bi.cellID() <= ai.rangeMax {
+ bClipped := bi.it.IndexCell().shapes[0]
+ if containsCenterMatches(bClipped, l.bCrossingTarget) {
+ return true
+ }
+ bi.next()
+ }
+ ai.next()
+ return false
+}
+
+// hasCrossingRelation checks all edges of loop A for intersection against all
+// edges of loop B and reports whether any pair of edges satisfies the given
+// relation. If there is any shared vertex, the wedges centered at that vertex
+// are sent to the given relation to be tested.
+//
+// If the two loop boundaries cross, this method is guaranteed to return
+// true. It also returns true in certain cases if the loop relationship is
+// equivalent to crossing. For example, if the relation is Contains and a
+// point P is found such that B contains P but A does not contain P, this
+// method will return true to indicate that the result is the same as though
+// a pair of crossing edges were found (since Contains returns false in
+// both cases).
+//
+// See Contains, Intersects and CompareBoundary for the three uses of this function.
+func hasCrossingRelation(a, b *Loop, relation loopRelation) bool {
+ // We look for CellID ranges where the indexes of A and B overlap, and
+ // then test those edges for crossings.
+ ai := newRangeIterator(a.index)
+ bi := newRangeIterator(b.index)
+
+ ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B
+ ba := newLoopCrosser(b, a, relation, true) // Tests edges of B against A
+
+ for !ai.done() || !bi.done() {
+ if ai.rangeMax < bi.rangeMin {
+ // The A and B cells don't overlap, and A precedes B.
+ ai.seekTo(bi)
+ } else if bi.rangeMax < ai.rangeMin {
+ // The A and B cells don't overlap, and B precedes A.
+ bi.seekTo(ai)
+ } else {
+ // One cell contains the other. Determine which cell is larger.
+ abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb())
+ if abRelation > 0 {
+ // A's index cell is larger.
+ if ab.hasCrossingRelation(ai, bi) {
+ return true
+ }
+ } else if abRelation < 0 {
+ // B's index cell is larger.
+ if ba.hasCrossingRelation(bi, ai) {
+ return true
+ }
+ } else {
+ // The A and B cells are the same. Since the two cells
+ // have the same center point P, check whether P satisfies
+ // the crossing targets.
+ aClipped := ai.it.IndexCell().shapes[0]
+ bClipped := bi.it.IndexCell().shapes[0]
+ if containsCenterMatches(aClipped, ab.aCrossingTarget) &&
+ containsCenterMatches(bClipped, ab.bCrossingTarget) {
+ return true
+ }
+ // Otherwise test all the edge crossings directly.
+ if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) {
+ return true
+ }
+ ai.next()
+ bi.next()
+ }
+ }
+ }
+ return false
+}
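+
+// Example (a sketch of expected usage): the exported Loop methods pair a
+// relation with hasCrossingRelation, e.g. a Contains-style check can reject
+// as soon as any crossing is found:
+//
+//	if hasCrossingRelation(a, b, &containsRelation{}) {
+//		return false // some edge or wedge crosses, so A cannot contain B
+//	}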
+
+// containsRelation implements loopRelation for a contains operation. If
+// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent
+// to having an edge crossing (i.e., Contains returns false).
+type containsRelation struct {
+ foundSharedVertex bool
+}
+
+func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross }
+func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
+func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+ c.foundSharedVertex = true
+ return !WedgeContains(a0, ab1, a2, b0, b2)
+}
+
+// intersectsRelation implements loopRelation for an intersects operation. Given
+// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true,
+// it is equivalent to having an edge crossing (i.e., Intersects returns true).
+type intersectsRelation struct {
+ foundSharedVertex bool
+}
+
+func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross }
+func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross }
+func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+ i.foundSharedVertex = true
+ return WedgeIntersects(a0, ab1, a2, b0, b2)
+}
+
+// compareBoundaryRelation implements loopRelation for comparing boundaries.
+//
+// The compare boundary relation does not have a useful early-exit condition,
+// so we return crossingTargetDontCare for both crossing targets.
+//
+// Aside: A possible early exit condition could be based on the following.
+// If A contains a point of both B and ~B, then A intersects Boundary(B).
+// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B).
+// So if the intersections of {A, ~A} with {B, ~B} are all non-empty,
+// the return value is 0, i.e., Boundary(A) intersects Boundary(B).
+// Unfortunately it isn't worth detecting this situation because by the
+// time we have seen a point in all four intersection regions, we are also
+// guaranteed to have seen at least one pair of crossing edges.
+type compareBoundaryRelation struct {
+ reverse bool // True if the other loop should be reversed.
+ foundSharedVertex bool // True if any wedge was processed.
+ containsEdge bool // True if any edge of the other loop is contained by this loop.
+ excludesEdge bool // True if any edge of the other loop is excluded by this loop.
+}
+
+func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation {
+ return &compareBoundaryRelation{reverse: reverse}
+}
+
+func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare }
+func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool {
+ // Because we don't care about the interior of the other loop, only its
+ // boundary, it is sufficient to check whether this loop contains the
+ // semiwedge (ab1, b2).
+ c.foundSharedVertex = true
+ if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) {
+ c.containsEdge = true
+ } else {
+ c.excludesEdge = true
+ }
+ return c.containsEdge && c.excludesEdge
+}
+
+// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the
+// "semiwedge" defined as any non-empty open set of rays immediately CCW from
+// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW;
+// this simulates what would happen if the direction of the other loop was reversed.
+func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool {
+ if b2 == a0 || b2 == a2 {
+ // We have a shared or reversed edge.
+ return (b2 == a0) == reverse
+ }
+ return OrderedCCW(a0, a2, b2, ab1)
+}
+
+// containsNonCrossingBoundary reports whether, for two loops whose boundaries
+// do not cross (see compareBoundary), this loop contains the boundary of the
+// other loop. If reverseOther is true, the boundary of the other loop is
+// reversed first (which only affects the result when there are shared edges).
+// This method is cheaper than compareBoundary because it does not test for
+// edge intersections.
+//
+// This function requires that neither loop is empty, and that if the other
+// loop is full, then reverseOther == false.
+func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool {
+ // The bounds must intersect for containment.
+ if !l.bound.Intersects(other.bound) {
+ return false
+ }
+
+ // Full loops are handled as though the loop surrounded the entire sphere.
+ if l.IsFull() {
+ return true
+ }
+ if other.IsFull() {
+ return false
+ }
+
+ m, ok := l.findVertex(other.Vertex(0))
+ if !ok {
+ // Since the other loop's vertex 0 is not shared, we can check whether
+ // this loop contains it.
+ return l.ContainsPoint(other.Vertex(0))
+ }
+ // Otherwise check whether the edge (b0, b1) is contained by this loop.
+ return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1),
+ other.Vertex(1), reverseOther)
+}
+
+// TODO(roberts): Differences from the C++ version:
+// DistanceToPoint
+// DistanceToBoundary
+// Project
+// ProjectToBoundary
+// BoundaryApproxEqual
+// BoundaryNear
diff --git a/vendor/github.com/golang/geo/s2/matrix3x3.go b/vendor/github.com/golang/geo/s2/matrix3x3.go
new file mode 100644
index 000000000..01696fe83
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/matrix3x3.go
@@ -0,0 +1,127 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+
+ "github.com/golang/geo/r3"
+)
+
+// matrix3x3 represents a traditional 3x3 matrix of floating point values.
+// This is not a full-fledged matrix. It only contains the pieces needed
+// to satisfy the computations done within the s2 package.
+type matrix3x3 [3][3]float64
+
+// col returns the given column as a Point.
+func (m *matrix3x3) col(col int) Point {
+ return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}}
+}
+
+// row returns the given row as a Point.
+func (m *matrix3x3) row(row int) Point {
+ return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}}
+}
+
+// setCol sets the specified column to the value in the given Point.
+func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 {
+ m[0][col] = p.X
+ m[1][col] = p.Y
+ m[2][col] = p.Z
+
+ return m
+}
+
+// setRow sets the specified row to the value in the given Point.
+func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 {
+ m[row][0] = p.X
+ m[row][1] = p.Y
+ m[row][2] = p.Z
+
+ return m
+}
+
+// scale multiplies the matrix by the given value.
+func (m *matrix3x3) scale(f float64) *matrix3x3 {
+ return &matrix3x3{
+ [3]float64{f * m[0][0], f * m[0][1], f * m[0][2]},
+ [3]float64{f * m[1][0], f * m[1][1], f * m[1][2]},
+ [3]float64{f * m[2][0], f * m[2][1], f * m[2][2]},
+ }
+}
+
+// mul returns the multiplication of m by the Point p and converts the
+// resulting 3x1 matrix into a Point.
+func (m *matrix3x3) mul(p Point) Point {
+ return Point{r3.Vector{
+ m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z,
+ m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z,
+ m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z,
+ }}
+}
+
+// det returns the determinant of this matrix.
+func (m *matrix3x3) det() float64 {
+ // | a b c |
+ // det | d e f | = aei + bfg + cdh - ceg - bdi - afh
+ // | g h i |
+ return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] -
+ m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1]
+}
+
+// transpose reflects the matrix along its diagonal and returns the result.
+func (m *matrix3x3) transpose() *matrix3x3 {
+ m[0][1], m[1][0] = m[1][0], m[0][1]
+ m[0][2], m[2][0] = m[2][0], m[0][2]
+ m[1][2], m[2][1] = m[2][1], m[1][2]
+
+ return m
+}
+
+// String formats the matrix into an easier-to-read layout.
+func (m *matrix3x3) String() string {
+ return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]",
+ m[0][0], m[0][1], m[0][2],
+ m[1][0], m[1][1], m[1][2],
+ m[2][0], m[2][1], m[2][2],
+ )
+}
+
+// getFrame returns the orthonormal frame for the given point on the unit sphere.
+func getFrame(p Point) matrix3x3 {
+ // Given the point p on the unit sphere, extend this into a right-handed
+ // coordinate frame of unit-length column vectors m = (x,y,z). Note that
+ // the vectors (x,y) are an orthonormal frame for the tangent space at point p,
+ // while p itself is an orthonormal frame for the normal space at p.
+ m := matrix3x3{}
+ m.setCol(2, p)
+ m.setCol(1, Point{p.Ortho()})
+ m.setCol(0, Point{m.col(1).Cross(p.Vector)})
+ return m
+}
+
+// toFrame returns the coordinates of the given point with respect to its orthonormal basis m.
+// The resulting point q satisfies the identity (m * q == p).
+func toFrame(m matrix3x3, p Point) Point {
+ // The inverse of an orthonormal matrix is its transpose.
+ return m.transpose().mul(p)
+}
+
+// fromFrame returns the coordinates of the given point in standard axis-aligned basis
+// from its orthonormal basis m.
+// The resulting point p satisfies the identity (p == m * q).
+func fromFrame(m matrix3x3, q Point) Point {
+ return m.mul(q)
+}
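+
+// Example (a sketch, not called by the library): because m is orthonormal,
+// toFrame and fromFrame are inverses of one another, up to floating-point
+// error. For a unit-length point p and any point q:
+//
+//	m := getFrame(p)
+//	q2 := fromFrame(m, toFrame(m, q)) // q2 approximately equals q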
diff --git a/vendor/github.com/golang/geo/s2/max_distance_targets.go b/vendor/github.com/golang/geo/s2/max_distance_targets.go
new file mode 100644
index 000000000..589231890
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/max_distance_targets.go
@@ -0,0 +1,306 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// maxDistance implements distance as the supplementary distance (Pi - x) so
+// that the distance-related algorithms can find the furthest results.
+type maxDistance s1.ChordAngle
+
+func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
+func (m maxDistance) zero() distance { return maxDistance(s1.StraightChordAngle) }
+func (m maxDistance) negative() distance { return maxDistance(s1.InfChordAngle()) }
+func (m maxDistance) infinity() distance { return maxDistance(s1.NegativeChordAngle) }
+func (m maxDistance) less(other distance) bool { return m.chordAngle() > other.chordAngle() }
+func (m maxDistance) sub(other distance) distance {
+ return maxDistance(m.chordAngle() + other.chordAngle())
+}
+func (m maxDistance) chordAngleBound() s1.ChordAngle {
+ return s1.StraightChordAngle - m.chordAngle()
+}
+func (m maxDistance) updateDistance(dist distance) (distance, bool) {
+ if dist.less(m) {
+ m = maxDistance(dist.chordAngle())
+ return m, true
+ }
+ return m, false
+}
+
+func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance {
+ return maxDistance(o)
+}
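+
+// Example (a sketch): maxDistance inverts the usual ordering, so a larger
+// chord angle compares as less, i.e. better, during a furthest-edge search:
+//
+//	near := maxDistance(s1.ChordAngleFromAngle(1)) // 1 radian
+//	far := maxDistance(s1.ChordAngleFromAngle(2))  // 2 radians
+//	_ = far.less(near)                             // true: farther wins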
+
+// MaxDistanceToPointTarget is used for computing the maximum distance to a Point.
+type MaxDistanceToPointTarget struct {
+ point Point
+ dist distance
+}
+
+// NewMaxDistanceToPointTarget returns a new target for the given Point.
+func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget {
+ m := maxDistance(0)
+ return &MaxDistanceToPointTarget{point: point, dist: &m}
+}
+
+func (m *MaxDistanceToPointTarget) capBound() Cap {
+ return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, s1.ChordAngle(0))
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point)))
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(maxDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(cell.MaxDistance(m.point)))
+}
+
+func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // For furthest points, we visit the polygons whose interior contains
+ // the antipode of the target point. These are the polygons whose
+ // distance to the target is maxDistance.zero()
+ q := NewContainsPointQuery(index, VertexModelSemiOpen)
+ return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool {
+ return v(shape, m.point)
+ })
+}
+
+func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int { return 300 }
+func (m *MaxDistanceToPointTarget) distance() distance { return m.dist }
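+
+// Example (a sketch, assuming the exported EdgeQuery API in edge_query.go,
+// with index and p standing in for an existing *ShapeIndex and Point): a
+// point target is typically consumed by a furthest-edge query:
+//
+//	q := NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions())
+//	results := q.FindEdges(NewMaxDistanceToPointTarget(p))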
+
+// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge.
+type MaxDistanceToEdgeTarget struct {
+ e Edge
+ dist distance
+}
+
+// NewMaxDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget {
+ m := maxDistance(0)
+ return &MaxDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the antipode of the target. (This
+// is the set of points whose maxDistance to the target is maxDistance.zero)
+func (m *MaxDistanceToEdgeTarget) capBound() Cap {
+ // The following computes a radius equal to half the edge length in an
+ // efficient and numerically stable way.
+ d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
+ r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
+ return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(maxDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(maxDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1)))
+}
+
+func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // We only need to test one edge point. That is because the method *must*
+ // visit a polygon if it fully contains the target, and *is allowed* to
+ // visit a polygon if it intersects the target. If the tested vertex is not
+ // contained, we know the full edge is not contained; if the tested vertex is
+ // contained, then the edge either is fully contained (must be visited) or it
+ // intersects (is allowed to be visited). We visit the center of the edge so
+ // that edge AB gives identical results to BA.
+ target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
+ return target.visitContainingShapes(index, v)
+}
+
+func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 110 }
+func (m *MaxDistanceToEdgeTarget) distance() distance { return m.dist }
+
+// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell.
+type MaxDistanceToCellTarget struct {
+ cell Cell
+ dist distance
+}
+
+// NewMaxDistanceToCellTarget returns a new target for the given Cell.
+func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget {
+ m := maxDistance(0)
+ return &MaxDistanceToCellTarget{cell: cell, dist: m}
+}
+
+func (m *MaxDistanceToCellTarget) capBound() Cap {
+ c := m.cell.CapBound()
+ return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius())
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(m.cell.MaxDistance(p)))
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1)))
+}
+
+func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell)))
+}
+
+func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // We only need to check one point here; the cell center is simplest.
+ // See comment at MaxDistanceToEdgeTarget's visitContainingShapes.
+ target := NewMaxDistanceToPointTarget(m.cell.Center())
+ return target.visitContainingShapes(index, v)
+}
+
+func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int { return 100 }
+func (m *MaxDistanceToCellTarget) distance() distance { return m.dist }
+
+// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex.
+type MaxDistanceToShapeIndexTarget struct {
+ index *ShapeIndex
+ query *EdgeQuery
+ dist distance
+}
+
+// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
+func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget {
+ m := maxDistance(0)
+ return &MaxDistanceToShapeIndexTarget{
+ index: index,
+ dist: m,
+ query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()),
+ }
+}
+
+// capBound returns a Cap that bounds the antipode of the target. This
+// is the set of points whose maxDistance to the target is maxDistance.zero()
+func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
+ // TODO(roberts): Depends on ShapeIndexRegion
+ // c := makeShapeIndexRegion(m.index).CapBound()
+ // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
+ panic("not implemented yet")
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMaxDistanceToPointTarget(p)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMaxDistanceToEdgeTarget(edge)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMaxDistanceToCellTarget(cell)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+// visitContainingShapes returns the polygons containing the antipodal
+// reflection of *any* connected component for target types consisting of
+// multiple connected components. It is sufficient to test containment of
+// one vertex per connected component, since this allows us to also return
+// any polygon whose boundary has distance.zero() to the target.
+func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // It is sufficient to find the set of chain starts in the target index
+ // (i.e., one vertex per connected component of edges) that are contained by
+ // the query index, except for one special case to handle full polygons.
+ //
+ // TODO(roberts): Do this by merge-joining the two ShapeIndexes and share
+ // the code with BooleanOperation.
+ for _, shape := range m.index.shapes {
+ numChains := shape.NumChains()
+ // Shapes that don't have any edges require a special case (below).
+ testedPoint := false
+ for c := 0; c < numChains; c++ {
+ chain := shape.Chain(c)
+ if chain.Length == 0 {
+ continue
+ }
+ testedPoint = true
+ target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+ if !target.visitContainingShapes(index, v) {
+ return false
+ }
+ }
+ if !testedPoint {
+ // Special case to handle full polygons.
+ ref := shape.ReferencePoint()
+ if !ref.Contained {
+ continue
+ }
+ target := NewMaxDistanceToPointTarget(ref.Point)
+ if !target.visitContainingShapes(index, v) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+ m.query.opts.maxError = maxErr
+ return true
+}
+func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 70 }
+func (m *MaxDistanceToShapeIndexTarget) distance() distance { return m.dist }
+func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
+ m.query.opts.includeInteriors = b
+}
+func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
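+
+// Example (a sketch, assuming the exported EdgeQuery API in edge_query.go,
+// with indexA and indexB standing in for existing *ShapeIndex values): the
+// furthest distance between two indexes, interiors included:
+//
+//	q := NewFurthestEdgeQuery(indexA, NewFurthestEdgeQueryOptions())
+//	target := NewMaxDistanceToShapeIndexTarget(indexB)
+//	target.setIncludeInteriors(true)
+//	d := q.Distance(target) // s1.ChordAngle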
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget
diff --git a/vendor/github.com/golang/geo/s2/metric.go b/vendor/github.com/golang/geo/s2/metric.go
new file mode 100644
index 000000000..53db3d317
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/metric.go
@@ -0,0 +1,164 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file implements functions for various S2 measurements.
+
+import "math"
+
+// A Metric is a measure for cells. It is used to describe the shape and size
+// of cells. Metrics are useful for deciding which cell level to use in order
+// to satisfy a given condition (e.g. that cell vertices must be no further
+// than "x" apart). You can use the Value(level) method to compute the
+// corresponding length or area on the unit sphere for cells at a given level.
+// The minimum and maximum bounds are valid for cells at all levels, but they
+// may be somewhat conservative for very large cells (e.g. face cells).
+type Metric struct {
+ // Dim is either 1 or 2, for a 1D or 2D metric respectively.
+ Dim int
+ // Deriv is the scaling factor for the metric.
+ Deriv float64
+}
+
+// Defined metrics.
+// Of the projection methods defined in C++, Go only supports the quadratic projection.
+
+// Each cell is bounded by four planes passing through its four edges and
+// the center of the sphere. These metrics relate to the angle between each
+// pair of opposite bounding planes, or equivalently, between the planes
+// corresponding to two different s-values or two different t-values.
+var (
+ MinAngleSpanMetric = Metric{1, 4.0 / 3}
+ AvgAngleSpanMetric = Metric{1, math.Pi / 2}
+ MaxAngleSpanMetric = Metric{1, 1.704897179199218452}
+)
+
+// The width of a geometric figure is defined as the distance between two
+// parallel bounding lines in a given direction. For cells, the minimum
+// width is always attained between two opposite edges, and the maximum
+// width is attained between two opposite vertices. However, for our
+// purposes we redefine the width of a cell as the perpendicular distance
+// between a pair of opposite edges. A cell therefore has two widths, one
+// in each direction. The minimum width according to this definition agrees
+// with the classic geometric one, but the maximum width is different. (The
+// maximum geometric width corresponds to MaxDiag defined below.)
+//
+// The average width in both directions for all cells at level k is approximately
+// AvgWidthMetric.Value(k).
+//
+// The width is useful for bounding the minimum or maximum distance from a
+// point on one edge of a cell to the closest point on the opposite edge.
+// For example, this is useful when growing regions by a fixed distance.
+var (
+ MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3}
+ AvgWidthMetric = Metric{1, 1.434523672886099389}
+ MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv}
+)
+
+// The edge length metrics can be used to bound the minimum, maximum,
+// or average distance from the center of one cell to the center of one of
+// its edge neighbors. In particular, it can be used to bound the distance
+// between adjacent cell centers along the space-filling Hilbert curve for
+// cells at any given level.
+var (
+ MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3}
+ AvgEdgeMetric = Metric{1, 1.459213746386106062}
+ MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv}
+
+ // MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level,
+ // where the edge aspect ratio of a cell is defined as the ratio of its longest
+ // edge length to its shortest edge length.
+ MaxEdgeAspect = 1.442615274452682920
+
+ MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9}
+ AvgAreaMetric = Metric{2, 4 * math.Pi / 6}
+ MaxAreaMetric = Metric{2, 2.635799256963161491}
+)
+
+// The maximum diagonal is also the maximum diameter of any cell,
+// and also the maximum geometric width (see the comment for widths). For
+// example, the distance from an arbitrary point to the closest cell center
+// at a given level is at most half the maximum diagonal length.
+var (
+ MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9}
+ AvgDiagMetric = Metric{1, 2.060422738998471683}
+ MaxDiagMetric = Metric{1, 2.438654594434021032}
+
+ // MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any
+ // level, where the diagonal aspect ratio of a cell is defined as the ratio
+ // of its longest diagonal length to its shortest diagonal length.
+ MaxDiagAspect = math.Sqrt(3)
+)
+
+// Value returns the value of the metric at the given level.
+func (m Metric) Value(level int) float64 {
+ return math.Ldexp(m.Deriv, -m.Dim*level)
+}
+
+// MinLevel returns the minimum level such that the metric is at most
+// the given value, or maxLevel (30) if there is no such level.
+//
+// For example, MaxDiagMetric.MinLevel(0.1) returns the minimum level such
+// that all cell diagonal lengths are 0.1 or smaller. The returned value is
+// always a valid level.
+//
+// In C++, this is called GetLevelForMaxValue.
+func (m Metric) MinLevel(val float64) int {
+ if val < 0 {
+ return maxLevel
+ }
+
+ level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1))
+ if level > maxLevel {
+ level = maxLevel
+ }
+ if level < 0 {
+ level = 0
+ }
+ return level
+}
+
+// MaxLevel returns the maximum level such that the metric is at least
+// the given value, or zero if there is no such level.
+//
+// For example, MinWidthMetric.MaxLevel(0.1) returns the maximum level such
+// that all cells have a minimum width of 0.1 or larger. The returned value is
+// always a valid level.
+//
+// In C++, this is called GetLevelForMinValue.
+func (m Metric) MaxLevel(val float64) int {
+ if val <= 0 {
+ return maxLevel
+ }
+
+ level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1)
+ if level > maxLevel {
+ level = maxLevel
+ }
+ if level < 0 {
+ level = 0
+ }
+ return level
+}
+
+// ClosestLevel returns the level at which the metric has approximately the given
+// value. The return value is always a valid level. For example,
+// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge
+// length is approximately 0.1.
+func (m Metric) ClosestLevel(val float64) int {
+ x := math.Sqrt2
+ if m.Dim == 2 {
+ x = 2
+ }
+ return m.MinLevel(x * val)
+}
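+
+// Example (a sketch; the Earth radius constant is an assumption for the
+// example, not part of this package): picking a cell level from a physical
+// size by first converting meters to radians on the unit sphere:
+//
+//	const earthRadiusMeters = 6371010.0
+//	radians := 100000.0 / earthRadiusMeters   // ~100km as an angle
+//	level := MinWidthMetric.MaxLevel(radians) // cells at least ~100km wide
+//	_ = AvgEdgeMetric.Value(level)            // average edge length there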
diff --git a/vendor/github.com/golang/geo/s2/min_distance_targets.go b/vendor/github.com/golang/geo/s2/min_distance_targets.go
new file mode 100644
index 000000000..b1948b203
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/min_distance_targets.go
@@ -0,0 +1,362 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// minDistance implements the distance interface to find the closest
+// (minimum) distance to targets.
+type minDistance s1.ChordAngle
+
+func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) }
+func (m minDistance) zero() distance { return minDistance(0) }
+func (m minDistance) negative() distance { return minDistance(s1.NegativeChordAngle) }
+func (m minDistance) infinity() distance { return minDistance(s1.InfChordAngle()) }
+func (m minDistance) less(other distance) bool { return m.chordAngle() < other.chordAngle() }
+func (m minDistance) sub(other distance) distance {
+ return minDistance(m.chordAngle() - other.chordAngle())
+}
+func (m minDistance) chordAngleBound() s1.ChordAngle {
+ return m.chordAngle().Expanded(m.chordAngle().MaxAngleError())
+}
+
+// updateDistance updates its own value if the other value is less() than it
+// is, and reports whether it updated.
+func (m minDistance) updateDistance(dist distance) (distance, bool) {
+ if dist.less(m) {
+ m = minDistance(dist.chordAngle())
+ return m, true
+ }
+ return m, false
+}
+
+func (m minDistance) fromChordAngle(o s1.ChordAngle) distance {
+ return minDistance(o)
+}
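+
+// Example (a sketch): the updateDistance contract shared by the distance
+// implementations is that the receiver only improves and reports whether it
+// changed:
+//
+//	var d distance = minDistance(s1.InfChordAngle())
+//	d, ok := d.updateDistance(minDistance(s1.ChordAngleFromAngle(0.5)))
+//	// ok == true and d now holds the smaller value.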
+
+// MinDistanceToPointTarget is a type for computing the minimum distance to a Point.
+type MinDistanceToPointTarget struct {
+ point Point
+ dist distance
+}
+
+// NewMinDistanceToPointTarget returns a new target for the given Point.
+func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget {
+ m := minDistance(0)
+ return &MinDistanceToPointTarget{point: point, dist: &m}
+}
+
+func (m *MinDistanceToPointTarget) capBound() Cap {
+ return CapFromCenterChordAngle(m.point, s1.ChordAngle(0))
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ var ok bool
+ dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point)))
+ return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(minDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ var ok bool
+ dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point)))
+ return dist, ok
+}
+
+func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // For minimum distance, we visit the polygons whose interior contains
+ // the target point itself. These are the polygons whose distance to the
+ // target is minDistance.zero().
+ q := NewContainsPointQuery(index, VertexModelSemiOpen)
+ return q.visitContainingShapes(m.point, func(shape Shape) bool {
+ return v(shape, m.point)
+ })
+}
+
+func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int { return 120 }
+func (m *MinDistanceToPointTarget) distance() distance { return m.dist }
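+
+// Example (a sketch, assuming the exported EdgeQuery API in edge_query.go,
+// with index and p standing in for an existing *ShapeIndex and Point): a
+// point target is typically consumed by a closest-edge query:
+//
+//	q := NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions())
+//	d := q.Distance(NewMinDistanceToPointTarget(p)) // s1.ChordAngle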
+
+// ----------------------------------------------------------
+
+// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge.
+type MinDistanceToEdgeTarget struct {
+ e Edge
+ dist distance
+}
+
+// NewMinDistanceToEdgeTarget returns a new target for the given Edge.
+func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget {
+ m := minDistance(0)
+ return &MinDistanceToEdgeTarget{e: e, dist: m}
+}
+
+// capBound returns a Cap that bounds the target edge; this is the set of
+// points whose minDistance to the target is minDistance.zero().
+func (m *MinDistanceToEdgeTarget) capBound() Cap {
+ // The following computes a radius equal to half the edge length in an
+ // efficient and numerically stable way.
+ d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1))
+ r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2))
+ return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2))
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(minDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok {
+ dist, _ = dist.updateDistance(minDistance(d))
+ return dist, true
+ }
+ return dist, false
+}
+
+func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1)))
+}
+
+func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // We test the center of the edge in order to ensure that edge targets AB
+ // and BA yield identical results (which is not guaranteed by the API but
+ // users might expect). Other options would be to test both endpoints, or
+ // return different results for AB and BA in some cases.
+ target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()})
+ return target.visitContainingShapes(index, v)
+}
+
+func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 60 }
+func (m *MinDistanceToEdgeTarget) distance() distance { return m.dist }
+
+// ----------------------------------------------------------
+
+// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell.
+type MinDistanceToCellTarget struct {
+ cell Cell
+ dist distance
+}
+
+// NewMinDistanceToCellTarget returns a new target for the given Cell.
+func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget {
+ m := minDistance(0)
+ return &MinDistanceToCellTarget{cell: cell, dist: m}
+}
+
+func (m *MinDistanceToCellTarget) capBound() Cap {
+ return m.cell.CapBound()
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ return dist.updateDistance(minDistance(m.cell.Distance(p)))
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1)))
+}
+
+func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell)))
+}
+
+func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // The simplest approach is simply to return the polygons that contain the
+ // cell center. Alternatively, if the index cell is smaller than the target
+ // cell then we could return all polygons that are present in the
+ // shapeIndexCell, but since the index is built conservatively this may
+ // include some polygons that don't quite intersect the cell. So we would
+ // either need to recheck for intersection more accurately, or weaken the
+ // VisitContainingShapes contract so that it only guarantees approximate
+ // intersection, neither of which seems like a good tradeoff.
+ target := NewMinDistanceToPointTarget(m.cell.Center())
+ return target.visitContainingShapes(index, v)
+}
+func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false }
+func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 }
+func (m *MinDistanceToCellTarget) distance() distance { return m.dist }
+
+// ----------------------------------------------------------
+
+/*
+// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion.
+type MinDistanceToCellUnionTarget struct {
+ cu CellUnion
+ query *ClosestCellQuery
+ dist distance
+}
+
+// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion.
+func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget {
+ m := minDistance(0)
+ return &MinDistanceToCellUnionTarget{cu: cu, dist: m}
+}
+
+func (m *MinDistanceToCellUnionTarget) capBound() Cap {
+ return m.cu.CapBound()
+}
+
+func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ m.query.opts.DistanceLimit = dist.chordAngle()
+ target := NewMinDistanceToCellTarget(cell)
+ r := m.query.findEdge(target)
+ if r.ShapeID < 0 {
+ return dist, false
+ }
+ return minDistance(r.Distance), true
+}
+
+func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // It is sufficient to test the center of each cell in the union; see the
+ // discussion on MinDistanceToCellTarget's visitContainingShapes for why
+ // a single point per cell suffices.
+ for _, id := range m.cu {
+ target := NewMinDistanceToPointTarget(id.Point())
+ if !target.visitContainingShapes(index, v) {
+ return false
+ }
+ }
+ return true
+}
+func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool {
+ m.query.opts.MaxError = maxErr
+ return true
+}
+func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int { return 30 }
+func (m *MinDistanceToCellUnionTarget) distance() distance { return m.dist }
+*/
+
+// ----------------------------------------------------------
+
+// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex.
+type MinDistanceToShapeIndexTarget struct {
+ index *ShapeIndex
+ query *EdgeQuery
+ dist distance
+}
+
+// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex.
+func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget {
+ m := minDistance(0)
+ return &MinDistanceToShapeIndexTarget{
+ index: index,
+ dist: m,
+ query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()),
+ }
+}
+
+func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
+ // TODO(roberts): Depends on ShapeIndexRegion existing.
+ // c := makeS2ShapeIndexRegion(m.index).CapBound()
+ // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius())
+ panic("not implemented yet")
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMinDistanceToPointTarget(p)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMinDistanceToEdgeTarget(edge)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) {
+ m.query.opts.distanceLimit = dist.chordAngle()
+ target := NewMinDistanceToCellTarget(cell)
+ r := m.query.findEdge(target, m.query.opts)
+ if r.shapeID < 0 {
+ return dist, false
+ }
+ return r.distance, true
+}
+
+// For target types consisting of multiple connected components (such as this one),
+// this method should return the polygons containing *any* connected
+// component. (It is sufficient to test containment of one vertex per
+// connected component, since this allows us to also return any polygon whose
+// boundary has distance.zero() to the target.)
+func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool {
+ // It is sufficient to find the set of chain starts in the target index
+ // (i.e., one vertex per connected component of edges) that are contained by
+ // the query index, except for one special case to handle full polygons.
+ //
+ // TODO(roberts): Do this by merge-joining the two ShapeIndexes.
+ for _, shape := range m.index.shapes {
+ numChains := shape.NumChains()
+ // Shapes that don't have any edges require a special case (below).
+ testedPoint := false
+ for c := 0; c < numChains; c++ {
+ chain := shape.Chain(c)
+ if chain.Length == 0 {
+ continue
+ }
+ testedPoint = true
+ target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0)
+ if !target.visitContainingShapes(index, v) {
+ return false
+ }
+ }
+ if !testedPoint {
+ // Special case to handle full polygons.
+ ref := shape.ReferencePoint()
+ if !ref.Contained {
+ continue
+ }
+ target := NewMinDistanceToPointTarget(ref.Point)
+ if !target.visitContainingShapes(index, v) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool {
+ m.query.opts.maxError = maxErr
+ return true
+}
+func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 }
+func (m *MinDistanceToShapeIndexTarget) distance() distance { return m.dist }
+func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool) {
+ m.query.opts.includeInteriors = b
+}
+func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b }
+
+// TODO(roberts): Remaining methods
+//
+// func (m *MinDistanceToShapeIndexTarget) capBound() Cap {
+// CellUnionTarget
diff --git a/vendor/github.com/golang/geo/s2/nthderivative.go b/vendor/github.com/golang/geo/s2/nthderivative.go
new file mode 100644
index 000000000..73445d6c9
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/nthderivative.go
@@ -0,0 +1,88 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// nthDerivativeCoder provides Nth Derivative Coding.
+// (In signal processing disciplines, this is known as N-th Delta Coding.)
+//
+// Good for varint coding integer sequences with polynomial trends.
+//
+// Instead of coding a sequence of values directly, code its nth-order discrete
+// derivative. Overflow in integer addition and subtraction makes this a
+// lossless transform.
+//
+// constant linear quadratic
+// trend trend trend
+// / \ / \ / \_
+// input |0 0 0 0 1 2 3 4 9 16 25 36
+// 0th derivative(identity) |0 0 0 0 1 2 3 4 9 16 25 36
+// 1st derivative(delta coding) | 0 0 0 1 1 1 1 5 7 9 11
+// 2nd derivative(linear prediction) | 0 0 1 0 0 0 4 2 2 2
+// -------------------------------------
+// 0 1 2 3 4 5 6 7 8 9 10 11
+// n in sequence
+//
+// Higher-order codings can break even or be detrimental on other sequences.
+//
+// random oscillating
+// / \ / \_
+// input |5 9 6 1 8 8 2 -2 4 -4 6 -6
+// 0th derivative(identity) |5 9 6 1 8 8 2 -2 4 -4 6 -6
+// 1st derivative(delta coding) | 4 -3 -5 7 0 -6 -4 6 -8 10 -12
+// 2nd derivative(linear prediction) | -7 -2 12 -7 -6 2 10 -14 18 -22
+// ---------------------------------------
+// 0 1 2 3 4 5 6 7 8 9 10 11
+// n in sequence
+//
+// Note that the nth derivative isn't available until sequence item n. Earlier
+// values are coded at lower order. For the above table, read 5 4 -7 -2 12 ...
+type nthDerivativeCoder struct {
+ n, m int
+ memory [10]int32
+}
+
+// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative).
+// n must be within [0,10].
+func newNthDerivativeCoder(n int) *nthDerivativeCoder {
+ c := &nthDerivativeCoder{n: n}
+ if n < 0 || n > len(c.memory) {
+ panic("unsupported n. Must be within [0,10].")
+ }
+ return c
+}
+
+func (c *nthDerivativeCoder) encode(k int32) int32 {
+ for i := 0; i < c.m; i++ {
+ delta := k - c.memory[i]
+ c.memory[i] = k
+ k = delta
+ }
+ if c.m < c.n {
+ c.memory[c.m] = k
+ c.m++
+ }
+ return k
+}
+
+func (c *nthDerivativeCoder) decode(k int32) int32 {
+ if c.m < c.n {
+ c.m++
+ }
+ for i := c.m - 1; i >= 0; i-- {
+ c.memory[i] += k
+ k = c.memory[i]
+ }
+ return k
+}
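+
+// Example (a sketch): an encoder/decoder pair created with the same order n
+// forms a lossless round trip over any int32 sequence:
+//
+//	enc, dec := newNthDerivativeCoder(2), newNthDerivativeCoder(2)
+//	for _, k := range []int32{1, 4, 9, 16, 25} {
+//		if dec.decode(enc.encode(k)) != k {
+//			panic("round trip failed")
+//		}
+//	}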
diff --git a/vendor/github.com/golang/geo/s2/paddedcell.go b/vendor/github.com/golang/geo/s2/paddedcell.go
new file mode 100644
index 000000000..ac304a6cc
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/paddedcell.go
@@ -0,0 +1,252 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+)
+
+// PaddedCell represents a Cell whose (u,v)-range has been expanded on
+// all sides by a given amount of "padding". Unlike Cell, its methods and
+// representation are optimized for clipping edges against Cell boundaries
+// to determine which cells are intersected by a given set of edges.
+type PaddedCell struct {
+ id CellID
+ padding float64
+ bound r2.Rect
+ middle r2.Rect // A rect in (u, v)-space that belongs to all four children.
+ iLo, jLo int // Minimum (i,j)-coordinates of this cell before padding
+ orientation int // Hilbert curve orientation of this cell.
+ level int
+}
+
+// PaddedCellFromCellID constructs a padded cell with the given padding.
+func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell {
+ p := &PaddedCell{
+ id: id,
+ padding: padding,
+ middle: r2.EmptyRect(),
+ }
+
+ // Fast path for constructing a top-level face (the most common case).
+ if id.isFace() {
+ limit := padding + 1
+ p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}}
+ p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}}
+ p.orientation = id.Face() & 1
+ return p
+ }
+
+ _, p.iLo, p.jLo, p.orientation = id.faceIJOrientation()
+ p.level = id.Level()
+ p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding)
+ ijSize := sizeIJ(p.level)
+ p.iLo &= -ijSize
+ p.jLo &= -ijSize
+
+ return p
+}
+
+// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index.
+// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j
+// indices correspond to increasing u- and v-values respectively.
+func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell {
+ // Compute the position and orientation of the child incrementally from the
+ // orientation of the parent.
+ pos := ijToPos[parent.orientation][2*i+j]
+
+ p := &PaddedCell{
+ id: parent.id.Children()[pos],
+ padding: parent.padding,
+ bound: parent.bound,
+ orientation: parent.orientation ^ posToOrientation[pos],
+ level: parent.level + 1,
+ middle: r2.EmptyRect(),
+ }
+
+ ijSize := sizeIJ(p.level)
+ p.iLo = parent.iLo + i*ijSize
+ p.jLo = parent.jLo + j*ijSize
+
+ // For each child, one corner of the bound is taken directly from the parent
+ // while the diagonally opposite corner is taken from middle().
+ middle := parent.Middle()
+ if i == 1 {
+ p.bound.X.Lo = middle.X.Lo
+ } else {
+ p.bound.X.Hi = middle.X.Hi
+ }
+ if j == 1 {
+ p.bound.Y.Lo = middle.Y.Lo
+ } else {
+ p.bound.Y.Hi = middle.Y.Hi
+ }
+
+ return p
+}
+
+// CellID returns the CellID this padded cell represents.
+func (p PaddedCell) CellID() CellID {
+ return p.id
+}
+
+// Padding returns the amount of padding on this cell.
+func (p PaddedCell) Padding() float64 {
+ return p.padding
+}
+
+// Level returns the level this cell is at.
+func (p PaddedCell) Level() int {
+ return p.level
+}
+
+// Center returns the center of this cell.
+func (p PaddedCell) Center() Point {
+ ijSize := sizeIJ(p.level)
+ si := uint32(2*p.iLo + ijSize)
+ ti := uint32(2*p.jLo + ijSize)
+ return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
+}
+
+// Middle returns the rectangle in the middle of this cell that belongs to
+// all four of its children in (u,v)-space.
+func (p *PaddedCell) Middle() r2.Rect {
+ // We compute this field lazily because it is not needed the majority of the
+ // time (i.e., for cells where the recursion terminates).
+ if p.middle.IsEmpty() {
+ ijSize := sizeIJ(p.level)
+ u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
+ v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
+ p.middle = r2.Rect{
+ r1.Interval{u - p.padding, u + p.padding},
+ r1.Interval{v - p.padding, v + p.padding},
+ }
+ }
+ return p.middle
+}
+
+// Bound returns the bounds for this cell in (u,v)-space including padding.
+func (p PaddedCell) Bound() r2.Rect {
+ return p.bound
+}
+
+// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal
+// position. The traversal position corresponds to the order in which child
+// cells are visited by the Hilbert curve.
+func (p PaddedCell) ChildIJ(pos int) (i, j int) {
+ ij := posToIJ[p.orientation][pos]
+ return ij >> 1, ij & 1
+}
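+
+// Example (a sketch; assumes p is an existing *PaddedCell): visiting the four
+// children in Hilbert curve traversal order:
+//
+//	for pos := 0; pos < 4; pos++ {
+//		i, j := p.ChildIJ(pos)
+//		child := PaddedCellFromParentIJ(p, i, j)
+//		_ = child
+//	}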
+
+// EntryVertex returns the vertex where the space-filling curve enters this cell.
+func (p PaddedCell) EntryVertex() Point {
+ // The curve enters at the (0,0) vertex unless the axis directions are
+ // reversed, in which case it enters at the (1,1) vertex.
+ i := p.iLo
+ j := p.jLo
+ if p.orientation&invertMask != 0 {
+ ijSize := sizeIJ(p.level)
+ i += ijSize
+ j += ijSize
+ }
+ return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
+}
+
+// ExitVertex returns the vertex where the space-filling curve exits this cell.
+func (p PaddedCell) ExitVertex() Point {
+ // The curve exits at the (1,0) vertex unless the axes are swapped or
+ // inverted but not both, in which case it exits at the (0,1) vertex.
+ i := p.iLo
+ j := p.jLo
+ ijSize := sizeIJ(p.level)
+ if p.orientation == 0 || p.orientation == swapMask+invertMask {
+ i += ijSize
+ } else {
+ j += ijSize
+ }
+ return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
+}
+
+// ShrinkToFit returns the smallest CellID that contains all descendants of this
+// padded cell whose bounds intersect the given rect. For algorithms that use
+// recursive subdivision to find the cells that intersect a particular object, this
+// method can be used to skip all of the initial subdivision steps where only
+// one child needs to be expanded.
+//
+// Note that this method is not the same as returning the smallest cell that contains
+// the intersection of this cell with rect. Because of the padding, even if one child
+// completely contains rect it is still possible that a neighboring child may also
+// intersect the given rect.
+//
+// The provided Rect must intersect the bounds of this cell.
+func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
+ // Quick rejection test: if rect contains the center of this cell along
+ // either axis, then no further shrinking is possible.
+ if p.level == 0 {
+ // Fast path (most calls to this function start with a face cell).
+ if rect.X.Contains(0) || rect.Y.Contains(0) {
+ return p.id
+ }
+ }
+
+ ijSize := sizeIJ(p.level)
+ if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) ||
+ rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) {
+ return p.id
+ }
+
+ // Otherwise we expand rect by the given padding on all sides and find
+ // the range of coordinates that it spans along the i- and j-axes. We then
+ // compute the highest bit position at which the min and max coordinates
+ // differ. This corresponds to the first cell level at which at least two
+ // children intersect rect.
+
+ // Increase the padding to compensate for the error in uvToST.
+ // (The constant below is a provable upper bound on the additional error.)
+ padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon)
+ iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded
+ var iXor, jXor int // XOR of the min and max i- or j-coordinates
+
+ if iMin < stToIJ(uvToST(padded.X.Lo)) {
+ iMin = stToIJ(uvToST(padded.X.Lo))
+ }
+ if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b {
+ iXor = iMin ^ a
+ } else {
+ iXor = iMin ^ b
+ }
+
+ if jMin < stToIJ(uvToST(padded.Y.Lo)) {
+ jMin = stToIJ(uvToST(padded.Y.Lo))
+ }
+ if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b {
+ jXor = jMin ^ a
+ } else {
+ jXor = jMin ^ b
+ }
+
+ // Compute the highest bit position where the two i- or j-endpoints differ,
+ // and then choose the cell level that includes both of these endpoints. So
+ // if both pairs of endpoints are equal we choose maxLevel; if they differ
+ // only at bit 0, we choose (maxLevel - 1), and so on.
+ levelMSB := uint64(((iXor | jXor) << 1) + 1)
+ level := maxLevel - findMSBSetNonZero64(levelMSB)
+ if level <= p.level {
+ return p.id
+ }
+
+ return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level)
+}
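+
+// A brief sketch of the intended use (illustrative values; r2.RectFromPoints
+// is defined in r2/rect.go in this vendored tree):
+//
+//	pc := PaddedCellFromCellID(CellIDFromFace(2), 0)
+//	small := r2.RectFromPoints(r2.Point{X: 0.1, Y: 0.1}, r2.Point{X: 0.1001, Y: 0.1001})
+//	_ = pc.ShrinkToFit(small) // a cell several levels below the face cell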
diff --git a/vendor/github.com/golang/geo/s2/point.go b/vendor/github.com/golang/geo/s2/point.go
new file mode 100644
index 000000000..89e7ae0ed
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/point.go
@@ -0,0 +1,258 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Point represents a point on the unit sphere as a normalized 3D vector.
+// Fields should be treated as read-only. Use one of the factory methods for creation.
+type Point struct {
+ r3.Vector
+}
+
+// sortPoints sorts the slice of Points in place.
+func sortPoints(e []Point) {
+ sort.Sort(points(e))
+}
+
+// points implements the Sort interface for slices of Point.
+type points []Point
+
+func (p points) Len() int { return len(p) }
+func (p points) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 }
+
+// PointFromCoords creates a new normalized point from coordinates.
+//
+// This always returns a valid point. If the given coordinates cannot be normalized
+// the origin point will be returned.
+//
+// This behavior is different from the C++ construction of an S2Point from coordinates
+// (i.e. S2Point(x, y, z)), in that the C++ constructor does not Normalize.
+func PointFromCoords(x, y, z float64) Point {
+ if x == 0 && y == 0 && z == 0 {
+ return OriginPoint()
+ }
+ return Point{r3.Vector{x, y, z}.Normalize()}
+}
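+
+// For example:
+//
+//	p := PointFromCoords(3, 4, 0) // stored as the unit vector (0.6, 0.8, 0)
+//	q := PointFromCoords(0, 0, 0) // cannot be normalized; returns OriginPoint()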
+
+// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed
+// reference point. In particular, this is the "point at infinity" used for
+// point-in-polygon testing (by counting the number of edge crossings).
+//
+// It should *not* be a point that is commonly used in edge tests in order
+// to avoid triggering code to handle degenerate cases (this rules out the
+// north and south poles). It should also not be on the boundary of any
+// low-level S2Cell for the same reason.
+func OriginPoint() Point {
+ return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
+}
+
+// PointCross returns a Point that is orthogonal to both p and op. This is similar to
+// p.Cross(op) (the true cross product) except that it does a better job of
+// ensuring orthogonality when p is nearly parallel to op, returns a non-zero
+// result even when p == op or p == -op, and the result is a Point.
+//
+// It satisfies the following properties (f == PointCross):
+//
+// (1) f(p, op) != 0 for all p, op
+// (2) f(op,p) == -f(p,op) unless p == op or p == -op
+// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
+// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
+func (p Point) PointCross(op Point) Point {
+ // NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
+ // but PointCross more accurately describes how this method is used.
+ x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
+
+ // Compare exactly to the 0 vector.
+ if x == (r3.Vector{}) {
+ // The only result that makes sense mathematically is to return zero, but
+ // we find it more convenient to return an arbitrary orthogonal vector.
+ return Point{p.Ortho()}
+ }
+
+ return Point{x}
+}
+
+// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
+// order while sweeping CCW around the point O.
+//
+// You can think of this as testing whether A <= B <= C with respect to the
+// CCW ordering around O that starts at A, or equivalently, whether B is
+// contained in the range of angles (inclusive) that starts at A and extends
+// CCW to C. Properties:
+//
+// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
+// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
+// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
+// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
+// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
+func OrderedCCW(a, b, c, o Point) bool {
+ sum := 0
+ if RobustSign(b, o, a) != Clockwise {
+ sum++
+ }
+ if RobustSign(c, o, b) != Clockwise {
+ sum++
+ }
+ if RobustSign(a, o, c) == CounterClockwise {
+ sum++
+ }
+ return sum >= 2
+}
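+
+// For example, sweeping CCW around the north pole starting at the +x axis
+// (illustrative values):
+//
+//	o := PointFromCoords(0, 0, 1)
+//	a := PointFromCoords(1, 0, 0)
+//	b := PointFromCoords(1, 1, 0)
+//	c := PointFromCoords(0, 1, 0)
+//	OrderedCCW(a, b, c, o) // true: b lies in the CCW range from a to c
+//	OrderedCCW(a, c, b, o) // false: c lies past b in that sweep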
+
+// Distance returns the angle between two points.
+func (p Point) Distance(b Point) s1.Angle {
+ return p.Vector.Angle(b.Vector)
+}
+
+// ApproxEqual reports whether the two points are similar enough to be equal.
+func (p Point) ApproxEqual(other Point) bool {
+ return p.approxEqual(other, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether the two points are within the given epsilon.
+func (p Point) approxEqual(other Point, eps s1.Angle) bool {
+ return p.Vector.Angle(other.Vector) <= eps
+}
+
+// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
+// between the two given points. The points must be unit length.
+func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle {
+ return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2()))
+}
+
+// regularPoints generates a slice of points shaped as a regular polygon with
+// numVertices vertices, all located on a circle of the specified angular radius
+// around the center. The radius is the actual distance from the center to each vertex.
+func regularPoints(center Point, radius s1.Angle, numVertices int) []Point {
+ return regularPointsForFrame(getFrame(center), radius, numVertices)
+}
+
+// regularPointsForFrame generates a slice of points shaped as a regular polygon
+// with numVertices vertices, all on a circle of the specified angular radius around
+// the center. The radius is the actual distance from the center to each vertex.
+func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point {
+ // We construct the loop in the given frame coordinates, with the center at
+ // (0, 0, 1). For a loop of radius r, the loop vertices have the form
+ // (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the
+ // sphere (arc length) from each vertex to the center is acos(cos(r)) = r.
+ z := math.Cos(radius.Radians())
+ r := math.Sin(radius.Radians())
+ radianStep := 2 * math.Pi / float64(numVertices)
+ var vertices []Point
+
+ for i := 0; i < numVertices; i++ {
+ angle := float64(i) * radianStep
+ p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}}
+ vertices = append(vertices, Point{fromFrame(frame, p).Normalize()})
+ }
+
+ return vertices
+}
+
+// CapBound returns a bounding cap for this point.
+func (p Point) CapBound() Cap {
+ return CapFromPoint(p)
+}
+
+// RectBound returns a bounding latitude-longitude rectangle from this point.
+func (p Point) RectBound() Rect {
+ return RectFromLatLng(LatLngFromPoint(p))
+}
+
+// ContainsCell returns false as Points do not contain any other S2 types.
+func (p Point) ContainsCell(c Cell) bool { return false }
+
+// IntersectsCell reports whether this Point intersects the given cell.
+func (p Point) IntersectsCell(c Cell) bool {
+ return c.ContainsPoint(p)
+}
+
+// ContainsPoint reports if this Point contains the other Point.
+// (This method is named to satisfy the Region interface.)
+func (p Point) ContainsPoint(other Point) bool {
+ return p.Contains(other)
+}
+
+// CellUnionBound computes a covering of the Point.
+func (p Point) CellUnionBound() []CellID {
+ return p.CapBound().CellUnionBound()
+}
+
+// Contains reports if this Point contains the other Point.
+// (This method matches all other s2 types where the reflexive Contains
+// method does not contain the type's name.)
+func (p Point) Contains(other Point) bool { return p == other }
+
+// Encode encodes the Point.
+func (p Point) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ p.encode(e)
+ return e.err
+}
+
+func (p Point) encode(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeFloat64(p.X)
+ e.writeFloat64(p.Y)
+ e.writeFloat64(p.Z)
+}
+
+// Decode decodes the Point.
+func (p *Point) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ p.decode(d)
+ return d.err
+}
+
+func (p *Point) decode(d *decoder) {
+ version := d.readInt8()
+ if d.err != nil {
+ return
+ }
+ if version != encodingVersion {
+ d.err = fmt.Errorf("only version %d is supported", encodingVersion)
+ return
+ }
+ p.X = d.readFloat64()
+ p.Y = d.readFloat64()
+ p.Z = d.readFloat64()
+}
+
+// Rotate the given point about the given axis by the given angle. p and
+// axis must be unit length; angle has no restrictions (e.g., it can be
+// positive, negative, greater than 360 degrees, etc).
+func Rotate(p, axis Point, angle s1.Angle) Point {
+ // Let M be the plane through P that is perpendicular to axis, and let
+ // center be the point where M intersects axis. We construct a
+ // right-handed orthogonal frame (dx, dy, center) such that dx is the
+ // vector from center to P, and dy has the same length as dx. The
+ // result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center).
+ center := axis.Mul(p.Dot(axis.Vector))
+ dx := p.Sub(center)
+ dy := axis.Cross(p.Vector)
+ // Mathematically the result is unit length, but normalization is necessary
+ // to ensure that numerical errors don't accumulate.
+ return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()}
+}
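+
+// For example, a quarter turn about the z-axis (illustrative values):
+//
+//	axis := PointFromCoords(0, 0, 1)
+//	p := PointFromCoords(1, 0, 0)
+//	Rotate(p, axis, s1.Angle(math.Pi/2)) // approximately (0, 1, 0)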
diff --git a/vendor/github.com/golang/geo/s2/point_measures.go b/vendor/github.com/golang/geo/s2/point_measures.go
new file mode 100644
index 000000000..6fa9b7ae4
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/point_measures.go
@@ -0,0 +1,149 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// PointArea returns the area of triangle ABC. This method combines two different
+// algorithms to get accurate results for both large and small triangles.
+// The maximum error is about 5e-15 (about 0.25 square meters on the Earth's
+// surface), the same as GirardArea below, but unlike that method it is
+// also accurate for small triangles. Example: when the true area is 100
+// square meters, PointArea yields an error about 1 trillion times smaller than
+// GirardArea.
+//
+// All points should be unit length, and no two points should be antipodal.
+// The area is always positive.
+func PointArea(a, b, c Point) float64 {
+ // This method is based on l'Huilier's theorem,
+ //
+ // tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2))
+ //
+ // where E is the spherical excess of the triangle (i.e. its area),
+ // a, b, c are the side lengths, and
+ // s is the semiperimeter (a + b + c) / 2.
+ //
+ // The only significant source of error using l'Huilier's method is the
+ // cancellation error of the terms (s-a), (s-b), (s-c). This leads to a
+ // *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares
+ // to a relative error of about 1e-15 / E using Girard's formula, where E is
+ // the true area of the triangle. Girard's formula can be even worse than
+ // this for very small triangles, e.g. a triangle with a true area of 1e-30
+ // might evaluate to 1e-5.
+ //
+ // So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where
+ // dmin = min(s-a, s-b, s-c). This basically includes all triangles
+ // except for extremely long and skinny ones.
+ //
+ // Since we don't know E, we would like a conservative upper bound on
+ // the triangle area in terms of s and dmin. It's possible to show that
+// E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1.1).
+ // Using this, it's easy to show that we should always use l'Huilier's
+ // method if dmin >= k2 * s^5, where k2 is about 1e-2. Furthermore,
+ // if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where
+ // k3 is about 0.1. Since the best case error using Girard's formula
+ // is about 1e-15, this means that we shouldn't even consider it unless
+ // s >= 3e-4 or so.
+ sa := float64(b.Angle(c.Vector))
+ sb := float64(c.Angle(a.Vector))
+ sc := float64(a.Angle(b.Vector))
+ s := 0.5 * (sa + sb + sc)
+ if s >= 3e-4 {
+ // Consider whether Girard's formula might be more accurate.
+ dmin := s - math.Max(sa, math.Max(sb, sc))
+ if dmin < 1e-2*s*s*s*s*s {
+ // This triangle is skinny enough to use Girard's formula.
+ area := GirardArea(a, b, c)
+ if dmin < s*0.1*area {
+ return area
+ }
+ }
+ }
+
+ // Use l'Huilier's formula.
+ return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))*
+ math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
+}
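+
+// For example, the triangle spanning one octant of the sphere has area
+// exactly Pi/2, one eighth of the sphere's total area of 4*Pi:
+//
+//	a := PointFromCoords(1, 0, 0)
+//	b := PointFromCoords(0, 1, 0)
+//	c := PointFromCoords(0, 0, 1)
+//	PointArea(a, b, c) // ~= math.Pi / 2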
+
+// GirardArea returns the area of the triangle computed using Girard's formula.
+// All points should be unit length, and no two points should be antipodal.
+//
+// This method is about twice as fast as PointArea() but has poor relative
+// accuracy for small triangles. The maximum error is about 5e-15 (about
+// 0.25 square meters on the Earth's surface) and the average error is about
+// 1e-15. These bounds apply to triangles of any size, even as the maximum
+// edge length of the triangle approaches 180 degrees. But note that for
+// such triangles, tiny perturbations of the input points can change the
+// true mathematical area dramatically.
+func GirardArea(a, b, c Point) float64 {
+ // This is equivalent to the usual Girard's formula but is slightly more
+ // accurate, faster to compute, and handles a == b == c without a special
+ // case. PointCross is necessary to get good accuracy when two of
+ // the input points are very close together.
+ ab := a.PointCross(b)
+ bc := b.PointCross(c)
+ ac := a.PointCross(c)
+
+ area := float64(ab.Angle(ac.Vector) - ab.Angle(bc.Vector) + bc.Angle(ac.Vector))
+ if area < 0 {
+ area = 0
+ }
+ return area
+}
+
+// SignedArea returns a positive value for counterclockwise triangles and a negative
+// value otherwise (similar to PointArea).
+func SignedArea(a, b, c Point) float64 {
+ return float64(RobustSign(a, b, c)) * PointArea(a, b, c)
+}
+
+// Angle returns the interior angle at the vertex B in the triangle ABC. The
+// return value is always in the range [0, pi]. All points should be
+// normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c.
+//
+// The angle is undefined if A or C is diametrically opposite from B, and
+// becomes numerically unstable as the length of edge AB or BC approaches
+// 180 degrees.
+func Angle(a, b, c Point) s1.Angle {
+ // PointCross is necessary to get good accuracy when two of the input
+ // points are very close together.
+ return a.PointCross(b).Angle(c.PointCross(b).Vector)
+}
+
+// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The
+// return value is positive if ABC is counterclockwise and negative otherwise.
+// If you imagine an ant walking from A to B to C, this is the angle that the
+// ant turns at vertex B (positive = left = CCW, negative = right = CW).
+// This quantity is also known as the "geodesic curvature" at B.
+//
+// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct
+// a,b,c. The result is undefined if (a == b || b == c), but is either
+// -Pi or Pi if (a == c). All points should be normalized.
+func TurnAngle(a, b, c Point) s1.Angle {
+ // We use PointCross to get good accuracy when two points are very
+ // close together, and RobustSign to ensure that the sign is correct for
+ // turns that are close to 180 degrees.
+ angle := a.PointCross(b).Angle(b.PointCross(c).Vector)
+
+ // Don't return RobustSign * angle because it is legal to have (a == c).
+ if RobustSign(a, b, c) == CounterClockwise {
+ return angle
+ }
+ return -angle
+}
diff --git a/vendor/github.com/golang/geo/s2/point_vector.go b/vendor/github.com/golang/geo/s2/point_vector.go
new file mode 100644
index 000000000..f8e6f65b5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/point_vector.go
@@ -0,0 +1,42 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// Shape interface enforcement
+var (
+ _ Shape = (*PointVector)(nil)
+)
+
+// PointVector is a Shape representing a set of Points. Each point
+// is represented as a degenerate edge with the same starting and ending
+// vertices.
+//
+// This type is useful for adding a collection of points to an ShapeIndex.
+//
+// Its methods are on *PointVector due to implementation details of ShapeIndex.
+type PointVector []Point
+
+func (p *PointVector) NumEdges() int { return len(*p) }
+func (p *PointVector) Edge(i int) Edge { return Edge{(*p)[i], (*p)[i]} }
+func (p *PointVector) ReferencePoint() ReferencePoint { return OriginReferencePoint(false) }
+func (p *PointVector) NumChains() int { return len(*p) }
+func (p *PointVector) Chain(i int) Chain { return Chain{i, 1} }
+func (p *PointVector) ChainEdge(i, j int) Edge { return Edge{(*p)[i], (*p)[j]} }
+func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{e, 0} }
+func (p *PointVector) Dimension() int { return 0 }
+func (p *PointVector) IsEmpty() bool { return defaultShapeIsEmpty(p) }
+func (p *PointVector) IsFull() bool { return defaultShapeIsFull(p) }
+func (p *PointVector) typeTag() typeTag { return typeTagPointVector }
+func (p *PointVector) privateInterface() {}
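+
+// A brief usage sketch (illustrative; NewShapeIndex is defined in
+// shapeindex.go in this package):
+//
+//	pv := PointVector{PointFromCoords(1, 0, 0), PointFromCoords(0, 1, 0)}
+//	index := NewShapeIndex()
+//	index.Add(&pv) // each point becomes a degenerate edge in the index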
diff --git a/vendor/github.com/golang/geo/s2/pointcompression.go b/vendor/github.com/golang/geo/s2/pointcompression.go
new file mode 100644
index 000000000..018381799
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/pointcompression.go
@@ -0,0 +1,319 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/golang/geo/r3"
+)
+
+// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded.
+// On decode, this defends against malicious encodings that try to make us exhaust RAM.
+const maxEncodedVertices = 50000000
+
+// xyzFaceSiTi represents the XYZ and face,si,ti coordinates of a Point
+// and, if this point is equal to the center of a Cell, the level of this cell
+// (-1 otherwise). This is used for Loops and Polygons to store data in a more
+// compressed format.
+type xyzFaceSiTi struct {
+ xyz Point
+ face int
+ si, ti uint32
+ level int
+}
+
+const derivativeEncodingOrder = 2
+
+func appendFace(faces []faceRun, face int) []faceRun {
+ if len(faces) == 0 || faces[len(faces)-1].face != face {
+ return append(faces, faceRun{face, 1})
+ }
+ faces[len(faces)-1].count++
+ return faces
+}
+
+// encodePointsCompressed uses an optimized compressed format to encode the given values.
+func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) {
+ var faces []faceRun
+ for _, v := range vertices {
+ faces = appendFace(faces, v.face)
+ }
+ encodeFaces(e, faces)
+
+ type piQi struct {
+ pi, qi uint32
+ }
+ verticesPiQi := make([]piQi, len(vertices))
+ for i, v := range vertices {
+ verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)}
+ }
+ piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder)
+ for i, v := range verticesPiQi {
+ f := encodePointCompressed
+ if i == 0 {
+ // The first point will be just the (pi, qi) coordinates
+ // of the Point. NthDerivativeCoder will not save anything
+ // in that case, so we encode in fixed format rather than varint
+ // to avoid the varint overhead.
+ f = encodeFirstPointFixedLength
+ }
+ f(e, v.pi, v.qi, level, piCoder, qiCoder)
+ }
+
+ var offCenter []int
+ for i, v := range vertices {
+ if v.level != level {
+ offCenter = append(offCenter, i)
+ }
+ }
+ e.writeUvarint(uint64(len(offCenter)))
+ for _, idx := range offCenter {
+ e.writeUvarint(uint64(idx))
+ e.writeFloat64(vertices[idx].xyz.X)
+ e.writeFloat64(vertices[idx].xyz.Y)
+ e.writeFloat64(vertices[idx].xyz.Z)
+ }
+}
+
+func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
+ // Do not ZigZagEncode the first point, since it cannot be negative.
+ codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi))
+ // Interleave to reduce overhead from two partial bytes to one.
+ interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi))
+
+ // Write as little endian.
+ bytesRequired := (level + 7) / 8 * 2
+ for i := 0; i < bytesRequired; i++ {
+ e.writeUint8(uint8(interleaved))
+ interleaved >>= 8
+ }
+}
+
+// encodePointCompressed encodes points into e.
+// Given a sequence of Points assumed to be the center of level-k cells,
+// compresses it into a stream using the following method:
+// - decompose the points into (face, si, ti) tuples.
+// - run-length encode the faces, combining face number and count into a
+// varint32. See the faceRun struct.
+// - right shift the (si, ti) to remove the part that's constant for all cells
+// of level-k. The result is called the (pi, qi) space.
+// - 2nd derivative encode the pi and qi sequences (linear prediction)
+// - zig-zag encode all derivative values but the first, which cannot be
+// negative
+// - interleave the zig-zag encoded values
+// - encode the first interleaved value in a fixed length encoding
+// (varint would make this value larger)
+// - encode the remaining interleaved values as varint64s, as the
+// derivative encoding should make the values small.
+// In addition, provides a lossless method to compress a sequence of points even
+// if some points are not the center of level-k cells. These points are stored
+// exactly, using 3 double precision values, after the above encoded string,
+// together with their index in the sequence (this leads to some redundancy - it
+// is expected that only a small fraction of the points are not cell centers).
+//
+// To encode leaf cells, this requires 8 bytes for the first vertex plus
+// an average of 3.8 bytes for each additional vertex, when computed on
+// Google's geographic repository.
+func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
+ // ZigZagEncode, as varint requires the maximum number of bytes for
+ // negative numbers.
+ zzPi := zigzagEncode(piCoder.encode(int32(pi)))
+ zzQi := zigzagEncode(qiCoder.encode(int32(qi)))
+ // Interleave to reduce overhead from two partial bytes to one.
+ interleaved := interleaveUint32(zzPi, zzQi)
+ e.writeUvarint(interleaved)
+}
+
+type faceRun struct {
+ face, count int
+}
+
+func decodeFaceRun(d *decoder) faceRun {
+ faceAndCount := d.readUvarint()
+ ret := faceRun{
+ face: int(faceAndCount % numFaces),
+ count: int(faceAndCount / numFaces),
+ }
+ if ret.count <= 0 && d.err == nil {
+ d.err = errors.New("non-positive count for face run")
+ }
+ return ret
+}
+
+func decodeFaces(numVertices int, d *decoder) []faceRun {
+ var frs []faceRun
+ for nparsed := 0; nparsed < numVertices; {
+ fr := decodeFaceRun(d)
+ if d.err != nil {
+ return nil
+ }
+ frs = append(frs, fr)
+ nparsed += fr.count
+ }
+ return frs
+}
+
+// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face.
+func encodeFaceRun(e *encoder, fr faceRun) {
+ // It isn't necessary to encode the number of faces left for the last run,
+ // but since this would only help if there were more than 21 faces, it will
+ // be a small overall savings, much smaller than the bound encoding.
+ coded := numFaces*uint64(fr.count) + uint64(fr.face)
+ e.writeUvarint(coded)
+}
+
+func encodeFaces(e *encoder, frs []faceRun) {
+ for _, fr := range frs {
+ encodeFaceRun(e, fr)
+ }
+}
+
+type facesIterator struct {
+ faces []faceRun
+	// numCurrentFaceShown counts how many times the current face has been emitted.
+ numCurrentFaceShown int
+ curFace int
+}
+
+func (fi *facesIterator) next() (ok bool) {
+ if len(fi.faces) == 0 {
+ return false
+ }
+ fi.curFace = fi.faces[0].face
+ fi.numCurrentFaceShown++
+
+	// Advance to the next face run if the current one is exhausted.
+ if fi.faces[0].count <= fi.numCurrentFaceShown {
+ fi.faces = fi.faces[1:]
+ fi.numCurrentFaceShown = 0
+ }
+
+ return true
+}
+
+func decodePointsCompressed(d *decoder, level int, target []Point) {
+ faces := decodeFaces(len(target), d)
+
+ piCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+ qiCoder := newNthDerivativeCoder(derivativeEncodingOrder)
+
+ iter := facesIterator{faces: faces}
+ for i := range target {
+ decodeFn := decodePointCompressed
+ if i == 0 {
+ decodeFn = decodeFirstPointFixedLength
+ }
+ pi, qi := decodeFn(d, level, piCoder, qiCoder)
+ if ok := iter.next(); !ok && d.err == nil {
+ d.err = fmt.Errorf("ran out of faces at target %d", i)
+ return
+ }
+ target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)}
+ }
+
+ numOffCenter := int(d.readUvarint())
+ if d.err != nil {
+ return
+ }
+ if numOffCenter > len(target) {
+ d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target))
+ return
+ }
+ for i := 0; i < numOffCenter; i++ {
+ idx := int(d.readUvarint())
+ if d.err != nil {
+ return
+ }
+ if idx >= len(target) {
+ d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target))
+ return
+ }
+ target[idx].X = d.readFloat64()
+ target[idx].Y = d.readFloat64()
+ target[idx].Z = d.readFloat64()
+ }
+}
+
+func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+ bytesToRead := (level + 7) / 8 * 2
+ var interleaved uint64
+ for i := 0; i < bytesToRead; i++ {
+ rr := d.readUint8()
+ interleaved |= (uint64(rr) << uint(i*8))
+ }
+
+ piCoded, qiCoded := deinterleaveUint32(interleaved)
+
+ return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded)))
+}
+
+func zigzagEncode(x int32) uint32 {
+ return (uint32(x) << 1) ^ uint32(x>>31)
+}
+
+func zigzagDecode(x uint32) int32 {
+ return int32((x >> 1) ^ uint32((int32(x&1)<<31)>>31))
+}
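+
+// The zig-zag mapping folds signed values into small unsigned ones so that
+// varint encoding stays short, e.g.:
+//
+//	zigzagEncode(0)  // 0
+//	zigzagEncode(-1) // 1
+//	zigzagEncode(1)  // 2
+//	zigzagEncode(-2) // 3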
+
+func decodePointCompressed(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
+ interleavedZigZagEncodedDerivPiQi := d.readUvarint()
+ piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi)
+ return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag)))
+}
+
+// We introduce a new coordinate system (pi, qi), which is (si, ti)
+// with the bits that are constant for cells of that level shifted
+// off to the right.
+//	si = round(s * 2^31)
+//	pi = si >> (31 - level)
+//	   = floor(s * 2^level)
+// If the point has been snapped to the level, the bits that are
+// shifted off will be a 1 in the msb, then 0s after that, so the
+// fractional part discarded by the cast is (close to) 0.5.
+
+// stToPiQi returns the value transformed to the PiQi coordinate space.
+func stToPiQi(s float64, level uint) uint32 {
+ return uint32(s * float64(int(1)<<level))
+}
+
+// siTitoPiQi returns the value transformed into the PiQi coordinate space.
+// encodeFirstPointFixedLength encodes the return value using level bits,
+// so we clamp si to the range [0, 2**level - 1] before trying to encode
+// it. This is okay because if si == maxSiTi, then it is not a cell center
+// anyway and will be encoded separately as an off-center point.
+func siTitoPiQi(siTi uint32, level int) uint32 {
+ s := uint(siTi)
+ const max = maxSiTi - 1
+ if s > max {
+ s = max
+ }
+
+ return uint32(s >> (maxLevel + 1 - uint(level)))
+}
+
+// piQiToST returns the value transformed to ST space.
+func piQiToST(pi uint32, level int) float64 {
+ // We want to recover the position at the center of the cell. If the point
+ // was snapped to the center of the cell, then math.Modf(s * 2^level) == 0.5.
+ // Inverting STtoPiQi gives:
+ // s = (pi + 0.5) / 2^level.
+ return (float64(pi) + 0.5) / float64(int(1)<<uint(level))
+}
+
+func facePiQitoXYZ(face int, pi, qi uint32, level int) r3.Vector {
+ return faceUVToXYZ(face, stToUV(piQiToST(pi, level)), stToUV(piQiToST(qi, level))).Normalize()
+}
diff --git a/vendor/github.com/golang/geo/s2/polygon.go b/vendor/github.com/golang/geo/s2/polygon.go
new file mode 100644
index 000000000..57c43fbcf
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polygon.go
@@ -0,0 +1,1212 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+)
+
+// Polygon represents a sequence of zero or more loops; recall that the
+// interior of a loop is defined to be its left-hand side (see Loop).
+//
+// When the polygon is initialized, the given loops are automatically converted
+// into a canonical form consisting of "shells" and "holes". Shells and holes
+// are both oriented CCW, and are nested hierarchically. The loops are
+// reordered to correspond to a pre-order traversal of the nesting hierarchy.
+//
+// Polygons may represent any region of the sphere with a polygonal boundary,
+// including the entire sphere (known as the "full" polygon). The full polygon
+// consists of a single full loop (see Loop), whereas the empty polygon has no
+// loops at all.
+//
+// Use FullPolygon() to construct a full polygon. The zero value of Polygon is
+// treated as the empty polygon.
+//
+// Polygons have the following restrictions:
+//
+// - Loops may not cross, i.e. the boundary of a loop may not intersect
+// both the interior and exterior of any other loop.
+//
+// - Loops may not share edges, i.e. if a loop contains an edge AB, then
+// no other loop may contain AB or BA.
+//
+// - Loops may share vertices; however, no vertex may appear twice in a
+// single loop (see Loop).
+//
+// - No loop may be empty. The full loop may appear only in the full polygon.
+type Polygon struct {
+ loops []*Loop
+
+ // index is a spatial index of all the polygon loops.
+ index *ShapeIndex
+
+ // hasHoles tracks if this polygon has at least one hole.
+ hasHoles bool
+
+ // numVertices keeps the running total of all of the vertices of the contained loops.
+ numVertices int
+
+ // numEdges tracks the total number of edges in all the loops in this polygon.
+ numEdges int
+
+	// bound is a conservative bound on all points contained by this polygon.
+	// If p.ContainsPoint(P), then p.bound.ContainsPoint(P).
+ bound Rect
+
+ // Since bound is not exact, it is possible that a loop A contains
+ // another loop B whose bounds are slightly larger. subregionBound
+ // has been expanded sufficiently to account for this error, i.e.
+ // if A.Contains(B), then A.subregionBound.Contains(B.bound).
+ subregionBound Rect
+
+ // A slice where element i is the cumulative number of edges in the
+ // preceding loops in the polygon. This field is used for polygons that
+ // have a large number of loops, and may be empty for polygons with few loops.
+ cumulativeEdges []int
+}
+
+// PolygonFromLoops constructs a polygon from the given set of loops. The polygon
+// interior consists of the points contained by an odd number of loops. (Recall
+// that a loop contains the set of points on its left-hand side.)
+//
+// This method determines the loop nesting hierarchy and assigns every loop a
+// depth. Shells have even depths, and holes have odd depths.
+//
+// Note: The given set of loops is reordered by this method so that the hierarchy
+// can be traversed using Parent, LastDescendant, and the loop depths.
+func PolygonFromLoops(loops []*Loop) *Polygon {
+ p := &Polygon{}
+ // Empty polygons do not contain any loops, even the Empty loop.
+ if len(loops) == 1 && loops[0].IsEmpty() {
+ p.initLoopProperties()
+ return p
+ }
+ p.loops = loops
+ p.initNested()
+ return p
+}
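+
+// A brief usage sketch (illustrative; LoopFromPoints is defined in loop.go
+// in this package):
+//
+//	shell := LoopFromPoints([]Point{
+//		PointFromCoords(1, 0, 0),
+//		PointFromCoords(0, 1, 0),
+//		PointFromCoords(0, 0, 1),
+//	})
+//	poly := PolygonFromLoops([]*Loop{shell})
+//	_ = poly.NumLoops() // 1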
+
+// PolygonFromOrientedLoops returns a Polygon from the given set of loops,
+// like PolygonFromLoops. It expects loops to be oriented such that the polygon
+// interior is on the left-hand side of all loops. This implies that shells
+// and holes should have opposite orientations in the input to this method.
+// (During initialization, loops representing holes will automatically be
+// inverted.)
+func PolygonFromOrientedLoops(loops []*Loop) *Polygon {
+ // Here is the algorithm:
+ //
+ // 1. Remember which of the given loops contain OriginPoint.
+ //
+ // 2. Invert loops as necessary to ensure that they are nestable (i.e., no
+ // loop contains the complement of any other loop). This may result in a
+ // set of loops corresponding to the complement of the given polygon, but
+ // we will fix that problem later.
+ //
+ // We make the loops nestable by first normalizing all the loops (i.e.,
+ // inverting any loops whose turning angle is negative). This handles
+ // all loops except those whose turning angle is very close to zero
+ // (within the maximum error tolerance). Any such loops are inverted if
+ // and only if they contain OriginPoint(). (In theory this step is only
+ // necessary if there are at least two such loops.) The resulting set of
+ // loops is guaranteed to be nestable.
+ //
+ // 3. Build the polygon. This yields either the desired polygon or its
+ // complement.
+ //
+ // 4. If there is at least one loop, we find a loop L that is adjacent to
+ // OriginPoint() (where "adjacent" means that there exists a path
+ // connecting OriginPoint() to some vertex of L such that the path does
+ // not cross any loop). There may be a single such adjacent loop, or
+ // there may be several (in which case they should all have the same
+ // contains_origin() value). We choose L to be the loop containing the
+ // origin whose depth is greatest, or loop(0) (a top-level shell) if no
+ // such loop exists.
+ //
+ // 5. If (L originally contained origin) != (polygon contains origin), we
+ // invert the polygon. This is done by inverting a top-level shell whose
+ // turning angle is minimal and then fixing the nesting hierarchy. Note
+ // that because we normalized all the loops initially, this step is only
+ // necessary if the polygon requires at least one non-normalized loop to
+ // represent it.
+
+ containedOrigin := make(map[*Loop]bool)
+ for _, l := range loops {
+ containedOrigin[l] = l.ContainsOrigin()
+ }
+
+ for _, l := range loops {
+ angle := l.TurningAngle()
+ if math.Abs(angle) > l.turningAngleMaxError() {
+ // Normalize the loop.
+ if angle < 0 {
+ l.Invert()
+ }
+ } else {
+ // Ensure that the loop does not contain the origin.
+ if l.ContainsOrigin() {
+ l.Invert()
+ }
+ }
+ }
+
+ p := PolygonFromLoops(loops)
+
+ if p.NumLoops() > 0 {
+ originLoop := p.Loop(0)
+ polygonContainsOrigin := false
+ for _, l := range p.Loops() {
+ if l.ContainsOrigin() {
+ polygonContainsOrigin = !polygonContainsOrigin
+
+ originLoop = l
+ }
+ }
+ if containedOrigin[originLoop] != polygonContainsOrigin {
+ p.Invert()
+ }
+ }
+
+ return p
+}
+
+// Invert inverts the polygon (replaces it by its complement).
+func (p *Polygon) Invert() {
+ // Inverting any one loop will invert the polygon. The best loop to invert
+ // is the one whose area is largest, since this yields the smallest area
+ // after inversion. The loop with the largest area is always at depth 0.
+	// The descendants of this loop all have their depth reduced by 1, while the
+ // former siblings of this loop all have their depth increased by 1.
+
+ // The empty and full polygons are handled specially.
+ if p.IsEmpty() {
+ *p = *FullPolygon()
+ return
+ }
+ if p.IsFull() {
+ *p = Polygon{}
+ return
+ }
+
+ // Find the loop whose area is largest (i.e., whose turning angle is
+ // smallest), minimizing calls to TurningAngle(). In particular, for
+ // polygons with a single shell at level 0 there is no need to call
+ // TurningAngle() at all. (This method is relatively expensive.)
+ best := 0
+ const none = 10.0 // Flag that means "not computed yet"
+ bestAngle := none
+ for i := 1; i < p.NumLoops(); i++ {
+ if p.Loop(i).depth != 0 {
+ continue
+ }
+ // We defer computing the turning angle of loop 0 until we discover
+ // that the polygon has another top-level shell.
+ if bestAngle == none {
+ bestAngle = p.Loop(best).TurningAngle()
+ }
+ angle := p.Loop(i).TurningAngle()
+ // We break ties deterministically in order to avoid having the output
+ // depend on the input order of the loops.
+ if angle < bestAngle || (angle == bestAngle && compareLoops(p.Loop(i), p.Loop(best)) < 0) {
+ best = i
+ bestAngle = angle
+ }
+ }
+ // Build the new loops vector, starting with the inverted loop.
+ p.Loop(best).Invert()
+ newLoops := make([]*Loop, 0, p.NumLoops())
+ // Add the former siblings of this loop as descendants.
+ lastBest := p.LastDescendant(best)
+ newLoops = append(newLoops, p.Loop(best))
+ for i, l := range p.Loops() {
+ if i < best || i > lastBest {
+ l.depth++
+ newLoops = append(newLoops, l)
+ }
+ }
+ // Add the former children of this loop as siblings.
+ for i, l := range p.Loops() {
+ if i > best && i <= lastBest {
+ l.depth--
+ newLoops = append(newLoops, l)
+ }
+ }
+ p.loops = newLoops
+ p.initLoopProperties()
+}
+
+// Defines a total ordering on Loops that does not depend on the cyclic
+// order of loop vertices. This function is used to choose which loop to
+// invert in the case where several loops have exactly the same area.
+func compareLoops(a, b *Loop) int {
+ if na, nb := a.NumVertices(), b.NumVertices(); na != nb {
+ return na - nb
+ }
+ ai, aDir := a.CanonicalFirstVertex()
+ bi, bDir := b.CanonicalFirstVertex()
+ if aDir != bDir {
+ return aDir - bDir
+ }
+ for n := a.NumVertices() - 1; n >= 0; n, ai, bi = n-1, ai+aDir, bi+bDir {
+ if cmp := a.Vertex(ai).Cmp(b.Vertex(bi).Vector); cmp != 0 {
+ return cmp
+ }
+ }
+ return 0
+}
+
+// PolygonFromCell returns a Polygon from a single loop created from the given Cell.
+func PolygonFromCell(cell Cell) *Polygon {
+ return PolygonFromLoops([]*Loop{LoopFromCell(cell)})
+}
+
+// initNested takes the set of loops in this polygon and performs the nesting
+// computations to set the proper nesting and parent/child relationships.
+func (p *Polygon) initNested() {
+ if len(p.loops) == 1 {
+ p.initOneLoop()
+ return
+ }
+
+ lm := make(loopMap)
+
+ for _, l := range p.loops {
+ lm.insertLoop(l, nil)
+ }
+ // The loops have all been added to the loopMap for ordering. Clear the
+ // loops slice because we add all the loops in-order in initLoops.
+ p.loops = nil
+
+ // Reorder the loops in depth-first traversal order.
+ p.initLoops(lm)
+ p.initLoopProperties()
+}
+
+// loopMap is a map of a loop to its immediate children with respect to nesting.
+// It is used to determine which loops are shells and which are holes.
+type loopMap map[*Loop][]*Loop
+
+// insertLoop adds the given loop to the loop map under the specified parent.
+// All children of the new entry are checked to see if they need to move up to
+// a different level.
+func (lm loopMap) insertLoop(newLoop, parent *Loop) {
+ var children []*Loop
+ for done := false; !done; {
+ children = lm[parent]
+ done = true
+ for _, child := range children {
+ if child.ContainsNested(newLoop) {
+ parent = child
+ done = false
+ break
+ }
+ }
+ }
+
+	// Now that we have found a parent for this loop, some of the children of
+	// that parent may need to become children of the new loop.
+ newChildren := lm[newLoop]
+ for i := 0; i < len(children); {
+ child := children[i]
+ if newLoop.ContainsNested(child) {
+ newChildren = append(newChildren, child)
+ children = append(children[0:i], children[i+1:]...)
+ } else {
+ i++
+ }
+ }
+
+ lm[newLoop] = newChildren
+ lm[parent] = append(children, newLoop)
+}
+
+// loopStack simplifies access to the loops while being initialized.
+type loopStack []*Loop
+
+func (s *loopStack) push(v *Loop) {
+ *s = append(*s, v)
+}
+func (s *loopStack) pop() *Loop {
+ l := len(*s)
+ r := (*s)[l-1]
+ *s = (*s)[:l-1]
+ return r
+}
+
+// initLoops walks the mapping of loops to all of their children, and adds them in
+// order into the polygon's set of loops.
+func (p *Polygon) initLoops(lm loopMap) {
+ var stack loopStack
+ stack.push(nil)
+ depth := -1
+
+ for len(stack) > 0 {
+ loop := stack.pop()
+ if loop != nil {
+ depth = loop.depth
+ p.loops = append(p.loops, loop)
+ }
+ children := lm[loop]
+ for i := len(children) - 1; i >= 0; i-- {
+ child := children[i]
+ child.depth = depth + 1
+ stack.push(child)
+ }
+ }
+}
+
+// initOneLoop sets the properties for a polygon made of a single loop.
+// TODO(roberts): Can this be merged with initLoopProperties
+func (p *Polygon) initOneLoop() {
+ p.hasHoles = false
+ p.numVertices = len(p.loops[0].vertices)
+ p.bound = p.loops[0].RectBound()
+ p.subregionBound = ExpandForSubregions(p.bound)
+ // Ensure the loops depth is set correctly.
+ p.loops[0].depth = 0
+
+ p.initEdgesAndIndex()
+}
+
+// initLoopProperties sets the properties for polygons with multiple loops.
+func (p *Polygon) initLoopProperties() {
+	// The loop depths are set by initNested/initOriented prior to this.
+ p.bound = EmptyRect()
+ p.hasHoles = false
+ for _, l := range p.loops {
+ if l.IsHole() {
+ p.hasHoles = true
+ } else {
+ p.bound = p.bound.Union(l.RectBound())
+ }
+ p.numVertices += l.NumVertices()
+ }
+ p.subregionBound = ExpandForSubregions(p.bound)
+
+ p.initEdgesAndIndex()
+}
+
+// initEdgesAndIndex performs the shape related initializations and adds the final
+// polygon to the index.
+func (p *Polygon) initEdgesAndIndex() {
+ if p.IsFull() {
+ return
+ }
+ const maxLinearSearchLoops = 12 // Based on benchmarks.
+ if len(p.loops) > maxLinearSearchLoops {
+ p.cumulativeEdges = make([]int, 0, len(p.loops))
+ }
+
+ for _, l := range p.loops {
+ if p.cumulativeEdges != nil {
+ p.cumulativeEdges = append(p.cumulativeEdges, p.numEdges)
+ }
+ p.numEdges += len(l.vertices)
+ }
+
+ p.index = NewShapeIndex()
+ p.index.Add(p)
+}
+
+// FullPolygon returns a special "full" polygon.
+func FullPolygon() *Polygon {
+ ret := &Polygon{
+ loops: []*Loop{
+ FullLoop(),
+ },
+ numVertices: len(FullLoop().Vertices()),
+ bound: FullRect(),
+ subregionBound: FullRect(),
+ }
+ ret.initEdgesAndIndex()
+ return ret
+}
+
+// Validate checks whether this is a valid polygon,
+// including checking whether all the loops are themselves valid.
+func (p *Polygon) Validate() error {
+ for i, l := range p.loops {
+ // Check for loop errors that don't require building a ShapeIndex.
+ if err := l.findValidationErrorNoIndex(); err != nil {
+ return fmt.Errorf("loop %d: %v", i, err)
+ }
+ // Check that no loop is empty, and that the full loop only appears in the
+ // full polygon.
+ if l.IsEmpty() {
+ return fmt.Errorf("loop %d: empty loops are not allowed", i)
+ }
+ if l.IsFull() && len(p.loops) > 1 {
+ return fmt.Errorf("loop %d: full loop appears in non-full polygon", i)
+ }
+ }
+
+ // TODO(roberts): Uncomment the remaining checks when they are completed.
+
+ // Check for loop self-intersections and loop pairs that cross
+ // (including duplicate edges and vertices).
+ // if findSelfIntersection(p.index) {
+ // return fmt.Errorf("polygon has loop pairs that cross")
+ // }
+
+ // Check whether initOriented detected inconsistent loop orientations.
+ // if p.hasInconsistentLoopOrientations {
+ // return fmt.Errorf("inconsistent loop orientations detected")
+ // }
+
+ // Finally, verify the loop nesting hierarchy.
+ return p.findLoopNestingError()
+}
+
+// findLoopNestingError reports if there is an error in the loop nesting hierarchy.
+func (p *Polygon) findLoopNestingError() error {
+ // First check that the loop depths make sense.
+ lastDepth := -1
+ for i, l := range p.loops {
+ depth := l.depth
+ if depth < 0 || depth > lastDepth+1 {
+ return fmt.Errorf("loop %d: invalid loop depth (%d)", i, depth)
+ }
+ lastDepth = depth
+ }
+ // Then check that they correspond to the actual loop nesting. This test
+ // is quadratic in the number of loops but the cost per iteration is small.
+ for i, l := range p.loops {
+ last := p.LastDescendant(i)
+ for j, l2 := range p.loops {
+ if i == j {
+ continue
+ }
+ nested := (j >= i+1) && (j <= last)
+ const reverseB = false
+
+ if l.containsNonCrossingBoundary(l2, reverseB) != nested {
+ nestedStr := ""
+ if !nested {
+ nestedStr = "not "
+ }
+ return fmt.Errorf("invalid nesting: loop %d should %scontain loop %d", i, nestedStr, j)
+ }
+ }
+ }
+ return nil
+}
+
+// IsEmpty reports whether this is the special "empty" polygon (consisting of no loops).
+func (p *Polygon) IsEmpty() bool {
+ return len(p.loops) == 0
+}
+
+// IsFull reports whether this is the special "full" polygon (consisting of a
+// single loop that encompasses the entire sphere).
+func (p *Polygon) IsFull() bool {
+ return len(p.loops) == 1 && p.loops[0].IsFull()
+}
+
+// NumLoops returns the number of loops in this polygon.
+func (p *Polygon) NumLoops() int {
+ return len(p.loops)
+}
+
+// Loops returns the loops in this polygon.
+func (p *Polygon) Loops() []*Loop {
+ return p.loops
+}
+
+// Loop returns the loop at the given index. Note that during initialization,
+// the given loops are reordered according to a pre-order traversal of the loop
+// nesting hierarchy. This implies that every loop is immediately followed by
+// its descendants. This hierarchy can be traversed using the methods Parent,
+// LastDescendant, and Loop.depth.
+func (p *Polygon) Loop(k int) *Loop {
+ return p.loops[k]
+}
+
+// Parent returns the index of the parent of loop k.
+// If the loop does not have a parent, ok=false is returned.
+func (p *Polygon) Parent(k int) (index int, ok bool) {
+ // See where we are on the depth hierarchy.
+ depth := p.loops[k].depth
+ if depth == 0 {
+ return -1, false
+ }
+
+ // There may be several loops at the same nesting level as us that share a
+	// parent loop with us. (Imagine a slice of Swiss cheese, of which we are one loop;
+	// we don't know how many may be next to us before we get back to our parent loop.)
+ // Move up one position from us, and then begin traversing back through the set of loops
+ // until we find the one that is our parent or we get to the top of the polygon.
+ for k--; k >= 0 && p.loops[k].depth <= depth; k-- {
+ }
+ return k, true
+}
+
+// LastDescendant returns the index of the last loop that is contained within loop k.
+// If k is negative, it returns the last loop in the polygon.
+// Note that loops are indexed according to a pre-order traversal of the nesting
+// hierarchy, so the immediate children of loop k can be found by iterating over
+// the loops (k+1)..LastDescendant(k) and selecting those whose depth is equal
+// to Loop(k).depth+1.
+func (p *Polygon) LastDescendant(k int) int {
+ if k < 0 {
+ return len(p.loops) - 1
+ }
+
+ depth := p.loops[k].depth
+
+ // Find the next loop immediately past us in the set of loops, and then start
+ // moving down the list until we either get to the end or find the next loop
+ // that is higher up the hierarchy than we are.
+ for k++; k < len(p.loops) && p.loops[k].depth > depth; k++ {
+ }
+ return k - 1
+}
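+
+// For example, given a polygon p and a loop index k, the immediate children
+// of loop k can be visited as follows (illustrative):
+//
+//	for i := k + 1; i <= p.LastDescendant(k); i++ {
+//		if p.Loop(i).depth == p.Loop(k).depth+1 {
+//			// Loop(i) is an immediate child of Loop(k).
+//		}
+//	}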
+
+// CapBound returns a bounding spherical cap.
+func (p *Polygon) CapBound() Cap { return p.bound.CapBound() }
+
+// RectBound returns a bounding latitude-longitude rectangle.
+func (p *Polygon) RectBound() Rect { return p.bound }
+
+// ContainsPoint reports whether the polygon contains the point.
+func (p *Polygon) ContainsPoint(point Point) bool {
+ // NOTE: A bounds check slows down this function by about 50%. It is
+ // worthwhile only when it might allow us to delay building the index.
+ if !p.index.IsFresh() && !p.bound.ContainsPoint(point) {
+ return false
+ }
+
+	// For small polygons, and during initial construction, it is faster to just
+	// check all the crossings.
+ const maxBruteForceVertices = 32
+ if p.numVertices < maxBruteForceVertices || p.index == nil {
+ inside := false
+ for _, l := range p.loops {
+			// Use the loop's brute-force check to avoid building the index on each loop.
+ inside = inside != l.bruteForceContainsPoint(point)
+ }
+ return inside
+ }
+
+ // Otherwise, look up the ShapeIndex cell containing this point.
+ it := p.index.Iterator()
+ if !it.LocatePoint(point) {
+ return false
+ }
+
+ return p.iteratorContainsPoint(it, point)
+}
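+
+// A brief usage sketch (illustrative; CellFromCellID is defined in cell.go
+// in this package):
+//
+//	poly := PolygonFromCell(CellFromCellID(CellIDFromFace(0)))
+//	poly.ContainsPoint(PointFromCoords(1, 0, 0)) // true: the center of face 0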
+
+// ContainsCell reports whether the polygon contains the given cell.
+func (p *Polygon) ContainsCell(cell Cell) bool {
+ it := p.index.Iterator()
+ relation := it.LocateCellID(cell.ID())
+
+ // If "cell" is disjoint from all index cells, it is not contained.
+ // Similarly, if "cell" is subdivided into one or more index cells then it
+ // is not contained, since index cells are subdivided only if they (nearly)
+ // intersect a sufficient number of edges. (But note that if "cell" itself
+ // is an index cell then it may be contained, since it could be a cell with
+ // no edges in the loop interior.)
+ if relation != Indexed {
+ return false
+ }
+
+ // Otherwise check if any edges intersect "cell".
+ if p.boundaryApproxIntersects(it, cell) {
+ return false
+ }
+
+ // Otherwise check if the loop contains the center of "cell".
+ return p.iteratorContainsPoint(it, cell.Center())
+}
+
+// IntersectsCell reports whether the polygon intersects the given cell.
+func (p *Polygon) IntersectsCell(cell Cell) bool {
+ it := p.index.Iterator()
+ relation := it.LocateCellID(cell.ID())
+
+ // If cell does not overlap any index cell, there is no intersection.
+ if relation == Disjoint {
+ return false
+ }
+ // If cell is subdivided into one or more index cells, there is an
+ // intersection to within the S2ShapeIndex error bound (see Contains).
+ if relation == Subdivided {
+ return true
+ }
+ // If cell is an index cell, there is an intersection because index cells
+ // are created only if they have at least one edge or they are entirely
+ // contained by the loop.
+ if it.CellID() == cell.id {
+ return true
+ }
+ // Otherwise check if any edges intersect cell.
+ if p.boundaryApproxIntersects(it, cell) {
+ return true
+ }
+ // Otherwise check if the loop contains the center of cell.
+ return p.iteratorContainsPoint(it, cell.Center())
+}
+
+// CellUnionBound computes a covering of the Polygon.
+func (p *Polygon) CellUnionBound() []CellID {
+ // TODO(roberts): Use ShapeIndexRegion when it's available.
+ return p.CapBound().CellUnionBound()
+}
+
+// boundaryApproxIntersects reports whether the loop's boundary intersects cell.
+// It may also return true when the loop boundary does not intersect cell but
+// some edge comes within the worst-case error tolerance.
+//
+// This requires that it.Locate(cell) returned Indexed.
+func (p *Polygon) boundaryApproxIntersects(it *ShapeIndexIterator, cell Cell) bool {
+ aClipped := it.IndexCell().findByShapeID(0)
+
+ // If there are no edges, there is no intersection.
+ if len(aClipped.edges) == 0 {
+ return false
+ }
+
+ // We can save some work if cell is the index cell itself.
+ if it.CellID() == cell.ID() {
+ return true
+ }
+
+ // Otherwise check whether any of the edges intersect cell.
+ maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist)
+ bound := cell.BoundUV().ExpandedByMargin(maxError)
+ for _, e := range aClipped.edges {
+ edge := p.index.Shape(0).Edge(e)
+ v0, v1, ok := ClipToPaddedFace(edge.V0, edge.V1, cell.Face(), maxError)
+ if ok && edgeIntersectsRect(v0, v1, bound) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// iteratorContainsPoint reports whether the ShapeIndexCell at the iterator's
+// current position contains the given point.
+func (p *Polygon) iteratorContainsPoint(it *ShapeIndexIterator, point Point) bool {
+ // Test containment by drawing a line segment from the cell center to the
+ // given point and counting edge crossings.
+ aClipped := it.IndexCell().findByShapeID(0)
+ inside := aClipped.containsCenter
+
+ if len(aClipped.edges) == 0 {
+ return inside
+ }
+
+ // This code path relies on the ShapeIndex having been built.
+ crosser := NewEdgeCrosser(it.Center(), point)
+ shape := p.index.Shape(0)
+ for _, e := range aClipped.edges {
+ edge := shape.Edge(e)
+ inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1)
+ }
+
+ return inside
+}
+
+// Shape Interface
+
+// NumEdges returns the number of edges in this shape.
+func (p *Polygon) NumEdges() int {
+ return p.numEdges
+}
+
+// Edge returns endpoints for the given edge index.
+func (p *Polygon) Edge(e int) Edge {
+ var i int
+
+ if len(p.cumulativeEdges) > 0 {
+ for i = range p.cumulativeEdges {
+ if i+1 >= len(p.cumulativeEdges) || e < p.cumulativeEdges[i+1] {
+ e -= p.cumulativeEdges[i]
+ break
+ }
+ }
+ } else {
+ // When the number of loops is small, use linear search. Most often
+ // there is exactly one loop and the code below executes zero times.
+ for i = 0; e >= len(p.Loop(i).vertices); i++ {
+ e -= len(p.Loop(i).vertices)
+ }
+ }
+
+ return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)}
+}
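+
+// exampleEdgeNumbering is an illustrative sketch, not part of the upstream
+// library: it demonstrates the global edge numbering used by Edge. Edges are
+// numbered consecutively across loops, so assuming p has two loops with 4 and
+// 3 vertices, edge 5 is edge 1 of loop 1, i.e. the edge from
+// p.Loop(1).OrientedVertex(1) to p.Loop(1).OrientedVertex(2).
+func exampleEdgeNumbering(p *Polygon) Edge {
+ return p.Edge(5)
+}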
+
+// ReferencePoint returns the reference point for this polygon.
+func (p *Polygon) ReferencePoint() ReferencePoint {
+ containsOrigin := false
+ for _, l := range p.loops {
+ containsOrigin = containsOrigin != l.ContainsOrigin()
+ }
+ return OriginReferencePoint(containsOrigin)
+}
+
+// NumChains reports the number of contiguous edge chains in the Polygon.
+func (p *Polygon) NumChains() int {
+ return p.NumLoops()
+}
+
+// Chain returns the i-th edge Chain (loop) in the Shape.
+func (p *Polygon) Chain(chainID int) Chain {
+ if p.cumulativeEdges != nil {
+ return Chain{p.cumulativeEdges[chainID], len(p.Loop(chainID).vertices)}
+ }
+ e := 0
+ for j := 0; j < chainID; j++ {
+ e += len(p.Loop(j).vertices)
+ }
+
+ // Polygon represents a full loop as a loop with one vertex, while
+ // Shape represents a full loop as a chain with no vertices.
+ if numVertices := p.Loop(chainID).NumVertices(); numVertices != 1 {
+ return Chain{e, numVertices}
+ }
+ return Chain{e, 0}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge Chain (loop).
+func (p *Polygon) ChainEdge(i, j int) Edge {
+ return Edge{p.Loop(i).OrientedVertex(j), p.Loop(i).OrientedVertex(j + 1)}
+}
+
+// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge
+// of the i-th edge Chain.
+func (p *Polygon) ChainPosition(edgeID int) ChainPosition {
+ var i int
+
+ if len(p.cumulativeEdges) > 0 {
+ for i = range p.cumulativeEdges {
+ if i+1 >= len(p.cumulativeEdges) || edgeID < p.cumulativeEdges[i+1] {
+ edgeID -= p.cumulativeEdges[i]
+ break
+ }
+ }
+ } else {
+ // When the number of loops is small, use linear search. Most often
+ // there is exactly one loop and the code below executes zero times.
+ for i = 0; edgeID >= len(p.Loop(i).vertices); i++ {
+ edgeID -= len(p.Loop(i).vertices)
+ }
+ }
+ // TODO(roberts): unify this and Edge since they are mostly identical.
+ return ChainPosition{i, edgeID}
+}
+
+// Dimension returns the dimension of the geometry represented by this Polygon.
+func (p *Polygon) Dimension() int { return 2 }
+
+func (p *Polygon) typeTag() typeTag { return typeTagPolygon }
+
+func (p *Polygon) privateInterface() {}
+
+// Contains reports whether this polygon contains the other polygon.
+// Specifically, it reports whether all the points in the other polygon
+// are also in this polygon.
+func (p *Polygon) Contains(o *Polygon) bool {
+ // If both polygons have one loop, use the more efficient Loop method.
+ // Note that Loop's Contains does its own bounding rectangle check.
+ if len(p.loops) == 1 && len(o.loops) == 1 {
+ return p.loops[0].Contains(o.loops[0])
+ }
+
+ // Otherwise if neither polygon has holes, we can still use the more
+ // efficient Loop's Contains method (rather than compareBoundary),
+ // but it's worthwhile to do our own bounds check first.
+ if !p.subregionBound.Contains(o.bound) {
+ // Even though Bound(A) does not contain Bound(B), it is still possible
+ // that A contains B. This can only happen when union of the two bounds
+ // spans all longitudes. For example, suppose that B consists of two
+ // shells with a longitude gap between them, while A consists of one shell
+ // that surrounds both shells of B but goes the other way around the
+ // sphere (so that it does not intersect the longitude gap).
+ if !p.bound.Lng.Union(o.bound.Lng).IsFull() {
+ return false
+ }
+ }
+
+ if !p.hasHoles && !o.hasHoles {
+ for _, l := range o.loops {
+ if !p.anyLoopContains(l) {
+ return false
+ }
+ }
+ return true
+ }
+
+ // Polygon A contains B iff B does not intersect the complement of A. From
+ // the intersection algorithm below, this means that the complement of A
+ // must exclude the entire boundary of B, and B must exclude all shell
+ // boundaries of the complement of A. (It can be shown that B must then
+ // exclude the entire boundary of the complement of A.) The first call
+ // below returns false if the boundaries cross, therefore the second call
+ // does not need to check for any crossing edges (which makes it cheaper).
+ return p.containsBoundary(o) && o.excludesNonCrossingComplementShells(p)
+}
+
+// Intersects reports whether this polygon intersects the other polygon, i.e.
+// if there is a point that is contained by both polygons.
+func (p *Polygon) Intersects(o *Polygon) bool {
+ // If both polygons have one loop, use the more efficient Loop method.
+ // Note that Loop Intersects does its own bounding rectangle check.
+ if len(p.loops) == 1 && len(o.loops) == 1 {
+ return p.loops[0].Intersects(o.loops[0])
+ }
+
+ // Otherwise if neither polygon has holes, we can still use the more
+ // efficient Loop.Intersects method. The polygons intersect if and
+ // only if some pair of loop regions intersect.
+ if !p.bound.Intersects(o.bound) {
+ return false
+ }
+
+ if !p.hasHoles && !o.hasHoles {
+ for _, l := range o.loops {
+ if p.anyLoopIntersects(l) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Polygon A is disjoint from B if A excludes the entire boundary of B and B
+ // excludes all shell boundaries of A. (It can be shown that B must then
+ // exclude the entire boundary of A.) The first call below returns false if
+ // the boundaries cross, therefore the second call does not need to check
+ // for crossing edges.
+ return !p.excludesBoundary(o) || !o.excludesNonCrossingShells(p)
+}
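+
+// examplePolygonRelations is an illustrative sketch, not part of the upstream
+// library, contrasting Contains and Intersects. The nested rectangles are
+// arbitrary assumptions; containment implies intersection, but not vice versa.
+func examplePolygonRelations() (contains, intersects bool) {
+ outer := PolygonFromLoops([]*Loop{LoopFromPoints([]Point{
+ PointFromLatLng(LatLngFromDegrees(0, 0)),
+ PointFromLatLng(LatLngFromDegrees(0, 20)),
+ PointFromLatLng(LatLngFromDegrees(20, 20)),
+ PointFromLatLng(LatLngFromDegrees(20, 0)),
+ })})
+ inner := PolygonFromLoops([]*Loop{LoopFromPoints([]Point{
+ PointFromLatLng(LatLngFromDegrees(5, 5)),
+ PointFromLatLng(LatLngFromDegrees(5, 15)),
+ PointFromLatLng(LatLngFromDegrees(15, 15)),
+ PointFromLatLng(LatLngFromDegrees(15, 5)),
+ })})
+ return outer.Contains(inner), outer.Intersects(inner)
+}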
+
+// compareBoundary returns +1 if this polygon contains the boundary of B, -1 if A
+// excludes the boundary of B, and 0 if the boundaries of A and B cross.
+func (p *Polygon) compareBoundary(o *Loop) int {
+ result := -1
+ for i := 0; i < len(p.loops) && result != 0; i++ {
+ // If B crosses any loop of A, the result is 0. Otherwise the result
+ // changes sign each time B is contained by a loop of A.
+ result *= -p.loops[i].compareBoundary(o)
+ }
+ return result
+}
+
+// containsBoundary reports whether this polygon contains the entire boundary of B.
+func (p *Polygon) containsBoundary(o *Polygon) bool {
+ for _, l := range o.loops {
+ if p.compareBoundary(l) <= 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// excludesBoundary reports whether this polygon excludes the entire boundary of B.
+func (p *Polygon) excludesBoundary(o *Polygon) bool {
+ for _, l := range o.loops {
+ if p.compareBoundary(l) >= 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// containsNonCrossingBoundary reports whether polygon A contains the boundary of
+// loop B. Shared edges are handled according to the rule described in Loop's
+// containsNonCrossingBoundary.
+func (p *Polygon) containsNonCrossingBoundary(o *Loop, reverse bool) bool {
+ var inside bool
+ for _, l := range p.loops {
+ x := l.containsNonCrossingBoundary(o, reverse)
+ inside = (inside != x)
+ }
+ return inside
+}
+
+// excludesNonCrossingShells reports whether, given two polygons A and B such that
+// the boundary of A does not cross any loop of B, A excludes all shell boundaries of B.
+func (p *Polygon) excludesNonCrossingShells(o *Polygon) bool {
+ for _, l := range o.loops {
+ if l.IsHole() {
+ continue
+ }
+ if p.containsNonCrossingBoundary(l, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// excludesNonCrossingComplementShells reports whether, given two polygons A and B
+// such that the boundary of A does not cross any loop of B, A excludes all
+// shell boundaries of the complement of B.
+func (p *Polygon) excludesNonCrossingComplementShells(o *Polygon) bool {
+ // Special case to handle the complement of the empty or full polygons.
+ if o.IsEmpty() {
+ return !p.IsFull()
+ }
+ if o.IsFull() {
+ return true
+ }
+
+ // Otherwise the complement of B may be obtained by inverting loop(0) and
+ // then swapping the shell/hole status of all other loops. This implies
+ // that the shells of the complement consist of loop 0 plus all the holes of
+ // the original polygon.
+ for j, l := range o.loops {
+ if j > 0 && !l.IsHole() {
+ continue
+ }
+
+ // The interior of the complement is to the right of loop 0, and to the
+ // left of the loops that were originally holes.
+ if p.containsNonCrossingBoundary(l, j == 0) {
+ return false
+ }
+ }
+ return true
+}
+
+// anyLoopContains reports whether any loop in this polygon contains the given loop.
+func (p *Polygon) anyLoopContains(o *Loop) bool {
+ for _, l := range p.loops {
+ if l.Contains(o) {
+ return true
+ }
+ }
+ return false
+}
+
+// anyLoopIntersects reports whether any loop in this polygon intersects the given loop.
+func (p *Polygon) anyLoopIntersects(o *Loop) bool {
+ for _, l := range p.loops {
+ if l.Intersects(o) {
+ return true
+ }
+ }
+ return false
+}
+
+// Area returns the area of the polygon interior, i.e. the region on the left side
+// of an odd number of loops. The return value is between 0 and 4*Pi.
+func (p *Polygon) Area() float64 {
+ var area float64
+ for _, loop := range p.loops {
+ area += float64(loop.Sign()) * loop.Area()
+ }
+ return area
+}
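+
+// examplePolygonArea is an illustrative sketch, not part of the upstream
+// library: since Area sums Sign()*Area() over the loops and holes have
+// Sign() == -1, a polygon built from a shell with a nested hole has area
+// shell.Area() - hole.Area(). This assumes hole is strictly nested inside
+// shell so that PolygonFromLoops assigns it depth 1.
+func examplePolygonArea(shell, hole *Loop) float64 {
+ return PolygonFromLoops([]*Loop{shell, hole}).Area()
+}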
+
+// Encode encodes the Polygon.
+func (p *Polygon) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ p.encode(e)
+ return e.err
+}
+
+// encode selects between the lossless and compressed encodings based on a
+// rough estimate of the resulting sizes.
+func (p *Polygon) encode(e *encoder) {
+ if p.numVertices == 0 {
+ p.encodeCompressed(e, maxLevel, nil)
+ return
+ }
+
+ // Convert all the polygon vertices to XYZFaceSiTi format.
+ vs := make([]xyzFaceSiTi, 0, p.numVertices)
+ for _, l := range p.loops {
+ vs = append(vs, l.xyzFaceSiTiVertices()...)
+ }
+
+ // Compute a histogram of the cell levels at which the vertices are snapped.
+ // (histogram[0] is the number of unsnapped vertices, histogram[i] the number
+ // of vertices snapped at level i-1).
+ histogram := make([]int, maxLevel+2)
+ for _, v := range vs {
+ histogram[v.level+1]++
+ }
+
+ // Compute the level at which most of the vertices are snapped.
+ // If multiple levels have the same maximum number of vertices
+ // snapped to them, the lowest such level is chosen, which is
+ // desirable: it has the largest cell area and the smallest
+ // encoding length.
+ var snapLevel, numSnapped int
+ for level, h := range histogram[1:] {
+ if h > numSnapped {
+ snapLevel, numSnapped = level, h
+ }
+ }
+
+ // Choose an encoding format based on the number of unsnapped vertices and a
+ // rough estimate of the encoded sizes.
+ numUnsnapped := p.numVertices - numSnapped // Number of vertices that won't be snapped at snapLevel.
+ const pointSize = 3 * 8 // s2.Point is an r3.Vector, which is 3 float64s. That's 3*8 = 24 bytes.
+ compressedSize := 4*p.numVertices + (pointSize+2)*numUnsnapped
+ losslessSize := pointSize * p.numVertices
+ if compressedSize < losslessSize {
+ p.encodeCompressed(e, snapLevel, vs)
+ } else {
+ p.encodeLossless(e)
+ }
+}
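+
+// As a worked example of the size heuristic above (illustrative only): for a
+// polygon of 1000 vertices that all snap at the chosen level, numUnsnapped is
+// 0, so compressedSize = 4*1000 = 4000 bytes versus losslessSize = 24*1000 =
+// 24000 bytes, and the compressed encoding is chosen.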
+
+// encodeLossless encodes the polygon's Points as float64s.
+func (p *Polygon) encodeLossless(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeBool(true) // A legacy C++ value; must be true.
+ e.writeBool(p.hasHoles)
+ e.writeUint32(uint32(len(p.loops)))
+
+ if e.err != nil {
+ return
+ }
+ if len(p.loops) > maxEncodedLoops {
+ e.err = fmt.Errorf("too many loops (%d; max is %d)", len(p.loops), maxEncodedLoops)
+ return
+ }
+ for _, l := range p.loops {
+ l.encode(e)
+ }
+
+ // Encode the bound.
+ p.bound.encode(e)
+}
+
+func (p *Polygon) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) {
+ e.writeUint8(uint8(encodingCompressedVersion))
+ e.writeUint8(uint8(snapLevel))
+ e.writeUvarint(uint64(len(p.loops)))
+
+ if e.err != nil {
+ return
+ }
+ if l := len(p.loops); l > maxEncodedLoops {
+ e.err = fmt.Errorf("too many loops to encode: %d; max is %d", l, maxEncodedLoops)
+ return
+ }
+
+ for _, l := range p.loops {
+ l.encodeCompressed(e, snapLevel, vertices[:len(l.vertices)])
+ vertices = vertices[len(l.vertices):]
+ }
+ // Do not write the bound, num_vertices, or has_holes_ as they can be
+ // cheaply recomputed by decodeCompressed. Microbenchmarks show the
+ // speed difference is inconsequential.
+}
+
+// Decode decodes the Polygon.
+func (p *Polygon) Decode(r io.Reader) error {
+ d := &decoder{r: asByteReader(r)}
+ version := int8(d.readUint8())
+ var dec func(*decoder)
+ switch version {
+ case encodingVersion:
+ dec = p.decode
+ case encodingCompressedVersion:
+ dec = p.decodeCompressed
+ default:
+ return fmt.Errorf("unsupported version %d", version)
+ }
+ dec(d)
+ return d.err
+}
+
+// maxEncodedLoops is the largest supported number of loops in an encoded polygon.
+// Enforcing a maximum guards the decode-time allocation: it prevents a malicious
+// encoding from forcing an out-of-memory condition.
+const maxEncodedLoops = 10000000
+
+func (p *Polygon) decode(d *decoder) {
+ *p = Polygon{}
+ d.readUint8() // Ignore irrelevant serialized owns_loops_ value.
+
+ p.hasHoles = d.readBool()
+
+ // Polygons with no loops are explicitly allowed here: a newly created
+ // polygon has zero loops and such polygons encode and decode properly.
+ nloops := d.readUint32()
+ if d.err != nil {
+ return
+ }
+ if nloops > maxEncodedLoops {
+ d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops)
+ return
+ }
+ p.loops = make([]*Loop, nloops)
+ for i := range p.loops {
+ p.loops[i] = new(Loop)
+ p.loops[i].decode(d)
+ p.numVertices += len(p.loops[i].vertices)
+ }
+
+ p.bound.decode(d)
+ if d.err != nil {
+ return
+ }
+ p.subregionBound = ExpandForSubregions(p.bound)
+ p.initEdgesAndIndex()
+}
+
+func (p *Polygon) decodeCompressed(d *decoder) {
+ snapLevel := int(d.readUint8())
+
+ if snapLevel > maxLevel {
+ d.err = fmt.Errorf("snaplevel too big: %d", snapLevel)
+ return
+ }
+ // Polygons with no loops are explicitly allowed here: a newly created
+ // polygon has zero loops and such polygons encode and decode properly.
+ nloops := int(d.readUvarint())
+ if nloops > maxEncodedLoops {
+ d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops)
+ }
+ p.loops = make([]*Loop, nloops)
+ for i := range p.loops {
+ p.loops[i] = new(Loop)
+ p.loops[i].decodeCompressed(d, snapLevel)
+ }
+ p.initLoopProperties()
+}
+
+// TODO(roberts): Differences from C++
+// Centroid
+// SnapLevel
+// DistanceToPoint
+// DistanceToBoundary
+// Project
+// ProjectToBoundary
+// ApproxContains/ApproxDisjoint for Polygons
+// InitTo{Intersection/ApproxIntersection/Union/ApproxUnion/Diff/ApproxDiff}
+// InitToSimplified
+// InitToSnapped
+// IntersectWithPolyline
+// ApproxIntersectWithPolyline
+// SubtractFromPolyline
+// ApproxSubtractFromPolyline
+// DestructiveUnion
+// DestructiveApproxUnion
+// InitToCellUnionBorder
+// IsNormalized
+// Equal/BoundaryEqual/BoundaryApproxEqual/BoundaryNear Polygons
+// BreakEdgesAndAddToBuilder
+//
+// clearLoops
+// findLoopNestingError
+// initToSimplifiedInternal
+// internalClipPolyline
+// clipBoundary
diff --git a/vendor/github.com/golang/geo/s2/polyline.go b/vendor/github.com/golang/geo/s2/polyline.go
new file mode 100644
index 000000000..517968342
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polyline.go
@@ -0,0 +1,589 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// Polyline represents a sequence of zero or more vertices connected by
+// straight edges (geodesics). Edges of length 0 and 180 degrees are not
+// allowed, i.e. adjacent vertices should not be identical or antipodal.
+type Polyline []Point
+
+// PolylineFromLatLngs creates a new Polyline from the given LatLngs.
+func PolylineFromLatLngs(points []LatLng) *Polyline {
+ p := make(Polyline, len(points))
+ for k, v := range points {
+ p[k] = PointFromLatLng(v)
+ }
+ return &p
+}
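+
+// examplePolylineFromLatLngs is an illustrative sketch, not part of the
+// upstream library, showing typical construction and measurement; the
+// coordinates are arbitrary assumptions.
+func examplePolylineFromLatLngs() s1.Angle {
+ line := PolylineFromLatLngs([]LatLng{
+ LatLngFromDegrees(47.6, -122.3),
+ LatLngFromDegrees(37.8, -122.4),
+ })
+ // Length returns an angle; scale by the sphere's radius for a distance.
+ return line.Length()
+}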
+
+// Reverse reverses the order of the Polyline vertices.
+func (p *Polyline) Reverse() {
+ for i := 0; i < len(*p)/2; i++ {
+ (*p)[i], (*p)[len(*p)-i-1] = (*p)[len(*p)-i-1], (*p)[i]
+ }
+}
+
+// Length returns the length of this Polyline.
+func (p *Polyline) Length() s1.Angle {
+ var length s1.Angle
+
+ for i := 1; i < len(*p); i++ {
+ length += (*p)[i-1].Distance((*p)[i])
+ }
+ return length
+}
+
+// Centroid returns the true centroid of the polyline multiplied by the length of the
+// polyline. The result is not unit length, so you may wish to normalize it.
+//
+// Scaling by the Polyline length makes it easy to compute the centroid
+// of several Polylines (by simply adding up their centroids).
+func (p *Polyline) Centroid() Point {
+ var centroid Point
+ for i := 1; i < len(*p); i++ {
+ // The centroid (multiplied by length) is a vector toward the midpoint
+ // of the edge, whose length is twice the sin of half the angle between
+ // the two vertices. Defining theta to be this angle, we have:
+ vSum := (*p)[i-1].Add((*p)[i].Vector) // Length == 2*cos(theta)
+ vDiff := (*p)[i-1].Sub((*p)[i].Vector) // Length == 2*sin(theta)
+
+ // The scaled vSum points toward the edge midpoint with length 2*sin(theta).
+ centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))}
+ }
+ return centroid
+}
+
+// Equal reports whether the given Polyline is exactly the same as this one.
+func (p *Polyline) Equal(b *Polyline) bool {
+ if len(*p) != len(*b) {
+ return false
+ }
+ for i, v := range *p {
+ if v != (*b)[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ApproxEqual reports whether two polylines have the same number of vertices,
+// and corresponding vertex pairs are separated by no more than the standard margin.
+func (p *Polyline) ApproxEqual(o *Polyline) bool {
+ return p.approxEqual(o, s1.Angle(epsilon))
+}
+
+// approxEqual reports whether two polylines are equal within the given margin.
+func (p *Polyline) approxEqual(o *Polyline, maxError s1.Angle) bool {
+ if len(*p) != len(*o) {
+ return false
+ }
+ for offset, val := range *p {
+ if !val.approxEqual((*o)[offset], maxError) {
+ return false
+ }
+ }
+ return true
+}
+
+// CapBound returns the bounding Cap for this Polyline.
+func (p *Polyline) CapBound() Cap {
+ return p.RectBound().CapBound()
+}
+
+// RectBound returns the bounding Rect for this Polyline.
+func (p *Polyline) RectBound() Rect {
+ rb := NewRectBounder()
+ for _, v := range *p {
+ rb.AddPoint(v)
+ }
+ return rb.RectBound()
+}
+
+// ContainsCell reports whether this Polyline contains the given Cell. Always returns false
+// because "containment" is not numerically well-defined except at the Polyline vertices.
+func (p *Polyline) ContainsCell(cell Cell) bool {
+ return false
+}
+
+// IntersectsCell reports whether this Polyline intersects the given Cell.
+func (p *Polyline) IntersectsCell(cell Cell) bool {
+ if len(*p) == 0 {
+ return false
+ }
+
+ // We only need to check whether the cell contains vertex 0 for correctness,
+ // but these tests are cheap compared to edge crossings so we might as well
+ // check all the vertices.
+ for _, v := range *p {
+ if cell.ContainsPoint(v) {
+ return true
+ }
+ }
+
+ cellVertices := []Point{
+ cell.Vertex(0),
+ cell.Vertex(1),
+ cell.Vertex(2),
+ cell.Vertex(3),
+ }
+
+ for j := 0; j < 4; j++ {
+ crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], (*p)[0])
+ for i := 1; i < len(*p); i++ {
+ if crosser.ChainCrossingSign((*p)[i]) != DoNotCross {
+ // There is a proper crossing, or two vertices were the same.
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// ContainsPoint returns false since Polylines are not closed.
+func (p *Polyline) ContainsPoint(point Point) bool {
+ return false
+}
+
+// CellUnionBound computes a covering of the Polyline.
+func (p *Polyline) CellUnionBound() []CellID {
+ return p.CapBound().CellUnionBound()
+}
+
+// NumEdges returns the number of edges in this shape.
+func (p *Polyline) NumEdges() int {
+ if len(*p) == 0 {
+ return 0
+ }
+ return len(*p) - 1
+}
+
+// Edge returns endpoints for the given edge index.
+func (p *Polyline) Edge(i int) Edge {
+ return Edge{(*p)[i], (*p)[i+1]}
+}
+
+// ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
+func (p *Polyline) ReferencePoint() ReferencePoint {
+ return OriginReferencePoint(false)
+}
+
+// NumChains reports the number of contiguous edge chains in this Polyline.
+func (p *Polyline) NumChains() int {
+ return minInt(1, p.NumEdges())
+}
+
+// Chain returns the i-th edge Chain in the Shape.
+func (p *Polyline) Chain(chainID int) Chain {
+ return Chain{0, p.NumEdges()}
+}
+
+// ChainEdge returns the j-th edge of the i-th edge Chain.
+func (p *Polyline) ChainEdge(chainID, offset int) Edge {
+ return Edge{(*p)[offset], (*p)[offset+1]}
+}
+
+// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge
+// of the i-th edge Chain.
+func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
+ return ChainPosition{0, edgeID}
+}
+
+// Dimension returns the dimension of the geometry represented by this Polyline.
+func (p *Polyline) Dimension() int { return 1 }
+
+// IsEmpty reports whether this shape contains no points.
+func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) }
+
+// IsFull reports whether this shape contains all points on the sphere.
+func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) }
+
+func (p *Polyline) typeTag() typeTag { return typeTagPolyline }
+
+func (p *Polyline) privateInterface() {}
+
+// findEndVertex reports the maximal end index such that the line segment between
+// the start index and this one passes within the given tolerance of all
+// interior vertices, in order.
+func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
+ // The basic idea is to keep track of the "pie wedge" of angles
+ // from the starting vertex such that a ray from the starting
+ // vertex at that angle will pass through the discs of radius
+ // tolerance centered around all vertices processed so far.
+ //
+ // First we define a coordinate frame for the tangent and normal
+ // spaces at the starting vertex. Essentially this means picking
+ // three orthonormal vectors X,Y,Z such that X and Y span the
+ // tangent plane at the starting vertex, and Z is up. We use
+ // the coordinate frame to define a mapping from 3D direction
+ // vectors to a one-dimensional ray angle in the range (-π,
+ // π]. The angle of a direction vector is computed by
+ // transforming it into the X,Y,Z basis, and then calculating
+ // atan2(y,x). This mapping allows us to represent a wedge of
+ // angles as a 1D interval. Since the interval wraps around, we
+ // represent it as an Interval, i.e. an interval on the unit
+ // circle.
+ origin := p[index]
+ frame := getFrame(origin)
+
+ // As we go along, we keep track of the current wedge of angles
+ // and the distance to the last vertex (which must be
+ // non-decreasing).
+ currentWedge := s1.FullInterval()
+ var lastDistance s1.Angle
+
+ for index++; index < len(p); index++ {
+ candidate := p[index]
+ distance := origin.Distance(candidate)
+
+ // We don't allow simplification to create edges longer than
+ // 90 degrees, to avoid numeric instability as lengths
+ // approach 180 degrees. We do need to allow for original
+ // edges longer than 90 degrees, though.
+ if distance > math.Pi/2 && lastDistance > 0 {
+ break
+ }
+
+ // Vertices must be in increasing order along the ray, except
+ // for the initial disc around the origin.
+ if distance < lastDistance && lastDistance > tolerance {
+ break
+ }
+
+ lastDistance = distance
+
+ // Points that are within the tolerance distance of the origin
+ // do not constrain the ray direction, so we can ignore them.
+ if distance <= tolerance {
+ continue
+ }
+
+ // If the current wedge of angles does not contain the angle
+ // to this vertex, then stop right now. Note that the wedge
+ // of possible ray angles is not necessarily empty yet, but we
+ // can't continue unless we are willing to backtrack to the
+ // last vertex that was contained within the wedge (since we
+ // don't create new vertices). This would be more complicated
+ // and also make the worst-case running time more than linear.
+ direction := toFrame(frame, candidate)
+ center := math.Atan2(direction.Y, direction.X)
+ if !currentWedge.Contains(center) {
+ break
+ }
+
+ // To determine how this vertex constrains the possible ray
+ // angles, consider the triangle ABC where A is the origin, B
+ // is the candidate vertex, and C is one of the two tangent
+ // points between A and the spherical cap of radius
+ // tolerance centered at B. Then from the spherical law of
+ // sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are
+ // the lengths of the edges opposite A and C. In our case C
+ // is a 90 degree angle, therefore A = asin(sin(a) / sin(c)).
+ // Angle A is the half-angle of the allowable wedge.
+ halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians()))
+ target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle)
+ currentWedge = currentWedge.Intersection(target)
+ }
+
+ // We break out of the loop when we reach a vertex index that
+ // can't be included in the line segment, so back up by one
+ // vertex.
+ return index - 1
+}
+
+// SubsampleVertices returns a subsequence of vertex indices such that the
+// polyline connecting these vertices is never further than the given tolerance from
+// the original polyline. Provided the first and last vertices are distinct,
+// they are always preserved; if they are not, the subsequence may contain
+// only a single index.
+//
+// Some useful properties of the algorithm:
+//
+// - It runs in linear time.
+//
+// - The output always represents a valid polyline. In particular, adjacent
+// output vertices are never identical or antipodal.
+//
+// - The method is not optimal, but it tends to produce 2-3% fewer
+// vertices than the Douglas-Peucker algorithm with the same tolerance.
+//
+// - The output is parametrically equivalent to the original polyline to
+// within the given tolerance. For example, if a polyline backtracks on
+// itself and then proceeds onwards, the backtracking will be preserved
+// (to within the given tolerance). This is different than the
+// Douglas-Peucker algorithm which only guarantees geometric equivalence.
+func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int {
+ var result []int
+
+ if len(*p) < 1 {
+ return result
+ }
+
+ result = append(result, 0)
+ clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0))
+
+ for index := 0; index+1 < len(*p); {
+ nextIndex := findEndVertex(*p, clampedTolerance, index)
+ // Don't create duplicate adjacent vertices.
+ if (*p)[nextIndex] != (*p)[index] {
+ result = append(result, nextIndex)
+ }
+ index = nextIndex
+ }
+
+ return result
+}
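+
+// exampleSubsampleVertices is an illustrative sketch, not part of the
+// upstream library: it simplifies a polyline to within a one-degree
+// tolerance and rebuilds a polyline from the returned vertex indices.
+func exampleSubsampleVertices(p *Polyline) *Polyline {
+ simplified := make(Polyline, 0)
+ for _, i := range p.SubsampleVertices(s1.Degree) {
+ simplified = append(simplified, (*p)[i])
+ }
+ return &simplified
+}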
+
+// Encode encodes the Polyline.
+func (p Polyline) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ p.encode(e)
+ return e.err
+}
+
+func (p Polyline) encode(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeUint32(uint32(len(p)))
+ for _, v := range p {
+ e.writeFloat64(v.X)
+ e.writeFloat64(v.Y)
+ e.writeFloat64(v.Z)
+ }
+}
+
+// Decode decodes the polyline.
+func (p *Polyline) Decode(r io.Reader) error {
+ d := decoder{r: asByteReader(r)}
+ p.decode(d)
+ return d.err
+}
+
+func (p *Polyline) decode(d decoder) {
+ version := d.readInt8()
+ if d.err != nil {
+ return
+ }
+ if int(version) != int(encodingVersion) {
+ d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
+ return
+ }
+ nvertices := d.readUint32()
+ if d.err != nil {
+ return
+ }
+ if nvertices > maxEncodedVertices {
+ d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
+ return
+ }
+ *p = make([]Point, nvertices)
+ for i := range *p {
+ (*p)[i].X = d.readFloat64()
+ (*p)[i].Y = d.readFloat64()
+ (*p)[i].Z = d.readFloat64()
+ }
+}
+
+// Project returns a point on the polyline that is closest to the given point,
+// and the index of the next vertex after the projected point. The
+// value of that index is always in the range [1, len(polyline)].
+// The polyline must not be empty.
+func (p *Polyline) Project(point Point) (Point, int) {
+ if len(*p) == 1 {
+ // If there is only one vertex, it is always closest to any given point.
+ return (*p)[0], 1
+ }
+
+ // Initial value larger than any possible distance on the unit sphere.
+ minDist := 10 * s1.Radian
+ minIndex := -1
+
+ // Find the line segment in the polyline that is closest to the point given.
+ for i := 1; i < len(*p); i++ {
+ if dist := DistanceFromSegment(point, (*p)[i-1], (*p)[i]); dist < minDist {
+ minDist = dist
+ minIndex = i
+ }
+ }
+
+ // Compute the point on the segment found that is closest to the point given.
+ closest := Project(point, (*p)[minIndex-1], (*p)[minIndex])
+ if closest == (*p)[minIndex] {
+ minIndex++
+ }
+
+ return closest, minIndex
+}
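+
+// exampleProject is an illustrative sketch, not part of the upstream
+// library: it snaps a query point onto the polyline and uses the returned
+// next-vertex index with Uninterpolate (defined below) to report how far
+// along the polyline the snapped point lies, as a fraction in [0, 1].
+func exampleProject(p *Polyline, query Point) float64 {
+ closest, next := p.Project(query)
+ return p.Uninterpolate(closest, next)
+}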
+
+// IsOnRight reports whether the point given is on the right hand side of the
+// polyline, using a naive definition of "right-hand-sideness" where the point
+// is on the RHS of the polyline iff it is on the RHS of the line segment in
+// the polyline to which it is closest.
+// The polyline must have at least 2 vertices.
+func (p *Polyline) IsOnRight(point Point) bool {
+ // If the closest point C is an interior vertex of the polyline, let B and D
+ // be the previous and next vertices. The given point P is on the right of
+ // the polyline (locally) if B, P, D are ordered CCW around vertex C.
+ closest, next := p.Project(point)
+ if closest == (*p)[next-1] && next > 1 && next < len(*p) {
+ if point == (*p)[next-1] {
+ // Polyline vertices are not on the RHS.
+ return false
+ }
+ return OrderedCCW((*p)[next-2], point, (*p)[next], (*p)[next-1])
+ }
+ // Otherwise, the closest point C is incident to exactly one polyline edge.
+ // We test the point P against that edge.
+ if next == len(*p) {
+ next--
+ }
+ return Sign(point, (*p)[next], (*p)[next-1])
+}
+
+// Validate checks whether this is a valid polyline or not.
+func (p *Polyline) Validate() error {
+ // All vertices must be unit length.
+ for i, pt := range *p {
+ if !pt.IsUnit() {
+ return fmt.Errorf("vertex %d is not unit length", i)
+ }
+ }
+
+ // Adjacent vertices must not be identical or antipodal.
+ for i := 1; i < len(*p); i++ {
+ prev, cur := (*p)[i-1], (*p)[i]
+ if prev == cur {
+ return fmt.Errorf("vertices %d and %d are identical", i-1, i)
+ }
+ if prev == (Point{cur.Mul(-1)}) {
+ return fmt.Errorf("vertices %d and %d are antipodal", i-1, i)
+ }
+ }
+
+ return nil
+}
+
+// Intersects reports whether this polyline intersects the given polyline. If
+// the polylines share a vertex they are considered to be intersecting. When a
+// polyline endpoint is the only intersection with the other polyline, the
+// function may return true or false arbitrarily.
+//
+// The running time is quadratic in the number of vertices.
+func (p *Polyline) Intersects(o *Polyline) bool {
+ if len(*p) == 0 || len(*o) == 0 {
+ return false
+ }
+
+ if !p.RectBound().Intersects(o.RectBound()) {
+ return false
+ }
+
+ // TODO(roberts): Use ShapeIndex here.
+ for i := 1; i < len(*p); i++ {
+ crosser := NewChainEdgeCrosser((*p)[i-1], (*p)[i], (*o)[0])
+ for j := 1; j < len(*o); j++ {
+ if crosser.ChainCrossingSign((*o)[j]) != DoNotCross {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Interpolate returns the point whose distance from vertex 0 along the polyline is
+// the given fraction of the polyline's total length, and the index of
+// the next vertex after the interpolated point P. Fractions less than zero
+// or greater than one are clamped. The return value is unit length. The cost of
+// this function is currently linear in the number of vertices.
+//
+// This method allows the caller to easily construct a given suffix of the
+// polyline by concatenating P with the polyline vertices starting at that next
+// vertex. Note that P is guaranteed to be different than the point at the next
+// vertex, so this will never result in a duplicate vertex.
+//
+// The polyline must not be empty. Note that if fraction >= 1.0, then the next
+// vertex will be set to len(p) (indicating that no vertices from the polyline
+// need to be appended). The value of the next vertex is always between 1 and
+// len(p).
+//
+// This method can also be used to construct a prefix of the polyline, by
+// taking the polyline vertices up to next vertex-1 and appending the
+// returned point P if it is different from the last vertex (since in this
+// case there is no guarantee of distinctness).
+func (p *Polyline) Interpolate(fraction float64) (Point, int) {
+ // We intentionally let the (fraction >= 1) case fall through, since
+ // we need to handle it in the loop below in any case because of
+ // possible roundoff errors.
+ if fraction <= 0 {
+ return (*p)[0], 1
+ }
+ target := s1.Angle(fraction) * p.Length()
+
+ for i := 1; i < len(*p); i++ {
+ length := (*p)[i-1].Distance((*p)[i])
+ if target < length {
+ // This interpolates with respect to arc length rather than
+ // straight-line distance, and produces a unit-length result.
+ result := InterpolateAtDistance(target, (*p)[i-1], (*p)[i])
+
+ // It is possible that (result == vertex(i)) due to rounding errors.
+ if result == (*p)[i] {
+ return result, i + 1
+ }
+ return result, i
+ }
+ target -= length
+ }
+
+ return (*p)[len(*p)-1], len(*p)
+}
+
+// Uninterpolate is the inverse operation of Interpolate. Given a point on the
+// polyline, it returns the ratio of the distance to the point from the
+// beginning of the polyline over the length of the polyline. The return
+// value is always between 0 and 1 inclusive.
+//
+// The polyline should not be empty. If it has fewer than 2 vertices, the
+// return value is zero.
+func (p *Polyline) Uninterpolate(point Point, nextVertex int) float64 {
+ if len(*p) < 2 {
+ return 0
+ }
+
+ var sum s1.Angle
+ for i := 1; i < nextVertex; i++ {
+ sum += (*p)[i-1].Distance((*p)[i])
+ }
+ lengthToPoint := sum + (*p)[nextVertex-1].Distance(point)
+ for i := nextVertex; i < len(*p); i++ {
+ sum += (*p)[i-1].Distance((*p)[i])
+ }
+ // The ratio can be greater than 1.0 due to rounding errors or because the
+ // point is not exactly on the polyline.
+ return minFloat64(1.0, float64(lengthToPoint/sum))
+}
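+
+// exampleInterpolateRoundTrip is an illustrative sketch, not part of the
+// upstream library: Uninterpolate inverts Interpolate up to rounding, so
+// for a valid polyline of at least two vertices the result is
+// approximately 0.25.
+func exampleInterpolateRoundTrip(p *Polyline) float64 {
+ point, next := p.Interpolate(0.25)
+ return p.Uninterpolate(point, next)
+}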
+
+// TODO(roberts): Differences from C++.
+// NearlyCoversPolyline
+// InitToSnapped
+// InitToSimplified
+// SnapLevel
+// encode/decode compressed
diff --git a/vendor/github.com/golang/geo/s2/polyline_measures.go b/vendor/github.com/golang/geo/s2/polyline_measures.go
new file mode 100644
index 000000000..38ce991b5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polyline_measures.go
@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file defines various measures for polylines on the sphere. These are
+// low-level methods that work directly with arrays of Points. They are used to
+// implement the methods in various other measures files.
+
+import (
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// polylineLength returns the length of the given Polyline.
+// It returns 0 for polylines with fewer than two vertices.
+func polylineLength(p []Point) s1.Angle {
+ var length s1.Angle
+
+ for i := 1; i < len(p); i++ {
+ length += p[i-1].Distance(p[i])
+ }
+ return length
+}
+
+// polylineCentroid returns the true centroid of the polyline multiplied by the
+// length of the polyline. The result is not unit length, so you may wish to
+// normalize it.
+//
+// Scaling by the Polyline length makes it easy to compute the centroid
+// of several Polylines (by simply adding up their centroids).
+//
+// Note that for degenerate Polylines (e.g., AA) this returns Point(0, 0, 0).
+// (This answer is correct; the result of this function is a line integral over
+// the polyline, whose value is always zero if the polyline is degenerate.)
+func polylineCentroid(p []Point) Point {
+ var centroid r3.Vector
+ for i := 1; i < len(p); i++ {
+ centroid = centroid.Add(EdgeTrueCentroid(p[i-1], p[i]).Vector)
+ }
+ return Point{centroid}
+}
diff --git a/vendor/github.com/golang/geo/s2/predicates.go b/vendor/github.com/golang/geo/s2/predicates.go
new file mode 100644
index 000000000..9fc5e1751
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/predicates.go
@@ -0,0 +1,701 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// This file contains various predicates that are guaranteed to produce
+// correct, consistent results. They are also relatively efficient. This is
+// achieved by computing conservative error bounds and falling back to high
+// precision or even exact arithmetic when the result is uncertain. Such
+// predicates are useful in implementing robust algorithms.
+//
+// See also EdgeCrosser, which implements various exact
+// edge-crossing predicates more efficiently than can be done here.
+
+import (
+ "math"
+ "math/big"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ // If any other machine architectures need to be supported, these next three
+ // values will need to be updated.
+
+ // epsilon is a small number that represents a reasonable level of noise between two
+ // values that can be considered to be equal.
+ epsilon = 1e-15
+ // dblEpsilon is a smaller number for values that require more precision.
+ // This is the C++ DBL_EPSILON equivalent.
+ dblEpsilon = 2.220446049250313e-16
+ // dblError is the C++ value for S2 rounding_epsilon().
+ dblError = 1.110223024625156e-16
+
+ // maxDeterminantError is the maximum error in computing (AxB).C where all vectors
+ // are unit length. Using standard inequalities, it can be shown that
+ //
+ // fl(AxB) = AxB + D where |D| <= (|AxB| + (2/sqrt(3))*|A|*|B|) * e
+ //
+ // where "fl()" denotes a calculation done in floating-point arithmetic,
+ // |x| denotes either absolute value or the L2-norm as appropriate, and
+ // e is a reasonably small value near the noise level of floating point
+ // number accuracy. Similarly,
+ //
+ // fl(B.C) = B.C + d where |d| <= (|B.C| + 2*|B|*|C|) * e .
+ //
+ // Applying these bounds to the unit-length vectors A,B,C and neglecting
+ // relative error (which does not affect the sign of the result), we get
+ //
+ // fl((AxB).C) = (AxB).C + d where |d| <= (3 + 2/sqrt(3)) * e
+ maxDeterminantError = 1.8274 * dblEpsilon
+
+ // detErrorMultiplier is the factor to scale the magnitudes by when checking
+ // for the sign of set of points with certainty. Using a similar technique to
+ // the one used for maxDeterminantError, the error is at most:
+ //
+ // |d| <= (3 + 6/sqrt(3)) * |A-C| * |B-C| * e
+ //
+ // If the determinant magnitude is larger than this value then we know
+ // its sign with certainty.
+ detErrorMultiplier = 3.2321 * dblEpsilon
+)
+
+// Direction is an indication of the ordering of a set of points.
+type Direction int
+
+// These are the three options for the direction of a set of points.
+const (
+ Clockwise Direction = -1
+ Indeterminate Direction = 0
+ CounterClockwise Direction = 1
+)
+
+// newBigFloat constructs a new big.Float with maximum precision.
+func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) }
+
+// Sign returns true if the points A, B, C are strictly counterclockwise,
+// and returns false if the points are clockwise or collinear (i.e. if they are all
+// contained on some great circle).
+//
+// Due to numerical errors, situations may arise that are mathematically
+// impossible, e.g. ABC may be considered strictly CCW while BCA is not.
+// However, the implementation guarantees the following:
+//
+// If Sign(a,b,c), then !Sign(c,b,a) for all a,b,c.
+func Sign(a, b, c Point) bool {
+ // NOTE(dnadasi): In the C++ API the equivalent method here was known as "SimpleSign".
+
+ // We compute the signed volume of the parallelepiped ABC. The usual
+ // formula for this is (A ⨯ B) · C, but we compute it here using (C ⨯ A) · B
+ // in order to ensure that ABC and CBA are not both CCW. This follows
+ // from the following identities (which are true numerically, not just
+ // mathematically):
+ //
+ // (1) x ⨯ y == -(y ⨯ x)
+ // (2) -x · y == -(x · y)
+ return c.Cross(a.Vector).Dot(b.Vector) > 0
+}
+
+// RobustSign returns a Direction representing the ordering of the points.
+// CounterClockwise is returned if the points are in counter-clockwise order,
+// Clockwise for clockwise, and Indeterminate if any two points are the same (collinear),
+// or the sign could not completely be determined.
+//
+// This function has additional logic to make sure that the above properties hold even
+// when the three points are coplanar, and to deal with the limitations of
+// floating-point arithmetic.
+//
+// RobustSign satisfies the following conditions:
+//
+// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a
+// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
+// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
+//
+// In other words:
+//
+// (1) The result is Indeterminate if and only if two points are the same.
+// (2) Rotating the order of the arguments does not affect the result.
+// (3) Exchanging any two arguments inverts the result.
+//
+// On the other hand, note that it is not true in general that
+// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities
+// involving antipodal points.
+func RobustSign(a, b, c Point) Direction {
+ sign := triageSign(a, b, c)
+ if sign == Indeterminate {
+ sign = expensiveSign(a, b, c)
+ }
+ return sign
+}
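+
+// exampleRobustSignProperties is an illustrative sketch, not part of the
+// upstream library: it checks properties (2) and (3) above for three given
+// points, and should report true for any inputs.
+func exampleRobustSignProperties(a, b, c Point) bool {
+ d := RobustSign(a, b, c)
+ return RobustSign(b, c, a) == d && RobustSign(c, b, a) == -d
+}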
+
+// stableSign reports the direction sign of the points in a numerically stable way.
+// Unlike triageSign, this method can usually compute the correct determinant sign
+// even when all three points are as collinear as possible. For example if three
+// points are spaced 1km apart along a random line on the Earth's surface using
+// the nearest representable points, there is only a 0.4% chance that this method
+// will not be able to find the determinant sign. The probability of failure
+// decreases as the points get closer together; if the collinear points are 1 meter
+// apart, the failure rate drops to 0.0004%.
+//
+// This method could be extended to also handle nearly-antipodal points, but antipodal
+// points are rare in practice so it seems better to simply fall back to
+// exact arithmetic in that case.
+func stableSign(a, b, c Point) Direction {
+ ab := b.Sub(a.Vector)
+ ab2 := ab.Norm2()
+ bc := c.Sub(b.Vector)
+ bc2 := bc.Norm2()
+ ca := a.Sub(c.Vector)
+ ca2 := ca.Norm2()
+
+ // Now compute the determinant ((A-C)x(B-C)).C, where the vertices have been
+ // cyclically permuted if necessary so that AB is the longest edge. (This
+ // minimizes the magnitude of cross product.) At the same time we also
+ // compute the maximum error in the determinant.
+
+ // The two shortest edges, pointing away from their common point.
+ var e1, e2, op r3.Vector
+ if ab2 >= bc2 && ab2 >= ca2 {
+ // AB is the longest edge.
+ e1, e2, op = ca, bc, c.Vector
+ } else if bc2 >= ca2 {
+ // BC is the longest edge.
+ e1, e2, op = ab, ca, a.Vector
+ } else {
+ // CA is the longest edge.
+ e1, e2, op = bc, ab, b.Vector
+ }
+
+ det := -e1.Cross(e2).Dot(op)
+ maxErr := detErrorMultiplier * math.Sqrt(e1.Norm2()*e2.Norm2())
+
+ // If the determinant isn't zero, within maxErr, we know definitively the point ordering.
+ if det > maxErr {
+ return CounterClockwise
+ }
+ if det < -maxErr {
+ return Clockwise
+ }
+ return Indeterminate
+}
+
+// triageSign returns the direction sign of the points. It returns Indeterminate if two
+// points are identical or the result is uncertain. Uncertain cases can be resolved, if
+// desired, by calling expensiveSign.
+//
+// The purpose of this method is to allow additional cheap tests to be done without
+// calling expensiveSign.
+func triageSign(a, b, c Point) Direction {
+ det := a.Cross(b.Vector).Dot(c.Vector)
+ if det > maxDeterminantError {
+ return CounterClockwise
+ }
+ if det < -maxDeterminantError {
+ return Clockwise
+ }
+ return Indeterminate
+}
+
+// expensiveSign reports the direction sign of the points. It returns Indeterminate
+// if two of the input points are the same. It uses multiple-precision arithmetic
+// to ensure that its results are always self-consistent.
+func expensiveSign(a, b, c Point) Direction {
+ // Return Indeterminate if and only if two points are the same.
+ // This ensures RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a.
+ // ie. Property 1 of RobustSign.
+ if a == b || b == c || c == a {
+ return Indeterminate
+ }
+
+ // Next we try recomputing the determinant still using floating-point
+ // arithmetic but in a more precise way. This is more expensive than the
+ // simple calculation done by triageSign, but it is still *much* cheaper
+ // than using arbitrary-precision arithmetic. This optimization is able to
+ // compute the correct determinant sign in virtually all cases except when
+ // the three points are truly collinear (e.g., three points on the equator).
+ detSign := stableSign(a, b, c)
+ if detSign != Indeterminate {
+ return detSign
+ }
+
+ // Otherwise fall back to exact arithmetic and symbolic permutations.
+ return exactSign(a, b, c, true)
+}
+
+// exactSign reports the direction sign of the points computed using high-precision
+// arithmetic and/or symbolic perturbations.
+func exactSign(a, b, c Point, perturb bool) Direction {
+ // Sort the three points in lexicographic order, keeping track of the sign
+ // of the permutation. (Each exchange inverts the sign of the determinant.)
+ permSign := CounterClockwise
+ pa := &a
+ pb := &b
+ pc := &c
+ if pa.Cmp(pb.Vector) > 0 {
+ pa, pb = pb, pa
+ permSign = -permSign
+ }
+ if pb.Cmp(pc.Vector) > 0 {
+ pb, pc = pc, pb
+ permSign = -permSign
+ }
+ if pa.Cmp(pb.Vector) > 0 {
+ pa, pb = pb, pa
+ permSign = -permSign
+ }
+
+ // Construct multiple-precision versions of the sorted points and compute
+ // their precise 3x3 determinant.
+ xa := r3.PreciseVectorFromVector(pa.Vector)
+ xb := r3.PreciseVectorFromVector(pb.Vector)
+ xc := r3.PreciseVectorFromVector(pc.Vector)
+ xbCrossXc := xb.Cross(xc)
+ det := xa.Dot(xbCrossXc)
+
+ // The precision of big.Float is high enough that the result should always
+ // be exact enough (no rounding was performed).
+
+ // If the exact determinant is non-zero, we're done.
+ detSign := Direction(det.Sign())
+ if detSign == Indeterminate && perturb {
+ // Otherwise, we need to resort to symbolic perturbations to resolve the
+ // sign of the determinant.
+ detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc)
+ }
+ return permSign * detSign
+}
+
+// symbolicallyPerturbedSign reports the sign of the determinant of three points
+// A, B, C under a model where every possible Point is slightly perturbed by
+// a unique infinitesimal amount such that no three perturbed points are
+// collinear and no four points are coplanar. The perturbations are so small
+// that they do not change the sign of any determinant that was non-zero
+// before the perturbations, and therefore can be safely ignored unless the
+// determinant of three points is exactly zero (using multiple-precision
+// arithmetic). This returns CounterClockwise or Clockwise according to the
+// sign of the determinant after the symbolic perturbations are taken into account.
+//
+// Since the symbolic perturbation of a given point is fixed (i.e., the
+// perturbation is the same for all calls to this method and does not depend
+// on the other two arguments), the results of this method are always
+// self-consistent. It will never return results that would correspond to an
+// impossible configuration of non-degenerate points.
+//
+// This requires that the 3x3 determinant of A, B, C must be exactly zero.
+// And the points must be distinct, with A < B < C in lexicographic order.
+//
+// Reference:
+// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
+// Graphics, 1990).
+//
+func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
+ // This method requires that the points are sorted in lexicographically
+ // increasing order. This is because every possible Point has its own
+ // symbolic perturbation such that if A < B then the symbolic perturbation
+ // for A is much larger than the perturbation for B.
+ //
+ // Alternatively, we could sort the points in this method and keep track of
+ // the sign of the permutation, but it is more efficient to do this before
+ // converting the inputs to the multi-precision representation, and this
+ // also lets us re-use the result of the cross product B x C.
+ //
+ // Every input coordinate x[i] is assigned a symbolic perturbation dx[i].
+ // We then compute the sign of the determinant of the perturbed points,
+ // i.e.
+ // | a.X+da.X a.Y+da.Y a.Z+da.Z |
+ // | b.X+db.X b.Y+db.Y b.Z+db.Z |
+ // | c.X+dc.X c.Y+dc.Y c.Z+dc.Z |
+ //
+ // The perturbations are chosen such that
+ //
+ // da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X
+ //
+ // where each perturbation is so much smaller than the previous one that we
+ // don't even need to consider it unless the coefficients of all previous
+ // perturbations are zero. In fact, it is so small that we don't need to
+ // consider it unless the coefficients of all products of the previous
+ // perturbations are zero. For example, we don't need to consider the
+ // coefficient of db.Y unless the coefficient of db.Z*da.X is zero.
+ //
+ // The following code simply enumerates the coefficients of the perturbations
+ // (and products of perturbations) that appear in the determinant above, in
+ // order of decreasing perturbation magnitude. The first non-zero
+ // coefficient determines the sign of the result. The easiest way to
+ // enumerate the coefficients in the correct order is to pretend that each
+ // perturbation is some tiny value "eps" raised to a power of two:
+ //
+ // eps** 1 2 4 8 16 32 64 128 256
+ // da.Z da.Y da.X db.Z db.Y db.X dc.Z dc.Y dc.X
+ //
+ // Essentially we can then just count in binary and test the corresponding
+ // subset of perturbations at each step. So for example, we must test the
+ // coefficient of db.Z*da.X before db.Y because eps**12 > eps**16.
+ //
+ // Of course, not all products of these perturbations appear in the
+ // determinant above, since the determinant only contains the products of
+ // elements in distinct rows and columns. Thus we don't need to consider
+ // da.Z*da.Y, db.Y*da.Y, etc. Furthermore, sometimes different pairs of
+ // perturbations have the same coefficient in the determinant; for example,
+ // da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore
+ // we only need to test this coefficient the first time we encounter it in
+ // the binary order above (which will be db.Y*da.X).
+ //
+ // The sequence of tests below also appears in Table 4-ii of the paper
+ // referenced above, if you just want to look it up, with the following
+ // translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that
+ // some of the signs are different because the opposite cross product is
+ // used (e.g., B x C rather than C x B).
+
+ detSign := bCrossC.Z.Sign() // da.Z
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = bCrossC.Y.Sign() // da.Y
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = bCrossC.X.Sign() // da.X
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+
+ detSign = newBigFloat().Sub(newBigFloat().Mul(c.X, a.Y), newBigFloat().Mul(c.Y, a.X)).Sign() // db.Z
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = c.X.Sign() // db.Z * da.Y
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = -(c.Y.Sign()) // db.Z * da.X
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+
+ detSign = newBigFloat().Sub(newBigFloat().Mul(c.Z, a.X), newBigFloat().Mul(c.X, a.Z)).Sign() // db.Y
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = c.Z.Sign() // db.Y * da.X
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+
+ // The following test is listed in the paper, but it is redundant because
+ // the previous tests guarantee that C == (0, 0, 0).
+ // (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
+
+ detSign = newBigFloat().Sub(newBigFloat().Mul(a.X, b.Y), newBigFloat().Mul(a.Y, b.X)).Sign() // dc.Z
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = -(b.X.Sign()) // dc.Z * da.Y
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = b.Y.Sign() // dc.Z * da.X
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ detSign = a.X.Sign() // dc.Z * db.Y
+ if detSign != 0 {
+ return Direction(detSign)
+ }
+ return CounterClockwise // dc.Z * db.Y * da.X
+}
+
+// CompareDistances returns -1, 0, or +1 according to whether AX < BX, A == B,
+// or AX > BX respectively. Distances are measured with respect to the positions
+// of X, A, and B as though they were reprojected to lie exactly on the surface of
+// the unit sphere. Furthermore, this method uses symbolic perturbations to
+// ensure that the result is non-zero whenever A != B, even when AX == BX
+// exactly, or even when A and B project to the same point on the sphere.
+// Such results are guaranteed to be self-consistent, i.e. if AB < BC and
+// BC < AC, then AB < AC.
+func CompareDistances(x, a, b Point) int {
+ // We start by comparing distances using dot products (i.e., cosine of the
+ // angle), because (1) this is the cheapest technique, and (2) it is valid
+ // over the entire range of possible angles. (We can only use the sin^2
+ // technique if both angles are less than 90 degrees or both angles are
+ // greater than 90 degrees.)
+ sign := triageCompareCosDistances(x, a, b)
+ if sign != 0 {
+ return sign
+ }
+
+ // Optimization for (a == b) to avoid falling back to exact arithmetic.
+ if a == b {
+ return 0
+ }
+
+ // It is much better numerically to compare distances using cos(angle) if
+ // the distances are near 90 degrees and sin^2(angle) if the distances are
+ // near 0 or 180 degrees. We only need to check one of the two angles when
+ // making this decision because the fact that the test above failed means
+ // that angles "a" and "b" are very close together.
+ cosAX := a.Dot(x.Vector)
+ if cosAX > 1/math.Sqrt2 {
+ // Angles < 45 degrees.
+ sign = triageCompareSin2Distances(x, a, b)
+ } else if cosAX < -1/math.Sqrt2 {
+ // Angles > 135 degrees. sin^2(angle) is decreasing in this range.
+ sign = -triageCompareSin2Distances(x, a, b)
+ }
+ // C++ adds an additional check here using 80-bit floats.
+ // This is skipped in Go because we only have 32 and 64 bit floats.
+
+ if sign != 0 {
+ return sign
+ }
+
+ sign = exactCompareDistances(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector))
+ if sign != 0 {
+ return sign
+ }
+ return symbolicCompareDistances(x, a, b)
+}
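+
+// compareDistancesExample is an illustrative sketch, not part of the
+// upstream library: it shows how CompareDistances orders two points by
+// their distance to a query point without computing either angle.
+func compareDistancesExample() int {
+ x := PointFromLatLng(LatLngFromDegrees(0, 0))
+ a := PointFromLatLng(LatLngFromDegrees(1, 0)) // about 1 degree from x
+ b := PointFromLatLng(LatLngFromDegrees(2, 0)) // about 2 degrees from x
+ // AX < BX here, so the result is -1.
+ return CompareDistances(x, a, b)
+}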
+
+// cosDistance returns cos(XY) where XY is the angle between X and Y, and the
+// maximum error amount in the result. This requires X and Y be normalized.
+func cosDistance(x, y Point) (cos, err float64) {
+ cos = x.Dot(y.Vector)
+ return cos, 9.5*dblError*math.Abs(cos) + 1.5*dblError
+}
+
+// sin2Distance returns sin**2(XY), where XY is the angle between X and Y,
+// and the maximum error amount in the result. This requires X and Y be normalized.
+func sin2Distance(x, y Point) (sin2, err float64) {
+ // The (x-y).Cross(x+y) trick eliminates almost all of the error due to
+ // x and y not being exactly unit length. This method is extremely accurate
+ // for small distances; the *relative* error in the result is O(dblError) for
+ // distances as small as dblError.
+ n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
+ sin2 = 0.25 * n.Norm2()
+ err = ((21+4*math.Sqrt(3))*dblError*sin2 +
+ 32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) +
+ 768*dblError*dblError*dblError*dblError)
+ return sin2, err
+}
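+
+// sin2DistanceSketch is an illustrative sketch, not part of the upstream
+// library. It shows why the trick above works: (x-y) cross (x+y) expands
+// to 2*(x cross y), and |x cross y| == sin(XY) for unit vectors, so
+// 0.25*|n|^2 == sin^2(XY). The naive form below computes the same value
+// but is less accurate when x and y are not exactly unit length.
+func sin2DistanceSketch(x, y Point) (naive, accurate float64) {
+ naive = x.Cross(y.Vector).Norm2()
+ accurate, _ = sin2Distance(x, y)
+ return naive, accurate
+}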
+
+// triageCompareCosDistances returns -1, 0, or +1 according to whether AX < BX,
+// A == B, or AX > BX by comparing the distances between them using cosDistance.
+func triageCompareCosDistances(x, a, b Point) int {
+ cosAX, cosAXerror := cosDistance(a, x)
+ cosBX, cosBXerror := cosDistance(b, x)
+ diff := cosAX - cosBX
+ err := cosAXerror + cosBXerror
+ if diff > err {
+ return -1
+ }
+ if diff < -err {
+ return 1
+ }
+ return 0
+}
+
+// triageCompareSin2Distances returns -1, 0, or +1 according to whether AX < BX,
+// A == B, or AX > BX by comparing the distances between them using sin2Distance.
+func triageCompareSin2Distances(x, a, b Point) int {
+ sin2AX, sin2AXerror := sin2Distance(a, x)
+ sin2BX, sin2BXerror := sin2Distance(b, x)
+ diff := sin2AX - sin2BX
+ err := sin2AXerror + sin2BXerror
+ if diff > err {
+ return 1
+ }
+ if diff < -err {
+ return -1
+ }
+ return 0
+}
+
+// exactCompareDistances returns -1, 0, or 1 after comparing using the values as
+// PreciseVectors.
+func exactCompareDistances(x, a, b r3.PreciseVector) int {
+ // This code produces the same result as though all points were reprojected
+ // to lie exactly on the surface of the unit sphere. It is based on testing
+ // whether x.Dot(a.Normalize()) < x.Dot(b.Normalize()), reformulated
+ // so that it can be evaluated using exact arithmetic.
+ cosAX := x.Dot(a)
+ cosBX := x.Dot(b)
+
+ // If the two values have different signs, we need to handle that case now
+ // before squaring them below.
+ aSign := cosAX.Sign()
+ bSign := cosBX.Sign()
+ if aSign != bSign {
+ // If cos(AX) > cos(BX), then AX < BX.
+ if aSign > bSign {
+ return -1
+ }
+ return 1
+ }
+ cosAX2 := newBigFloat().Mul(cosAX, cosAX)
+ cosBX2 := newBigFloat().Mul(cosBX, cosBX)
+ cmp := newBigFloat().Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2()))
+ return aSign * cmp.Sign()
+}
+
+// symbolicCompareDistances returns -1, 0, or +1 given three points such that AX == BX
+// (exactly) according to whether AX < BX, AX == BX, or AX > BX after symbolic
+// perturbations are taken into account.
+func symbolicCompareDistances(x, a, b Point) int {
+ // Our symbolic perturbation strategy is based on the following model.
+ // Similar to "simulation of simplicity", we assign a perturbation to every
+ // point such that if A < B, then the symbolic perturbation for A is much,
+ // much larger than the symbolic perturbation for B. We imagine that
+ // rather than projecting every point to lie exactly on the unit sphere,
+ // instead each point is positioned on its own tiny pedestal that raises it
+ // just off the surface of the unit sphere. This means that the distance AX
+ // is actually the true distance AX plus the (symbolic) heights of the
+ // pedestals for A and X. The pedestals are infinitesimally thin, so they do
+ // not affect distance measurements except at the two endpoints. If several
+ // points project to exactly the same point on the unit sphere, we imagine
+ // that they are placed on separate pedestals that are close together, where
+ // the distance between pedestals is much, much less than the height of any
+ // pedestal. (There are a finite number of Points, and therefore a finite
+ // number of pedestals, so this is possible.)
+ //
+ // If A < B, then A is on a higher pedestal than B, and therefore AX > BX.
+ switch a.Cmp(b.Vector) {
+ case -1:
+ return 1
+ case 1:
+ return -1
+ default:
+ return 0
+ }
+}
+
+var (
+ // ca45Degrees is a predefined ChordAngle representing (approximately) 45 degrees.
+ ca45Degrees = s1.ChordAngleFromSquaredLength(2 - math.Sqrt2)
+)
+
+// CompareDistance returns -1, 0, or +1 according to whether the distance XY is
+// respectively less than, equal to, or greater than the provided chord angle. Distances are measured
+// with respect to the positions of all points as though they are projected to lie
+// exactly on the surface of the unit sphere.
+func CompareDistance(x, y Point, r s1.ChordAngle) int {
+ // As with CompareDistances, we start by comparing dot products because
+ // the sin^2 method is only valid when the distance XY and the limit "r" are
+ // both less than 90 degrees.
+ sign := triageCompareCosDistance(x, y, float64(r))
+ if sign != 0 {
+ return sign
+ }
+
+ // Unlike with CompareDistances, it's not worth using the sin^2 method
+ // when the distance limit is near 180 degrees because the ChordAngle
+ // representation itself has a rounding error of up to 2e-8 radians for
+ // distances near 180 degrees.
+ if r < ca45Degrees {
+ sign = triageCompareSin2Distance(x, y, float64(r))
+ if sign != 0 {
+ return sign
+ }
+ }
+ return exactCompareDistance(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(y.Vector), big.NewFloat(float64(r)).SetPrec(big.MaxPrec))
+}
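+
+// compareDistanceExample is an illustrative sketch, not part of the
+// upstream library: CompareDistance acts as an exact distance predicate,
+// here answering "is XY within one degree?".
+func compareDistanceExample(x, y Point) bool {
+ limit := s1.ChordAngleFromAngle(1 * s1.Degree)
+ return CompareDistance(x, y, limit) <= 0
+}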
+
+// triageCompareCosDistance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using cos distance.
+func triageCompareCosDistance(x, y Point, r2 float64) int {
+ cosXY, cosXYError := cosDistance(x, y)
+ cosR := 1.0 - 0.5*r2
+ cosRError := 2.0 * dblError * cosR
+ diff := cosXY - cosR
+ err := cosXYError + cosRError
+ if diff > err {
+ return -1
+ }
+ if diff < -err {
+ return 1
+ }
+ return 0
+}
+
+// triageCompareSin2Distance returns -1, 0, or +1 according to whether the distance XY is
+// less than, equal to, or greater than r2 respectively using sin^2 distance.
+func triageCompareSin2Distance(x, y Point, r2 float64) int {
+ // Only valid for distance limits < 90 degrees.
+ sin2XY, sin2XYError := sin2Distance(x, y)
+ sin2R := r2 * (1.0 - 0.25*r2)
+ sin2RError := 3.0 * dblError * sin2R
+ diff := sin2XY - sin2R
+ err := sin2XYError + sin2RError
+ if diff > err {
+ return 1
+ }
+ if diff < -err {
+ return -1
+ }
+ return 0
+}
+
+var (
+ bigOne = big.NewFloat(1.0).SetPrec(big.MaxPrec)
+ bigHalf = big.NewFloat(0.5).SetPrec(big.MaxPrec)
+)
+
+// exactCompareDistance returns -1, 0, or +1 after comparing using PreciseVectors.
+func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int {
+ // This code produces the same result as though all points were reprojected
+ // to lie exactly on the surface of the unit sphere. It is based on
+ // comparing the cosine of the angle XY (when both points are projected to
+ // lie exactly on the sphere) to the given threshold.
+ cosXY := x.Dot(y)
+ cosR := newBigFloat().Sub(bigOne, newBigFloat().Mul(bigHalf, r2))
+
+ // If the two values have different signs, we need to handle that case now
+ // before squaring them below.
+ xySign := cosXY.Sign()
+ rSign := cosR.Sign()
+ if xySign != rSign {
+ if xySign > rSign {
+ return -1
+ }
+ return 1 // If cos(XY) > cos(r), then XY < r.
+ }
+ cmp := newBigFloat().Sub(
+ newBigFloat().Mul(
+ newBigFloat().Mul(cosR, cosR), newBigFloat().Mul(x.Norm2(), y.Norm2())),
+ newBigFloat().Mul(cosXY, cosXY))
+ return xySign * cmp.Sign()
+}
+
+// TODO(roberts): Differences from C++
+// CompareEdgeDistance
+// CompareEdgeDirections
+// EdgeCircumcenterSign
+// GetVoronoiSiteExclusion
+// GetClosestVertex
+// TriageCompareLineSin2Distance
+// TriageCompareLineCos2Distance
+// TriageCompareLineDistance
+// TriageCompareEdgeDistance
+// ExactCompareLineDistance
+// ExactCompareEdgeDistance
+// TriageCompareEdgeDirections
+// ExactCompareEdgeDirections
+// ArePointsAntipodal
+// ArePointsLinearlyDependent
+// GetCircumcenter
+// TriageEdgeCircumcenterSign
+// ExactEdgeCircumcenterSign
+// UnperturbedSign
+// SymbolicEdgeCircumcenterSign
+// ExactVoronoiSiteExclusion
diff --git a/vendor/github.com/golang/geo/s2/projections.go b/vendor/github.com/golang/geo/s2/projections.go
new file mode 100644
index 000000000..07b8e62d2
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/projections.go
@@ -0,0 +1,203 @@
+// Copyright 2018 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/s1"
+)
+
+// Projection defines an interface for different ways of mapping between s2 and r2 Points.
+// It can also define the coordinate wrapping behavior along each axis.
+type Projection interface {
+ // Project converts a point on the sphere to a projected 2D point.
+ Project(p Point) r2.Point
+
+ // Unproject converts a projected 2D point to a point on the sphere.
+ //
+ // If wrapping is defined for a given axis (see below), then this method
+ // should accept any real number for the corresponding coordinate.
+ Unproject(p r2.Point) Point
+
+ // FromLatLng is a convenience function equivalent to Project(LatLngToPoint(ll)),
+ // but the implementation is more efficient.
+ FromLatLng(ll LatLng) r2.Point
+
+ // ToLatLng is a convenience function equivalent to LatLngFromPoint(Unproject(p)),
+ // but the implementation is more efficient.
+ ToLatLng(p r2.Point) LatLng
+
+ // Interpolate returns the point obtained by interpolating the given
+ // fraction of the distance along the line from A to B.
+ // Fractions < 0 or > 1 result in extrapolation instead.
+ Interpolate(f float64, a, b r2.Point) r2.Point
+
+ // WrapDistance reports the coordinate wrapping distance along each axis.
+ // If this value is non-zero for a given axis, the coordinates are assumed
+ // to "wrap" with the given period. For example, if WrapDistance.Y == 360
+ // then (x, y) and (x, y + 360) should map to the same Point.
+ //
+ // This information is used to ensure that edges take the shortest path
+ // between two given points. For example, if coordinates represent
+ // (latitude, longitude) pairs in degrees and WrapDistance().Y == 360,
+ // then the edge (5:179, 5:-179) would be interpreted as spanning 2 degrees
+ // of longitude rather than 358 degrees.
+ //
+ // If a given axis does not wrap, its WrapDistance should be set to zero.
+ WrapDistance() r2.Point
+}
+
+// PlateCarreeProjection defines the "plate carree" (square plate) projection,
+// which converts points on the sphere to (longitude, latitude) pairs.
+// Coordinates can be scaled so that they represent radians, degrees, etc, but
+// the projection is always centered around (latitude=0, longitude=0).
+//
+// Note that (x, y) coordinates are backwards compared to the usual (latitude,
+// longitude) ordering, in order to match the usual convention for graphs in
+// which "x" is horizontal and "y" is vertical.
+type PlateCarreeProjection struct {
+ xWrap float64
+ toRadians float64 // Multiplier to convert coordinates to radians.
+ fromRadians float64 // Multiplier to convert coordinates from radians.
+}
+
+// NewPlateCarreeProjection constructs a plate carree projection where the
+// x-coordinates (lng) span [-xScale, xScale] and the y coordinates (lat)
+// span [-xScale/2, xScale/2]. For example if xScale==180 then the x
+// range is [-180, 180] and the y range is [-90, 90].
+//
+// By default coordinates are expressed in radians, i.e. the x range is
+// [-Pi, Pi] and the y range is [-Pi/2, Pi/2].
+func NewPlateCarreeProjection(xScale float64) Projection {
+ return &PlateCarreeProjection{
+ xWrap: 2 * xScale,
+ toRadians: math.Pi / xScale,
+ fromRadians: xScale / math.Pi,
+ }
+}
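+
+// plateCarreeExample is an illustrative sketch, not part of the upstream
+// library, using the degree-scaled projection described above.
+func plateCarreeExample() r2.Point {
+ proj := NewPlateCarreeProjection(180) // x spans [-180,180], y spans [-90,90]
+ // Roughly Seattle; the result is about (-122.3, 47.6), i.e. (lng, lat).
+ return proj.FromLatLng(LatLngFromDegrees(47.6, -122.3))
+}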
+
+// Project converts a point on the sphere to a projected 2D point.
+func (p *PlateCarreeProjection) Project(pt Point) r2.Point {
+ return p.FromLatLng(LatLngFromPoint(pt))
+}
+
+// Unproject converts a projected 2D point to a point on the sphere.
+func (p *PlateCarreeProjection) Unproject(pt r2.Point) Point {
+ return PointFromLatLng(p.ToLatLng(pt))
+}
+
+// FromLatLng returns the LatLng projected into an R2 Point.
+func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point {
+ return r2.Point{
+ X: p.fromRadians * ll.Lng.Radians(),
+ Y: p.fromRadians * ll.Lat.Radians(),
+ }
+}
+
+// ToLatLng returns the LatLng projected from the given R2 Point.
+func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng {
+ return LatLng{
+ Lat: s1.Angle(p.toRadians * pt.Y),
+ Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)),
+ }
+}
+
+// Interpolate returns the point obtained by interpolating the given
+// fraction of the distance along the line from A to B.
+func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
+ return a.Mul(1 - f).Add(b.Mul(f))
+}
+
+// WrapDistance reports the coordinate wrapping distance along each axis.
+func (p *PlateCarreeProjection) WrapDistance() r2.Point {
+ return r2.Point{p.xWrap, 0}
+}
+
+// MercatorProjection defines the spherical Mercator projection. Google Maps
+// uses this projection together with WGS84 coordinates, in which case it is
+// known as the "Web Mercator" projection (see Wikipedia). This class makes
+// no assumptions regarding the coordinate system of its input points, but
+// simply applies the spherical Mercator projection to them.
+//
+// The Mercator projection is finite in width (x) but infinite in height (y).
+// "x" corresponds to longitude, and spans a finite range such as [-180, 180]
+// (with coordinate wrapping), while "y" is a function of latitude and spans
+// an infinite range. (As "y" coordinates get larger, points get closer to
+// the north pole but never quite reach it.) The north and south poles have
+// infinite "y" values. (Note that this will cause problems if you tessellate
+// a Mercator edge where one endpoint is a pole. If you need to do this, clip
+// the edge first so that the "y" coordinate is no more than about 5 * maxX.)
+type MercatorProjection struct {
+ xWrap float64
+ toRadians float64 // Multiplier to convert coordinates to radians.
+ fromRadians float64 // Multiplier to convert coordinates from radians.
+}
+
+// NewMercatorProjection constructs a Mercator projection with the given maximum
+// longitude axis value corresponding to a range of [-maxLng, maxLng].
+// The horizontal and vertical axes are scaled equally.
+func NewMercatorProjection(maxLng float64) Projection {
+ return &MercatorProjection{
+ xWrap: 2 * maxLng,
+ toRadians: math.Pi / maxLng,
+ fromRadians: maxLng / math.Pi,
+ }
+}
+
+// Project converts a point on the sphere to a projected 2D point.
+func (p *MercatorProjection) Project(pt Point) r2.Point {
+ return p.FromLatLng(LatLngFromPoint(pt))
+}
+
+// Unproject converts a projected 2D point to a point on the sphere.
+func (p *MercatorProjection) Unproject(pt r2.Point) Point {
+ return PointFromLatLng(p.ToLatLng(pt))
+}
+
+// FromLatLng returns the LatLng projected into an R2 Point.
+func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point {
+ // This formula is more accurate near zero than the log(tan()) version.
+ // Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity.
+ sinPhi := math.Sin(float64(ll.Lat))
+ y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi))
+ return r2.Point{p.fromRadians * float64(ll.Lng), p.fromRadians * y}
+}
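+
+// mercatorYSketch is an illustrative sketch, not part of the upstream
+// library: both forms compute atanh(sin(lat)), but the one used above is
+// more accurate near zero than the classic log(tan()) version.
+func mercatorYSketch(lat float64) (stable, classic float64) {
+ sinPhi := math.Sin(lat)
+ stable = 0.5 * math.Log((1+sinPhi)/(1-sinPhi))
+ classic = math.Log(math.Tan(math.Pi/4 + lat/2))
+ return stable, classic
+}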
+
+// ToLatLng returns the LatLng projected from the given R2 Point.
+func (p *MercatorProjection) ToLatLng(pt r2.Point) LatLng {
+ // This formula is more accurate near zero than the atan(exp()) version.
+ x := p.toRadians * math.Remainder(pt.X, p.xWrap)
+ k := math.Exp(2 * p.toRadians * pt.Y)
+ var y float64
+ if math.IsInf(k, 0) {
+ y = math.Pi / 2
+ } else {
+ y = math.Asin((k - 1) / (k + 1))
+ }
+ return LatLng{s1.Angle(y), s1.Angle(x)}
+}
+
+// Interpolate returns the point obtained by interpolating the given
+// fraction of the distance along the line from A to B.
+func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
+ return a.Mul(1 - f).Add(b.Mul(f))
+}
+
+// WrapDistance reports the coordinate wrapping distance along each axis.
+func (p *MercatorProjection) WrapDistance() r2.Point {
+ return r2.Point{p.xWrap, 0}
+}
diff --git a/vendor/github.com/golang/geo/s2/query_options.go b/vendor/github.com/golang/geo/s2/query_options.go
new file mode 100644
index 000000000..9b7e38d62
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/query_options.go
@@ -0,0 +1,196 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+const maxQueryResults = math.MaxInt32
+
+// queryOptions represents the set of all configurable parameters used by all of
+// the Query types. Most of these fields have non-zero defaults, so initialization
+// is handled within each Query type. All of the exported methods accept
+// user-supplied sets of options to set or adjust as necessary.
+//
+// Several of the defaults depend on the distance interface type being used
+// (e.g. minDistance, maxDistance, etc.)
+//
+// If a user sets an option value that a given query type doesn't use, it is ignored.
+type queryOptions struct {
+ // maxResults specifies that at most MaxResults edges should be returned.
+ // This must be at least 1.
+ //
+ // The default value is to return all results.
+ maxResults int
+
+ // distanceLimit specifies that only edges whose distance to the target is
+ // within this distance should be returned.
+ //
+ // Note that edges whose distance is exactly equal to this are
+ // not returned. In most cases this doesn't matter (since distances are
+ // not computed exactly in the first place), but if such edges are needed
+ // then you can retrieve them by specifying the limit as the next
+ // largest representable distance, i.e. distanceLimit.Successor().
+ //
+ // The default value is the infinity value, such that all results will be
+ // returned.
+ distanceLimit s1.ChordAngle
+
+ // maxError specifies that edges up to MaxError further away than the true
+ // closest edges may be substituted in the result set, as long as such
+ // edges satisfy all the remaining search criteria (such as DistanceLimit).
+ // This option only has an effect if MaxResults is also specified;
+ // otherwise all edges closer than MaxDistance will always be returned.
+ //
+ // Note that this does not affect how the distance between edges is
+ // computed; it simply gives the algorithm permission to stop the search
+ // early as soon as the best possible improvement drops below MaxError.
+ //
+ // This can be used to implement distance predicates efficiently. For
+ // example, to determine whether the minimum distance is less than D, set
+ // MaxResults == 1 and MaxDistance == MaxError == D. This causes
+ // the algorithm to terminate as soon as it finds any edge whose distance
+ // is less than D, rather than continuing to search for an edge that is
+ // even closer.
+ //
+ // The default value is zero.
+ maxError s1.ChordAngle
+
+ // includeInteriors specifies that polygon interiors should be included
+ // when measuring distances. In other words, polygons that contain the target
+ // should have a distance of zero. (For targets consisting of multiple connected
+ // components, the distance is zero if any component is contained.) This
+ // is indicated in the results by returning a (ShapeID, EdgeID) pair
+ // with EdgeID == -1, i.e. this value denotes the polygon's interior.
+ //
+ // Note that for efficiency, any polygon that intersects the target may or
+ // may not have an EdgeID == -1 result. Such results are optional
+ // because in that case the distance to the polygon is already zero.
+ //
+ // The default value is true.
+ includeInteriors bool
+
+ // useBruteForce specifies that distances should be computed by examining
+ // every edge rather than using the ShapeIndex.
+ //
+ // TODO(roberts): When optimized is implemented, update the default to false.
+ // The default value is true.
+ useBruteForce bool
+
+ // region specifies that results must intersect the given Region.
+ //
+ // Note that if you want to set the region to a disc around a target
+ // point, it is faster to use a PointTarget with distanceLimit set
+ // instead. You can also set a distance limit and also require that results
+ // lie within a given rectangle.
+ //
+ // The default is nil (no region limits).
+ region Region
+}
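+
+// distancePredicateSketch is an illustrative sketch, not part of the
+// upstream library. It wires up the early-exit trick described in the
+// maxError comment above: with maxResults == 1 and
+// distanceLimit == maxError == d, a search stops at the first edge found
+// within d.
+func distancePredicateSketch(dist distance, d s1.ChordAngle) *queryOptions {
+ return newQueryOptions(dist).MaxResults(1).DistanceLimit(d).MaxError(d)
+}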
+
+// UseBruteForce sets or disables the use of brute force in a query.
+func (q *queryOptions) UseBruteForce(x bool) *queryOptions {
+ q.useBruteForce = x
+ return q
+}
+
+// IncludeInteriors specifies whether polygon interiors should be
+// included when measuring distances.
+func (q *queryOptions) IncludeInteriors(x bool) *queryOptions {
+ q.includeInteriors = x
+ return q
+}
+
+// MaxError specifies that edges up to dist further away than the true
+// matching edges may be substituted in the result set, as long as such
+// edges satisfy all the remaining search criteria (such as DistanceLimit).
+// This option only has an effect if MaxResults is also specified;
+// otherwise all edges closer than MaxDistance will always be returned.
+func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions {
+ q.maxError = x
+ return q
+}
+
+// MaxResults specifies that at most MaxResults edges should be returned.
+// This must be at least 1.
+func (q *queryOptions) MaxResults(x int) *queryOptions {
+ // TODO(roberts): What should be done if the value is <= 0?
+ q.maxResults = x
+ return q
+}
+
+// DistanceLimit specifies that only edges whose distance to the target is
+// within this distance should be returned. Edges whose distance is exactly
+// equal to the limit are not returned.
+//
+// To include values that are equal, specify the limit with the next largest
+// representable distance such as limit.Successor(), or set the option with
+// Furthest/ClosestInclusiveDistanceLimit.
+func (q *queryOptions) DistanceLimit(x s1.ChordAngle) *queryOptions {
+ q.distanceLimit = x
+ return q
+}
+
+// ClosestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) ClosestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+ q.distanceLimit = limit.Successor()
+ return q
+}
+
+// FurthestInclusiveDistanceLimit sets the distance limit such that results whose
+// distance is exactly equal to the limit are also returned.
+func (q *queryOptions) FurthestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions {
+ q.distanceLimit = limit.Predecessor()
+ return q
+}
+
+// ClosestConservativeDistanceLimit sets the distance limit such that it
+// also incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is less than or equal to limit will be returned
+// (along with some edges whose true distance is slightly greater).
+//
+// Algorithms that need to do exact distance comparisons can use this
+// option to find a set of candidate edges that can then be filtered
+// further (e.g., using CompareDistance).
+func (q *queryOptions) ClosestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
+ q.distanceLimit = limit.Expanded(minUpdateDistanceMaxError(limit))
+ return q
+}
+
+// FurthestConservativeDistanceLimit sets the distance limit such that it
+// also incorporates the error in distance calculations. This ensures that all
+// edges whose true distance is greater than or equal to limit will be returned
+// (along with some edges whose true distance is slightly less).
+func (q *queryOptions) FurthestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions {
+ q.distanceLimit = limit.Expanded(-minUpdateDistanceMaxError(limit))
+ return q
+}
+
+// newQueryOptions returns a set of options using the given distance type
+// with the proper default values.
+func newQueryOptions(d distance) *queryOptions {
+ return &queryOptions{
+ maxResults: maxQueryResults,
+ distanceLimit: d.infinity().chordAngle(),
+ maxError: 0,
+ includeInteriors: true,
+ useBruteForce: false,
+ region: nil,
+ }
+}
diff --git a/vendor/github.com/golang/geo/s2/rect.go b/vendor/github.com/golang/geo/s2/rect.go
new file mode 100644
index 000000000..cb4c93180
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/rect.go
@@ -0,0 +1,710 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Rect represents a closed latitude-longitude rectangle.
+type Rect struct {
+ Lat r1.Interval
+ Lng s1.Interval
+}
+
+var (
+ validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2}
+ validRectLngRange = s1.FullInterval()
+)
+
+// EmptyRect returns the empty rectangle.
+func EmptyRect() Rect { return Rect{r1.EmptyInterval(), s1.EmptyInterval()} }
+
+// FullRect returns the full rectangle.
+func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} }
+
+// RectFromLatLng constructs a rectangle containing a single point p.
+func RectFromLatLng(p LatLng) Rect {
+ return Rect{
+ Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()},
+ Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()},
+ }
+}
+
+// RectFromCenterSize constructs a rectangle with the given size and center.
+// center needs to be normalized, but size does not. The latitude
+// interval of the result is clamped to [-90,90] degrees, and the longitude
+// interval of the result is FullRect() if and only if the longitude size is
+// 360 degrees or more.
+//
+// Examples of clamping (in degrees):
+// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160]
+// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180]
+// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155]
+func RectFromCenterSize(center, size LatLng) Rect {
+ half := LatLng{size.Lat / 2, size.Lng / 2}
+ return RectFromLatLng(center).expanded(half)
+}
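+
+// rectFromCenterSizeExample is an illustrative sketch, not part of the
+// upstream library, showing the first clamping example above: the
+// latitude interval is clamped at the north pole.
+func rectFromCenterSizeExample() Rect {
+ center := LatLngFromDegrees(80, 170)
+ size := LatLngFromDegrees(40, 60)
+ // Yields lat=[60,90] and lng=[140,-160] (the longitude interval wraps).
+ return RectFromCenterSize(center, size)
+}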
+
+// IsValid returns true iff the rectangle is valid.
+// This requires Lat ⊆ [-π/2,π/2] and Lng ⊆ [-π,π], and Lat = ∅ iff Lng = ∅.
+func (r Rect) IsValid() bool {
+ return math.Abs(r.Lat.Lo) <= math.Pi/2 &&
+ math.Abs(r.Lat.Hi) <= math.Pi/2 &&
+ r.Lng.IsValid() &&
+ r.Lat.IsEmpty() == r.Lng.IsEmpty()
+}
+
+// IsEmpty reports whether the rectangle is empty.
+func (r Rect) IsEmpty() bool { return r.Lat.IsEmpty() }
+
+// IsFull reports whether the rectangle is full.
+func (r Rect) IsFull() bool { return r.Lat.Equal(validRectLatRange) && r.Lng.IsFull() }
+
+// IsPoint reports whether the rectangle is a single point.
+func (r Rect) IsPoint() bool { return r.Lat.Lo == r.Lat.Hi && r.Lng.Lo == r.Lng.Hi }
+
+// Vertex returns the i-th vertex of the rectangle (i = 0,1,2,3) in CCW order
+// (lower left, lower right, upper right, upper left).
+func (r Rect) Vertex(i int) LatLng {
+ var lat, lng float64
+
+ switch i {
+ case 0:
+ lat = r.Lat.Lo
+ lng = r.Lng.Lo
+ case 1:
+ lat = r.Lat.Lo
+ lng = r.Lng.Hi
+ case 2:
+ lat = r.Lat.Hi
+ lng = r.Lng.Hi
+ case 3:
+ lat = r.Lat.Hi
+ lng = r.Lng.Lo
+ }
+ return LatLng{s1.Angle(lat) * s1.Radian, s1.Angle(lng) * s1.Radian}
+}
+
+// Lo returns one corner of the rectangle.
+func (r Rect) Lo() LatLng {
+ return LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(r.Lng.Lo) * s1.Radian}
+}
+
+// Hi returns the other corner of the rectangle.
+func (r Rect) Hi() LatLng {
+ return LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(r.Lng.Hi) * s1.Radian}
+}
+
+// Center returns the center of the rectangle.
+func (r Rect) Center() LatLng {
+ return LatLng{s1.Angle(r.Lat.Center()) * s1.Radian, s1.Angle(r.Lng.Center()) * s1.Radian}
+}
+
+// Size returns the size of the Rect.
+func (r Rect) Size() LatLng {
+ return LatLng{s1.Angle(r.Lat.Length()) * s1.Radian, s1.Angle(r.Lng.Length()) * s1.Radian}
+}
+
+// Area returns the surface area of the Rect.
+func (r Rect) Area() float64 {
+ if r.IsEmpty() {
+ return 0
+ }
+ capDiff := math.Abs(math.Sin(r.Lat.Hi) - math.Sin(r.Lat.Lo))
+ return r.Lng.Length() * capDiff
+}
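+
+// rectAreaSketch is an illustrative sketch, not part of the upstream
+// library: for the full rectangle, Lng.Length() == 2*pi and
+// sin(pi/2) - sin(-pi/2) == 2, so Area returns 4*pi, the area of the
+// unit sphere.
+func rectAreaSketch() float64 {
+ return FullRect().Area()
+}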
+
+// AddPoint increases the size of the rectangle to include the given point.
+func (r Rect) AddPoint(ll LatLng) Rect {
+ if !ll.IsValid() {
+ return r
+ }
+ return Rect{
+ Lat: r.Lat.AddPoint(ll.Lat.Radians()),
+ Lng: r.Lng.AddPoint(ll.Lng.Radians()),
+ }
+}
+
+// expanded returns a rectangle that has been expanded by margin.Lat on each side
+// in the latitude direction, and by margin.Lng on each side in the longitude
+// direction. If either margin is negative, then it shrinks the rectangle on
+// the corresponding sides instead. The resulting rectangle may be empty.
+//
+// The latitude-longitude space has the topology of a cylinder. Longitudes
+// "wrap around" at +/-180 degrees, while latitudes are clamped to range [-90, 90].
+// This means that any expansion (positive or negative) of the full longitude range
+// remains full (since the "rectangle" is actually a continuous band around the
+// cylinder), while expansion of the full latitude range remains full only if the
+// margin is positive.
+//
+// If either the latitude or longitude interval becomes empty after
+// expansion by a negative margin, the result is empty.
+//
+// Note that if an expanded rectangle contains a pole, it may not contain
+// all possible lat/lng representations of that pole, e.g., both points [π/2,0]
+// and [π/2,1] represent the same pole, but they might not be contained by the
+// same Rect.
+//
+// If you are trying to grow a rectangle by a certain distance on the
+// sphere (e.g. 5km), refer to the ExpandedByDistance() C++ method implementation
+// instead.
+func (r Rect) expanded(margin LatLng) Rect {
+ lat := r.Lat.Expanded(margin.Lat.Radians())
+ lng := r.Lng.Expanded(margin.Lng.Radians())
+
+ if lat.IsEmpty() || lng.IsEmpty() {
+ return EmptyRect()
+ }
+
+ return Rect{
+ Lat: lat.Intersection(validRectLatRange),
+ Lng: lng,
+ }
+}
+
+func (r Rect) String() string { return fmt.Sprintf("[Lo%v, Hi%v]", r.Lo(), r.Hi()) }
+
+// PolarClosure returns the rectangle unmodified if it does not include either pole.
+// If it includes either pole, PolarClosure returns an expansion of the rectangle along
+// the longitudinal range to include all possible representations of the contained poles.
+func (r Rect) PolarClosure() Rect {
+ if r.Lat.Lo == -math.Pi/2 || r.Lat.Hi == math.Pi/2 {
+ return Rect{r.Lat, s1.FullInterval()}
+ }
+ return r
+}
+
+// Union returns the smallest Rect containing the union of this rectangle and the given rectangle.
+func (r Rect) Union(other Rect) Rect {
+ return Rect{
+ Lat: r.Lat.Union(other.Lat),
+ Lng: r.Lng.Union(other.Lng),
+ }
+}
+
+// Intersection returns the smallest rectangle containing the intersection of
+// this rectangle and the given rectangle. Note that the region of intersection
+// may consist of two disjoint rectangles, in which case a single rectangle
+// spanning both of them is returned.
+func (r Rect) Intersection(other Rect) Rect {
+ lat := r.Lat.Intersection(other.Lat)
+ lng := r.Lng.Intersection(other.Lng)
+
+ if lat.IsEmpty() || lng.IsEmpty() {
+ return EmptyRect()
+ }
+ return Rect{lat, lng}
+}
+
+// Intersects reports whether this rectangle and the other have any points in common.
+func (r Rect) Intersects(other Rect) bool {
+ return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng)
+}
+
+// CapBound returns a cap that contains Rect.
+func (r Rect) CapBound() Cap {
+ // We consider two possible bounding caps, one whose axis passes
+ // through the center of the lat-long rectangle and one whose axis
+ // is the north or south pole. We return the smaller of the two caps.
+
+ if r.IsEmpty() {
+ return EmptyCap()
+ }
+
+ var poleZ, poleAngle float64
+ if r.Lat.Hi+r.Lat.Lo < 0 {
+ // South pole axis yields smaller cap.
+ poleZ = -1
+ poleAngle = math.Pi/2 + r.Lat.Hi
+ } else {
+ poleZ = 1
+ poleAngle = math.Pi/2 - r.Lat.Lo
+ }
+ poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian)
+
+ // For bounding rectangles that span 180 degrees or less in longitude, the
+ // maximum cap size is achieved at one of the rectangle vertices. For
+ // rectangles that are larger than 180 degrees, we punt and always return a
+ // bounding cap centered at one of the two poles.
+ if math.Remainder(r.Lng.Hi-r.Lng.Lo, 2*math.Pi) >= 0 && r.Lng.Hi-r.Lng.Lo < 2*math.Pi {
+ midCap := CapFromPoint(PointFromLatLng(r.Center())).AddPoint(PointFromLatLng(r.Lo())).AddPoint(PointFromLatLng(r.Hi()))
+ if midCap.Height() < poleCap.Height() {
+ return midCap
+ }
+ }
+ return poleCap
+}
+
+// RectBound returns itself.
+func (r Rect) RectBound() Rect {
+ return r
+}
+
+// Contains reports whether this Rect contains the other Rect.
+func (r Rect) Contains(other Rect) bool {
+ return r.Lat.ContainsInterval(other.Lat) && r.Lng.ContainsInterval(other.Lng)
+}
+
+// ContainsCell reports whether the given Cell is contained by this Rect.
+func (r Rect) ContainsCell(c Cell) bool {
+ // A latitude-longitude rectangle contains a cell if and only if it contains
+ // the cell's bounding rectangle. This test is exact from a mathematical
+ // point of view, assuming that the bounds returned by Cell.RectBound()
+ // are tight. However, note that there can be a loss of precision when
+ // converting between representations -- for example, if an s2.Cell is
+ // converted to a polygon, the polygon's bounding rectangle may not contain
+ // the cell's bounding rectangle. This has some slightly unexpected side
+ // effects; for instance, if one creates an s2.Polygon from an s2.Cell, the
+ // polygon will contain the cell, but the polygon's bounding box will not.
+ return r.Contains(c.RectBound())
+}
+
+// ContainsLatLng reports whether the given LatLng is within the Rect.
+func (r Rect) ContainsLatLng(ll LatLng) bool {
+ if !ll.IsValid() {
+ return false
+ }
+ return r.Lat.Contains(ll.Lat.Radians()) && r.Lng.Contains(ll.Lng.Radians())
+}
+
+// ContainsPoint reports whether the given Point is within the Rect.
+func (r Rect) ContainsPoint(p Point) bool {
+ return r.ContainsLatLng(LatLngFromPoint(p))
+}
+
+// CellUnionBound computes a covering of the Rect.
+func (r Rect) CellUnionBound() []CellID {
+ return r.CapBound().CellUnionBound()
+}
+
+// intersectsLatEdge reports whether the edge AB intersects the given edge of constant
+// latitude. Requires the points to have unit length.
+func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
+ // Unfortunately, lines of constant latitude are curves on
+ // the sphere. They can intersect a straight edge in 0, 1, or 2 points.
+
+ // First, compute the normal to the plane AB that points vaguely north.
+ z := Point{a.PointCross(b).Normalize()}
+ if z.Z < 0 {
+ z = Point{z.Mul(-1)}
+ }
+
+ // Extend this to an orthonormal frame (x,y,z) where x is the direction
+ // where the great circle through AB achieves its maximum latitude.
+ y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()}
+ x := y.Cross(z.Vector)
+
+ // Compute the angle "theta" from the x-axis (in the x-y plane defined
+ // above) where the great circle intersects the given line of latitude.
+ sinLat := math.Sin(float64(lat))
+ if math.Abs(sinLat) >= x.Z {
+ // The great circle does not reach the given latitude.
+ return false
+ }
+
+ cosTheta := sinLat / x.Z
+ sinTheta := math.Sqrt(1 - cosTheta*cosTheta)
+ theta := math.Atan2(sinTheta, cosTheta)
+
+ // The candidate intersection points are located +/- theta in the x-y
+ // plane. For an intersection to be valid, we need to check that the
+ // intersection point is contained in the interior of the edge AB and
+ // also that it is contained within the given longitude interval "lng".
+
+ // Compute the range of theta values spanned by the edge AB.
+ abTheta := s1.IntervalFromPointPair(
+ math.Atan2(a.Dot(y.Vector), a.Dot(x)),
+ math.Atan2(b.Dot(y.Vector), b.Dot(x)))
+
+ if abTheta.Contains(theta) {
+ // Check if the intersection point is also in the given lng interval.
+ isect := x.Mul(cosTheta).Add(y.Mul(sinTheta))
+ if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+ return true
+ }
+ }
+
+ if abTheta.Contains(-theta) {
+ // Check if the other intersection point is also in the given lng interval.
+ isect := x.Mul(cosTheta).Sub(y.Mul(sinTheta))
+ if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersectsLngEdge reports whether the edge AB intersects the given edge of constant
+// longitude. Requires the points to have unit length.
+func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
+ // The nice thing about edges of constant longitude is that
+ // they are straight lines on the sphere (geodesics).
+ return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
+ PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross
+}
+
+// IntersectsCell reports whether this rectangle intersects the given cell. This is an
+// exact test and may be fairly expensive.
+func (r Rect) IntersectsCell(c Cell) bool {
+ // First we eliminate the cases where one region completely contains the
+ // other. Once these are disposed of, then the regions will intersect
+ // if and only if their boundaries intersect.
+ if r.IsEmpty() {
+ return false
+ }
+ if r.ContainsPoint(Point{c.id.rawPoint()}) {
+ return true
+ }
+ if c.ContainsPoint(PointFromLatLng(r.Center())) {
+ return true
+ }
+
+ // Quick rejection test (not required for correctness).
+ if !r.Intersects(c.RectBound()) {
+ return false
+ }
+
+ // Precompute the cell vertices as points and latitude-longitudes. We also
+ // check whether the Cell contains any corner of the rectangle, or
+ // vice-versa, since the edge-crossing tests only check the edge interiors.
+ vertices := [4]Point{}
+ latlngs := [4]LatLng{}
+
+ for i := range vertices {
+ vertices[i] = c.Vertex(i)
+ latlngs[i] = LatLngFromPoint(vertices[i])
+ if r.ContainsLatLng(latlngs[i]) {
+ return true
+ }
+ if c.ContainsPoint(PointFromLatLng(r.Vertex(i))) {
+ return true
+ }
+ }
+
+ // Now check whether the boundaries intersect. Unfortunately, a
+ // latitude-longitude rectangle does not have straight edges: two edges
+ // are curved, and at least one of them is concave.
+ for i := range vertices {
+ edgeLng := s1.IntervalFromEndpoints(latlngs[i].Lng.Radians(), latlngs[(i+1)&3].Lng.Radians())
+ if !r.Lng.Intersects(edgeLng) {
+ continue
+ }
+
+ a := vertices[i]
+ b := vertices[(i+1)&3]
+ if edgeLng.Contains(r.Lng.Lo) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Lo)) {
+ return true
+ }
+ if edgeLng.Contains(r.Lng.Hi) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Hi)) {
+ return true
+ }
+ if intersectsLatEdge(a, b, s1.Angle(r.Lat.Lo), r.Lng) {
+ return true
+ }
+ if intersectsLatEdge(a, b, s1.Angle(r.Lat.Hi), r.Lng) {
+ return true
+ }
+ }
+ return false
+}
+
+// Encode encodes the Rect.
+func (r Rect) Encode(w io.Writer) error {
+ e := &encoder{w: w}
+ r.encode(e)
+ return e.err
+}
+
+func (r Rect) encode(e *encoder) {
+ e.writeInt8(encodingVersion)
+ e.writeFloat64(r.Lat.Lo)
+ e.writeFloat64(r.Lat.Hi)
+ e.writeFloat64(r.Lng.Lo)
+ e.writeFloat64(r.Lng.Hi)
+}
+
+// Decode decodes a rectangle.
+func (r *Rect) Decode(rd io.Reader) error {
+ d := &decoder{r: asByteReader(rd)}
+ r.decode(d)
+ return d.err
+}
+
+func (r *Rect) decode(d *decoder) {
+ if version := d.readUint8(); int(version) != int(encodingVersion) && d.err == nil {
+ d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
+ return
+ }
+ r.Lat.Lo = d.readFloat64()
+ r.Lat.Hi = d.readFloat64()
+ r.Lng.Lo = d.readFloat64()
+ r.Lng.Hi = d.readFloat64()
+ return
+}
+
+// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere)
+// from a given point to the rectangle (both its boundary and its interior).
+// If r is empty, the result is meaningless.
+// The latlng must be valid.
+func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle {
+ if r.Lng.Contains(float64(ll.Lng)) {
+ return maxAngle(0, ll.Lat-s1.Angle(r.Lat.Hi), s1.Angle(r.Lat.Lo)-ll.Lat)
+ }
+
+ i := s1.IntervalFromEndpoints(r.Lng.Hi, r.Lng.ComplementCenter())
+ rectLng := r.Lng.Lo
+ if i.Contains(float64(ll.Lng)) {
+ rectLng = r.Lng.Hi
+ }
+
+ lo := LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
+ hi := LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(rectLng) * s1.Radian}
+ return DistanceFromSegment(PointFromLatLng(ll), PointFromLatLng(lo), PointFromLatLng(hi))
+}
+
+// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the
+// surface of the sphere) to the given Rect. The directed Hausdorff
+// distance from rectangle A to rectangle B is given by
+// h(A, B) = max_{p in A} min_{q in B} d(p, q).
+func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle {
+ if r.IsEmpty() {
+ return 0 * s1.Radian
+ }
+ if other.IsEmpty() {
+ return math.Pi * s1.Radian
+ }
+
+ lng := r.Lng.DirectedHausdorffDistance(other.Lng)
+ return directedHausdorffDistance(lng, r.Lat, other.Lat)
+}
+
+// HausdorffDistance returns the undirected Hausdorff distance (measured along the
+// surface of the sphere) to the given Rect.
+// The Hausdorff distance between rectangle A and rectangle B is given by
+// H(A, B) = max{h(A, B), h(B, A)}.
+func (r Rect) HausdorffDistance(other Rect) s1.Angle {
+ return maxAngle(r.DirectedHausdorffDistance(other),
+ other.DirectedHausdorffDistance(r))
+}
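+
+// hausdorffSketch is an illustrative sketch, not part of the upstream
+// library: the directed distance is asymmetric. If inner is contained in
+// outer, h(inner, outer) == 0 while h(outer, inner) is generally positive,
+// and HausdorffDistance takes the larger of the two.
+func hausdorffSketch(inner, outer Rect) s1.Angle {
+ if outer.Contains(inner) {
+  _ = inner.DirectedHausdorffDistance(outer) // always 0 in this branch
+ }
+ return outer.HausdorffDistance(inner)
+}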
+
+// ApproxEqual reports whether the latitude and longitude intervals of the two rectangles
+// are the same up to a small tolerance.
+func (r Rect) ApproxEqual(other Rect) bool {
+ return r.Lat.ApproxEqual(other.Lat) && r.Lng.ApproxEqual(other.Lng)
+}
+
+// directedHausdorffDistance returns the directed Hausdorff distance
+// from one longitudinal edge spanning latitude range 'a' to the other
+// longitudinal edge spanning latitude range 'b', with their longitudinal
+// difference given by 'lngDiff'.
+func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle {
+ // By symmetry, we can assume a's longitude is 0 and b's longitude is
+ // lngDiff. Call b's two endpoints bLo and bHi. Let H be the hemisphere
+ // containing a and delimited by the longitude line of b. The Voronoi diagram
+ // of b on H has three edges (portions of great circles) all orthogonal to b
+ // and meeting at bLo cross bHi.
+ // E1: (bLo, bLo cross bHi)
+ // E2: (bHi, bLo cross bHi)
+ // E3: (-bMid, bLo cross bHi), where bMid is the midpoint of b
+ //
+ // They subdivide H into three Voronoi regions. Depending on how longitude 0
+ // (which contains edge a) intersects these regions, we distinguish two cases:
+ // Case 1: it intersects three regions. This occurs when lngDiff <= π/2.
+ // Case 2: it intersects only two regions. This occurs when lngDiff > π/2.
+ //
+ // In the first case, the directed Hausdorff distance to edge b can only be
+ // realized by the following points on a:
+ // A1: two endpoints of a.
+ // A2: intersection of a with the equator, if b also intersects the equator.
+ //
+ // In the second case, the directed Hausdorff distance to edge b can only be
+ // realized by the following points on a:
+ // B1: two endpoints of a.
+ // B2: intersection of a with E3
+ // B3: farthest point from bLo to the interior of D, and farthest point from
+ // bHi to the interior of U, if any, where D (resp. U) is the portion
+ // of edge a below (resp. above) the intersection point from B2.
+
+ if lngDiff < 0 {
+ panic("impossible: negative lngDiff")
+ }
+ if lngDiff > math.Pi {
+ panic("impossible: lngDiff > Pi")
+ }
+
+ if lngDiff == 0 {
+ return s1.Angle(a.DirectedHausdorffDistance(b))
+ }
+
+ // Assumed longitude of b.
+ bLng := lngDiff
+ // Two endpoints of b.
+ bLo := PointFromLatLng(LatLng{s1.Angle(b.Lo), bLng})
+ bHi := PointFromLatLng(LatLng{s1.Angle(b.Hi), bLng})
+
+ // Cases A1 and B1.
+ aLo := PointFromLatLng(LatLng{s1.Angle(a.Lo), 0})
+ aHi := PointFromLatLng(LatLng{s1.Angle(a.Hi), 0})
+ maxDistance := maxAngle(
+ DistanceFromSegment(aLo, bLo, bHi),
+ DistanceFromSegment(aHi, bLo, bHi))
+
+ if lngDiff <= math.Pi/2 {
+ // Case A2.
+ if a.Contains(0) && b.Contains(0) {
+ maxDistance = maxAngle(maxDistance, lngDiff)
+ }
+ return maxDistance
+ }
+
+ // Case B2.
+ p := bisectorIntersection(b, bLng)
+ pLat := LatLngFromPoint(p).Lat
+ if a.Contains(float64(pLat)) {
+ maxDistance = maxAngle(maxDistance, p.Angle(bLo.Vector))
+ }
+
+ // Case B3.
+ if pLat > s1.Angle(a.Lo) {
+ intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo)
+ if ok {
+ maxDistance = maxAngle(maxDistance, intDist)
+ }
+ }
+ if pLat < s1.Angle(a.Hi) {
+ intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi)
+ if ok {
+ maxDistance = maxAngle(maxDistance, intDist)
+ }
+ }
+
+ return maxDistance
+}
+
+// interiorMaxDistance returns the max distance from a point b to the segment spanning latitude range
+// aLat on longitude 0 if the max occurs in the interior of aLat. Otherwise, returns (0, false).
+func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) {
+ // Longitude 0 is in the y=0 plane. b.X >= 0 implies that the maximum
+ // does not occur in the interior of aLat.
+ if aLat.IsEmpty() || b.X >= 0 {
+ return 0, false
+ }
+
+ // Project b to the y=0 plane. The antipode of the normalized projection is
+ // the point at which the maximum distance from b occurs, if it is contained
+ // in aLat.
+ intersectionPoint := PointFromCoords(-b.X, 0, -b.Z)
+ if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) {
+ return 0, false
+ }
+ return b.Angle(intersectionPoint.Vector), true
+}
+
+// bisectorIntersection returns the intersection of longitude 0 with the bisector of an edge
+// on longitude 'lng' and spanning latitude range 'lat'.
+func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point {
+ lng = s1.Angle(math.Abs(float64(lng)))
+ latCenter := s1.Angle(lat.Center())
+
+ // A vector orthogonal to the bisector of the given longitudinal edge.
+ orthoBisector := LatLng{latCenter - math.Pi/2, lng}
+ if latCenter < 0 {
+ orthoBisector = LatLng{-latCenter - math.Pi/2, lng - math.Pi}
+ }
+
+ // A vector orthogonal to longitude 0.
+ orthoLng := Point{r3.Vector{0, -1, 0}}
+
+ return orthoLng.PointCross(PointFromLatLng(orthoBisector))
+}
+
+// Centroid returns the true centroid of the given Rect multiplied by its
+// surface area. The result is not unit length, so you may want to normalize it.
+// Note that in general the centroid is *not* at the center of the rectangle, and
+// in fact it may not even be contained by the rectangle. (It is the "center of
+// mass" of the rectangle viewed as subset of the unit sphere, i.e. it is the
+// point in space about which this curved shape would rotate.)
+//
+// The reason for multiplying the result by the rectangle area is to make it
+// easier to compute the centroid of more complicated shapes. The centroid
+// of a union of disjoint regions can be computed simply by adding their
+// Centroid results.
+func (r Rect) Centroid() Point {
+ // When a sphere is divided into slices of constant thickness by a set
+ // of parallel planes, all slices have the same surface area. This
+ // implies that the z-component of the centroid is simply the midpoint
+ // of the z-interval spanned by the Rect.
+ //
+ // Similarly, it is easy to see that the (x,y) of the centroid lies in
+ // the plane through the midpoint of the rectangle's longitude interval.
+ // We only need to determine the distance "d" of this point from the
+ // z-axis.
+ //
+ // Let's restrict our attention to a particular z-value. In this
+ // z-plane, the Rect is a circular arc. The centroid of this arc
+ // lies on a radial line through the midpoint of the arc, and at a
+ // distance from the z-axis of
+ //
+ // r * (sin(alpha) / alpha)
+ //
+ // where r = sqrt(1-z^2) is the radius of the arc, and "alpha" is half
+ // of the arc length (i.e., the arc covers longitudes [-alpha, alpha]).
+ //
+ // To find the centroid distance from the z-axis for the entire
+ // rectangle, we just need to integrate over the z-interval. This gives
+ //
+ // d = Integrate[sqrt(1-z^2)*sin(alpha)/alpha, z1..z2] / (z2 - z1)
+ //
+ // where [z1, z2] is the range of z-values covered by the rectangle.
+ // This simplifies to
+ //
+ // d = sin(alpha)/(2*alpha*(z2-z1))*(z2*r2 - z1*r1 + theta2 - theta1)
+ //
+ // where [theta1, theta2] is the latitude interval, z1=sin(theta1),
+ // z2=sin(theta2), r1=cos(theta1), and r2=cos(theta2).
+ //
+ // Finally, we want to return not the centroid itself, but the centroid
+ // scaled by the area of the rectangle. The area of the rectangle is
+ //
+ // A = 2 * alpha * (z2 - z1)
+ //
+ // which fortunately appears in the denominator of "d".
+
+ if r.IsEmpty() {
+ return Point{}
+ }
+
+ z1 := math.Sin(r.Lat.Lo)
+ z2 := math.Sin(r.Lat.Hi)
+ r1 := math.Cos(r.Lat.Lo)
+ r2 := math.Cos(r.Lat.Hi)
+
+ alpha := 0.5 * r.Lng.Length()
+ r0 := math.Sin(alpha) * (r2*z2 - r1*z1 + r.Lat.Length())
+ lng := r.Lng.Center()
+ z := alpha * (z2 + z1) * (z2 - z1) // scaled by the area
+
+ return Point{r3.Vector{r0 * math.Cos(lng), r0 * math.Sin(lng), z}}
+}
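+
+// centroidUnionSketch is an illustrative sketch, not part of the upstream
+// library: because Centroid is pre-scaled by area, the centroid of a
+// union of disjoint rectangles is just the vector sum of the parts.
+func centroidUnionSketch(a, b Rect) Point {
+ return Point{a.Centroid().Add(b.Centroid().Vector)}
+}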
+
+// BUG: The major differences from the C++ version are:
+// - Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)
diff --git a/vendor/github.com/golang/geo/s2/rect_bounder.go b/vendor/github.com/golang/geo/s2/rect_bounder.go
new file mode 100644
index 000000000..419dea0c1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/rect_bounder.go
@@ -0,0 +1,352 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// RectBounder is used to compute a bounding rectangle that contains all edges
+// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
+// Note that the bounding rectangle of an edge can be larger than the bounding
+// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
+//
+// The bounds are calculated conservatively to account for numerical errors
+// when points are converted to LatLngs. More precisely, this function
+// guarantees the following:
+// Let L be a closed edge chain (Loop) such that the interior of the loop does
+// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
+// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
+type RectBounder struct {
+ // The previous vertex in the chain.
+ a Point
+ // The previous vertex latitude longitude.
+ aLL LatLng
+ bound Rect
+}
+
+// NewRectBounder returns a new instance of a RectBounder.
+func NewRectBounder() *RectBounder {
+ return &RectBounder{
+ bound: EmptyRect(),
+ }
+}
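+
+// rectBounderSketch is an illustrative sketch, not part of the upstream
+// library: feeding a vertex chain to a RectBounder yields a bound that
+// also covers the edge interiors, unlike the bound of the vertices alone.
+func rectBounderSketch(vertices []Point) Rect {
+ rb := NewRectBounder()
+ for _, v := range vertices {
+  rb.AddPoint(v)
+ }
+ return rb.RectBound()
+}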
+
+// maxErrorForTests returns the maximum error in RectBound provided that the
+// result does not include either pole. It is only used for testing purposes
+func (r *RectBounder) maxErrorForTests() LatLng {
+ // The maximum error in the latitude calculation is
+ // 3.84 * dblEpsilon for the PointCross calculation
+ // 0.96 * dblEpsilon for the Latitude calculation
+ // 5 * dblEpsilon added by AddPoint/RectBound to compensate for error
+ // -----------------
+ // 9.80 * dblEpsilon maximum error in result
+ //
+ // The maximum error in the longitude calculation is dblEpsilon. RectBound
+ // does not do any expansion because this isn't necessary in order to
+ // bound the *rounded* longitudes of contained points.
+ return LatLng{10 * dblEpsilon * s1.Radian, 1 * dblEpsilon * s1.Radian}
+}
+
+// AddPoint adds the given point to the chain. The Point must be unit length.
+func (r *RectBounder) AddPoint(b Point) {
+ bLL := LatLngFromPoint(b)
+
+ if r.bound.IsEmpty() {
+ r.a = b
+ r.aLL = bLL
+ r.bound = r.bound.AddPoint(bLL)
+ return
+ }
+
+ // First compute the cross product N = A x B robustly. This is the normal
+ // to the great circle through A and B. We don't use RobustSign
+ // since that method returns an arbitrary vector orthogonal to A if the two
+ // vectors are proportional, and we want the zero vector in that case.
+ n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)
+
+ // The relative error in N gets large as its norm gets very small (i.e.,
+ // when the two points are nearly identical or antipodal). We handle this
+ // by choosing a maximum allowable error, and if the error is greater than
+ // this we fall back to a different technique. Since it turns out that
+ // the other sources of error in converting the normal to a maximum
+ // latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
+ // have the total error be a multiple of dblEpsilon, we have chosen to
+ // limit the maximum error in the normal to be 3.84 * dblEpsilon.
+ // It is possible to show that the error is less than this when
+ //
+ // n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
+ // = 1.91346e-15 (about 8.618 * dblEpsilon)
+ nNorm := n.Norm()
+ if nNorm < 1.91346e-15 {
+ // A and B are either nearly identical or nearly antipodal (to within
+ // 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
+ if r.a.Dot(b.Vector) < 0 {
+ // The two points are nearly antipodal. The easiest solution is to
+ // assume that the edge between A and B could go in any direction
+ // around the sphere.
+ r.bound = FullRect()
+ } else {
+ // The two points are nearly identical (to within 4.309 * dblEpsilon).
+ // In this case we can just use the bounding rectangle of the points,
+ // since after the expansion done by GetBound this Rect is
+ // guaranteed to include the (lat,lng) values of all points along AB.
+ r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
+ }
+ r.a = b
+ r.aLL = bLL
+ return
+ }
+
+ // Compute the longitude range spanned by AB.
+ lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
+ if lngAB.Length() >= math.Pi-2*dblEpsilon {
+ // The points lie on nearly opposite lines of longitude to within the
+ // maximum error of the calculation. The easiest solution is to assume
+ // that AB could go on either side of the pole.
+ lngAB = s1.FullInterval()
+ }
+
+ // Next we compute the latitude range spanned by the edge AB. We start
+ // with the range spanning the two endpoints of the edge:
+ latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())
+
+ // This is the desired range unless the edge AB crosses the plane
+ // through N and the Z-axis (which is where the great circle through A
+ // and B attains its minimum and maximum latitudes). To test whether AB
+ // crosses this plane, we compute a vector M perpendicular to this
+ // plane and then project A and B onto it.
+ m := n.Cross(r3.Vector{0, 0, 1})
+ mA := m.Dot(r.a.Vector)
+ mB := m.Dot(b.Vector)
+
+ // We want to test the signs of "mA" and "mB", so we need to bound
+ // the error in these calculations. It is possible to show that the
+ // total error is bounded by
+ //
+ // (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
+ // = 6.06638e-16 * nNorm + 6.83174e-31
+
+ mError := 6.06638e-16*nNorm + 6.83174e-31
+ if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
+ // Minimum/maximum latitude *may* occur in the edge interior.
+ //
+ // The maximum latitude is 90 degrees minus the latitude of N. We
+ // compute this directly using atan2 in order to get maximum accuracy
+ // near the poles.
+ //
+		// Our goal is to compute a bound that contains the computed latitudes of
+		// all Points P that pass the point-in-polygon containment test.
+ // There are three sources of error we need to consider:
+ // - the directional error in N (at most 3.84 * dblEpsilon)
+ // - converting N to a maximum latitude
+ // - computing the latitude of the test point P
+ // The latter two sources of error are at most 0.955 * dblEpsilon
+ // individually, but it is possible to show by a more complex analysis
+ // that together they can add up to at most 1.16 * dblEpsilon, for a
+ // total error of 5 * dblEpsilon.
+ //
+		// We add 3 * dblEpsilon to the bound here, and RectBound will pad
+ // the bound by another 2 * dblEpsilon.
+ maxLat := math.Min(
+ math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
+ math.Pi/2)
+
+ // In order to get tight bounds when the two points are close together,
+ // we also bound the min/max latitude relative to the latitudes of the
+ // endpoints A and B. First we compute the distance between A and B,
+ // and then we compute the maximum change in latitude between any two
+ // points along the great circle that are separated by this distance.
+ // This gives us a latitude change "budget". Some of this budget must
+ // be spent getting from A to B; the remainder bounds the round-trip
+ // distance (in latitude) from A or B to the min or max latitude
+ // attained along the edge AB.
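+		// (On a great circle attaining maximum latitude maxLat, two points at
+		// angular distance d differ in latitude by at most
+		// 2*asin(sin(d/2)*sin(maxLat)), and sin(d/2) = 0.5*|A-B| here.)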
+ latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
+ maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon
+
+ // Test whether AB passes through the point of maximum latitude or
+ // minimum latitude. If the dot product(s) are small enough then the
+ // result may be ambiguous.
+ if mA <= mError && mB >= -mError {
+ latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
+ }
+ if mB <= mError && mA >= -mError {
+ latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
+ }
+ }
+ r.a = b
+ r.aLL = bLL
+ r.bound = r.bound.Union(Rect{latAB, lngAB})
+}
+
+// RectBound returns the bounding rectangle of the edge chain that connects the
+// vertices defined so far. This bound satisfies the guarantee made
+// above, i.e. if the edge chain defines a Loop, then the bound contains
+// the LatLng coordinates of all Points contained by the loop.
+func (r *RectBounder) RectBound() Rect {
+ return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
+}
+
+// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
+// contain the bounds of any subregion whose bounds are computed using
+// RectBounder. For example, consider a loop L that defines a square.
+// RectBound ensures that if a point P is contained by this square, then
+// LatLngFromPoint(P) is contained by the bound. But now consider a
+// diamond-shaped loop S contained by L. It is possible that RectBound
+// returns a *larger* bound for S than it does for L, due to rounding errors.
+// This method expands the bound for L so that it is guaranteed to contain
+// the bounds of any subregion S.
+//
+// More precisely, if L is a loop that does not contain either pole, and S
+// is a loop such that L.Contains(S), then
+//
+//	ExpandForSubregions(L.RectBound()).Contains(S.RectBound())
+//
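+// A hedged usage sketch (outer and inner are assumed Loops, with
+// outer.Contains(inner) and outer not containing either pole):
+//
+//	safe := ExpandForSubregions(outer.RectBound())
+//	_ = safe.Contains(inner.RectBound()) // always true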
+func ExpandForSubregions(bound Rect) Rect {
+ // Empty bounds don't need expansion.
+ if bound.IsEmpty() {
+ return bound
+ }
+
+ // First we need to check whether the bound B contains any nearly-antipodal
+ // points (to within 4.309 * dblEpsilon). If so then we need to return
+ // FullRect, since the subregion might have an edge between two
+ // such points, and AddPoint returns Full for such edges. Note that
+// this can happen even if B is not Full; for example, consider a loop
+ // that defines a 10km strip straddling the equator extending from
+ // longitudes -100 to +100 degrees.
+ //
+ // It is easy to check whether B contains any antipodal points, but checking
+ // for nearly-antipodal points is trickier. Essentially we consider the
+ // original bound B and its reflection through the origin B', and then test
+ // whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.
+
+ // lngGap is a lower bound on the longitudinal distance between B and its
+ // reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
+ // endpoint longitude calculations and the Length call.)
+ lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)
+
+ // minAbsLat is the minimum distance from B to the equator (if zero or
+ // negative, then B straddles the equator).
+ minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)
+
+ // latGapSouth and latGapNorth measure the minimum distance from B to the
+ // south and north poles respectively.
+ latGapSouth := math.Pi/2 + bound.Lat.Lo
+ latGapNorth := math.Pi/2 - bound.Lat.Hi
+
+ if minAbsLat >= 0 {
+ // The bound B does not straddle the equator. In this case the minimum
+ // distance is between one endpoint of the latitude edge in B closest to
+ // the equator and the other endpoint of that edge in B'. The latitude
+ // distance between these two points is 2*minAbsLat, and the longitude
+ // distance is lngGap. We could compute the distance exactly using the
+ // Haversine formula, but then we would need to bound the errors in that
+ // calculation. Since we only need accuracy when the distance is very
+ // small (close to 4.309 * dblEpsilon), we substitute the Euclidean
+ // distance instead. This gives us a right triangle XYZ with two edges of
+ // length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
+ // length of the third edge z, and we have
+ //
+ // z ~= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
+ //
+ // Therefore the region may contain nearly antipodal points only if
+ //
+ // 2*minAbsLat + lngGap < sqrt(2) * 4.309 * dblEpsilon
+ // ~= 1.354e-15
+ //
+ // Note that because the given bound B is conservative, minAbsLat and
+ // lngGap are both lower bounds on their true values so we do not need
+ // to make any adjustments for their errors.
+ if 2*minAbsLat+lngGap < 1.354e-15 {
+ return FullRect()
+ }
+ } else if lngGap >= math.Pi/2 {
+ // B spans at most Pi/2 in longitude. The minimum distance is always
+ // between one corner of B and the diagonally opposite corner of B'. We
+ // use the same distance approximation that we used above; in this case
+ // we have an obtuse triangle XYZ with two edges of length x = latGapSouth
+ // and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
+ //
+ // z >= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
+ //
+ // Unlike the case above, latGapSouth and latGapNorth are not lower bounds
+ // (because of the extra addition operation, and because math.Pi/2 is not
+ // exactly equal to Pi/2); they can exceed their true values by up to
+ // 0.75 * dblEpsilon. Putting this all together, the region may contain
+ // nearly antipodal points only if
+ //
+ // latGapSouth + latGapNorth < (sqrt(2) * 4.309 + 1.5) * dblEpsilon
+ // ~= 1.687e-15
+ if latGapSouth+latGapNorth < 1.687e-15 {
+ return FullRect()
+ }
+ } else {
+ // Otherwise we know that (1) the bound straddles the equator and (2) its
+ // width in longitude is at least Pi/2. In this case the minimum
+ // distance can occur either between a corner of B and the diagonally
+ // opposite corner of B' (as in the case above), or between a corner of B
+ // and the opposite longitudinal edge reflected in B'. It is sufficient
+ // to only consider the corner-edge case, since this distance is also a
+ // lower bound on the corner-corner distance when that case applies.
+
+ // Consider the spherical triangle XYZ where X is a corner of B with
+ // minimum absolute latitude, Y is the closest pole to X, and Z is the
+ // point closest to X on the opposite longitudinal edge of B'. This is a
+ // right triangle (Z = Pi/2), and from the spherical law of sines we have
+ //
+ // sin(z) / sin(Z) = sin(y) / sin(Y)
+ // sin(maxLatGap) / 1 = sin(dMin) / sin(lngGap)
+ // sin(dMin) = sin(maxLatGap) * sin(lngGap)
+ //
+ // where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
+ // desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
+ // for 0 <= t <= Pi/2, that we only need an accurate approximation when
+ // at least one of "maxLatGap" or lngGap is extremely small (in which
+ // case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
+ // to 0.75 * dblEpsilon, we want to test whether
+ //
+ // maxLatGap * lngGap < (4.309 + 0.75) * (Pi/2) * dblEpsilon
+ // ~= 1.765e-15
+ if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
+ return FullRect()
+ }
+ }
+ // Next we need to check whether the subregion might contain any edges that
+ // span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
+ // sets the longitude bound to Full in that case. This corresponds to
+ // testing whether (lngGap <= 0) in lngExpansion below.
+
+ // Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
+ // In the worst case, the errors when computing the latitude bound for a
+ // subregion could go in the opposite direction as the errors when computing
+ // the bound for the original region, so we need to double this value.
+ // (More analysis shows that it's okay to round down to a multiple of
+ // dblEpsilon.)
+ //
+ // For longitude, we rely on the fact that atan2 is correctly rounded and
+ // therefore no additional bounds expansion is necessary.
+
+ latExpansion := 9 * dblEpsilon
+ lngExpansion := 0.0
+ if lngGap <= 0 {
+ lngExpansion = math.Pi
+ }
+ return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
+}
diff --git a/vendor/github.com/golang/geo/s2/region.go b/vendor/github.com/golang/geo/s2/region.go
new file mode 100644
index 000000000..9ea3de1ca
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/region.go
@@ -0,0 +1,71 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// A Region represents a two-dimensional region on the unit sphere.
+//
+// The purpose of this interface is to allow complex regions to be
+// approximated as simpler regions. The interface is restricted to methods
+// that are useful for computing approximations.
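+//
+// A hedged usage sketch (center and radius are assumed values); any Region
+// implementation can be approximated by a RegionCoverer:
+//
+//	rc := &RegionCoverer{MaxLevel: 20, MaxCells: 8}
+//	covering := rc.Covering(CapFromCenterAngle(center, radius))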
+type Region interface {
+ // CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
+ CapBound() Cap
+
+ // RectBound returns a bounding latitude-longitude rectangle that contains
+ // the region. The bounds are not guaranteed to be tight.
+ RectBound() Rect
+
+	// ContainsCell reports whether the region completely contains the given cell.
+ // It returns false if containment could not be determined.
+ ContainsCell(c Cell) bool
+
+	// IntersectsCell reports whether the region intersects the given cell. It
+	// also returns true when the intersection could not be determined, and
+	// returns false only if the region is known not to intersect the cell.
+ IntersectsCell(c Cell) bool
+
+	// ContainsPoint reports whether the region contains the given point.
+ // The point should be unit length, although some implementations may relax
+ // this restriction.
+ ContainsPoint(p Point) bool
+
+ // CellUnionBound returns a small collection of CellIDs whose union covers
+ // the region. The cells are not sorted, may have redundancies (such as cells
+ // that contain other cells), and may cover much more area than necessary.
+ //
+ // This method is not intended for direct use by client code. Clients
+ // should typically use Covering, which has options to control the size and
+ // accuracy of the covering. Alternatively, if you want a fast covering and
+ // don't care about accuracy, consider calling FastCovering (which returns a
+ // cleaned-up version of the covering computed by this method).
+ //
+ // CellUnionBound implementations should attempt to return a small
+ // covering (ideally 4 cells or fewer) that covers the region and can be
+ // computed quickly. The result is used by RegionCoverer as a starting
+ // point for further refinement.
+ CellUnionBound() []CellID
+}
+
+// Enforce Region interface satisfaction.
+var (
+ _ Region = Cap{}
+ _ Region = Cell{}
+ _ Region = (*CellUnion)(nil)
+ _ Region = (*Loop)(nil)
+ _ Region = Point{}
+ _ Region = (*Polygon)(nil)
+ _ Region = (*Polyline)(nil)
+ _ Region = Rect{}
+)
diff --git a/vendor/github.com/golang/geo/s2/regioncoverer.go b/vendor/github.com/golang/geo/s2/regioncoverer.go
new file mode 100644
index 000000000..476e58559
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/regioncoverer.go
@@ -0,0 +1,477 @@
+// Copyright 2015 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "container/heap"
+)
+
+// RegionCoverer allows arbitrary regions to be approximated as unions of cells (CellUnion).
+// This is useful for implementing various sorts of search and precomputation operations.
+//
+// Typical usage:
+//
+// rc := &s2.RegionCoverer{MaxLevel: 30, MaxCells: 5}
+// r := s2.Region(CapFromCenterArea(center, area))
+// covering := rc.Covering(r)
+//
+// This yields a CellUnion of at most 5 cells that is guaranteed to cover the
+// given region (a disc-shaped region on the sphere).
+//
+// For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used.
+// This effectively allows the branching factor of the S2 CellID hierarchy to be increased.
+// Currently the only allowed values of LevelMod are 1, 2, and 3,
+// corresponding to branching factors of 4, 16, and 64 respectively.
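+// For example, with MinLevel = 4 and LevelMod = 2, only levels 4, 6, 8, and
+// so on are used, so each used cell has 16 children at the next usable level.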
+//
+// Note the following:
+//
+// - MinLevel takes priority over MaxCells, i.e. cells below the given level will
+// never be used even if this causes a large number of cells to be returned.
+//
+// - For any setting of MaxCells, up to 6 cells may be returned if that
+// is the minimum number of cells required (e.g. if the region intersects
+// all six face cells). Up to 3 cells may be returned even for very tiny
+// convex regions if they happen to be located at the intersection of
+// three cube faces.
+//
+// - For any setting of MaxCells, an arbitrary number of cells may be
+// returned if MinLevel is too high for the region being approximated.
+//
+// - If MaxCells is less than 4, the area of the covering may be
+// arbitrarily large compared to the area of the original region even if
+// the region is convex (e.g. a Cap or Rect).
+//
+// The approximation algorithm is not optimal but does a pretty good job in
+// practice. The output does not always use the maximum number of cells
+// allowed, both because this would not always yield a better approximation,
+// and because MaxCells is a limit on how much work is done exploring the
+// possible covering as well as a limit on the final output size.
+//
+// Because it is an approximation algorithm, one should not rely on the
+// stability of the output. In particular, the output of the covering algorithm
+// may change across different versions of the library.
+//
+// One can also generate interior coverings, which are sets of cells which
+// are entirely contained within a region. Interior coverings can be
+// empty, even for non-empty regions, if there are no cells that satisfy
+// the provided constraints and are contained by the region. Note that for
+// performance reasons, it is wise to specify a MaxLevel when computing
+// interior coverings - otherwise for regions with small or zero area, the
+// algorithm may spend a lot of time subdividing cells all the way to leaf
+// level to try to find contained cells.
+type RegionCoverer struct {
+ MinLevel int // the minimum cell level to be used.
+ MaxLevel int // the maximum cell level to be used.
+ LevelMod int // the LevelMod to be used.
+ MaxCells int // the maximum desired number of cells in the approximation.
+}
+
+type coverer struct {
+ minLevel int // the minimum cell level to be used.
+ maxLevel int // the maximum cell level to be used.
+ levelMod int // the LevelMod to be used.
+ maxCells int // the maximum desired number of cells in the approximation.
+ region Region
+ result CellUnion
+ pq priorityQueue
+ interiorCovering bool
+}
+
+type candidate struct {
+ cell Cell
+ terminal bool // Cell should not be expanded further.
+ numChildren int // Number of children that intersect the region.
+ children []*candidate // Actual size may be 0, 4, 16, or 64 elements.
+ priority int // Priority of the candidate.
+}
+
+type priorityQueue []*candidate
+
+func (pq priorityQueue) Len() int {
+ return len(pq)
+}
+
+func (pq priorityQueue) Less(i, j int) bool {
+	// We want Pop to give us the highest, not lowest, priority, so we use greater than here.
+ return pq[i].priority > pq[j].priority
+}
+
+func (pq priorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *priorityQueue) Push(x interface{}) {
+ item := x.(*candidate)
+ *pq = append(*pq, item)
+}
+
+func (pq *priorityQueue) Pop() interface{} {
+ item := (*pq)[len(*pq)-1]
+ *pq = (*pq)[:len(*pq)-1]
+ return item
+}
+
+func (pq *priorityQueue) Reset() {
+ *pq = (*pq)[:0]
+}
+
+// newCandidate returns a new candidate with no children if the cell intersects the given region.
+// The candidate is marked as terminal if it should not be expanded further.
+func (c *coverer) newCandidate(cell Cell) *candidate {
+ if !c.region.IntersectsCell(cell) {
+ return nil
+ }
+ cand := &candidate{cell: cell}
+ level := int(cell.level)
+ if level >= c.minLevel {
+ if c.interiorCovering {
+ if c.region.ContainsCell(cell) {
+ cand.terminal = true
+ } else if level+c.levelMod > c.maxLevel {
+ return nil
+ }
+ } else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) {
+ cand.terminal = true
+ }
+ }
+ return cand
+}
+
+// expandChildren populates the children of the candidate by expanding the given number of
+// levels from the given cell. Returns the number of children that were marked "terminal".
+func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int {
+ numLevels--
+ var numTerminals int
+ last := cell.id.ChildEnd()
+ for ci := cell.id.ChildBegin(); ci != last; ci = ci.Next() {
+ childCell := CellFromCellID(ci)
+ if numLevels > 0 {
+ if c.region.IntersectsCell(childCell) {
+ numTerminals += c.expandChildren(cand, childCell, numLevels)
+ }
+ continue
+ }
+ if child := c.newCandidate(childCell); child != nil {
+ cand.children = append(cand.children, child)
+ cand.numChildren++
+ if child.terminal {
+ numTerminals++
+ }
+ }
+ }
+ return numTerminals
+}
+
+// addCandidate adds the given candidate to the result if it is marked as "terminal",
+// otherwise expands its children and inserts it into the priority queue.
+// Passing an argument of nil does nothing.
+func (c *coverer) addCandidate(cand *candidate) {
+ if cand == nil {
+ return
+ }
+
+ if cand.terminal {
+ c.result = append(c.result, cand.cell.id)
+ return
+ }
+
+ // Expand one level at a time until we hit minLevel to ensure that we don't skip over it.
+ numLevels := c.levelMod
+ level := int(cand.cell.level)
+ if level < c.minLevel {
+ numLevels = 1
+ }
+
+ numTerminals := c.expandChildren(cand, cand.cell, numLevels)
+ maxChildrenShift := uint(2 * c.levelMod)
+ if cand.numChildren == 0 {
+ return
+ } else if !c.interiorCovering && numTerminals == 1<<maxChildrenShift && level >= c.minLevel {
+ // Optimization: add the parent cell rather than all of its children.
+ // We can't do this for interior coverings, since the children just
+ // intersect the region, but may not be contained by it - we need to
+ // subdivide them further.
+ cand.terminal = true
+ c.addCandidate(cand)
+ } else {
+ // We negate the priority so that smaller absolute priorities are returned
+ // first. The heuristic is designed to refine the largest cells first,
+ // since those are where we have the largest potential gain. Among cells
+ // of the same size, we prefer the cells with the fewest children.
+ // Finally, among cells with equal numbers of children we prefer those
+ // with the smallest number of children that cannot be refined further.
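+		//
+		// As a hedged worked example: with levelMod = 1 (maxChildrenShift = 2),
+		// a level-3 candidate with 2 intersecting children, one of which is
+		// terminal, gets priority -(((3<<2)+2)<<2 + 1) = -57.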
+ cand.priority = -(((level<<maxChildrenShift)+cand.numChildren)<<maxChildrenShift + numTerminals)
+ heap.Push(&c.pq, cand)
+ }
+}
+
+// adjustLevel returns the reduced "level" so that it satisfies levelMod. Levels smaller than minLevel
+// are not affected (since cells at these levels are eventually expanded).
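+// For example, with minLevel = 2 and levelMod = 3, a level of 7 is reduced
+// to 5, while levels of 2 or less are returned unchanged.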
+func (c *coverer) adjustLevel(level int) int {
+ if c.levelMod > 1 && level > c.minLevel {
+ level -= (level - c.minLevel) % c.levelMod
+ }
+ return level
+}
+
+// adjustCellLevels ensures that all cells with level > minLevel also satisfy levelMod,
+// by replacing them with an ancestor if necessary. Cell levels smaller
+// than minLevel are not modified (see AdjustLevel). The output is
+// then normalized to ensure that no redundant cells are present.
+func (c *coverer) adjustCellLevels(cells *CellUnion) {
+ if c.levelMod == 1 {
+ return
+ }
+
+ var out int
+ for _, ci := range *cells {
+ level := ci.Level()
+ newLevel := c.adjustLevel(level)
+ if newLevel != level {
+ ci = ci.Parent(newLevel)
+ }
+ if out > 0 && (*cells)[out-1].Contains(ci) {
+ continue
+ }
+ for out > 0 && ci.Contains((*cells)[out-1]) {
+ out--
+ }
+ (*cells)[out] = ci
+ out++
+ }
+ *cells = (*cells)[:out]
+}
+
+// initialCandidates computes a set of initial candidates that cover the given region.
+func (c *coverer) initialCandidates() {
+ // Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
+ temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
+
+ cells := temp.FastCovering(c.region)
+ c.adjustCellLevels(&cells)
+ for _, ci := range cells {
+ c.addCandidate(c.newCandidate(CellFromCellID(ci)))
+ }
+}
+
+// coveringInternal generates a covering and stores it in result.
+// Strategy: Start with the 6 faces of the cube. Discard any
+// that do not intersect the shape. Then repeatedly choose the
+// largest cell that intersects the shape and subdivide it.
+//
+// result contains the cells that will be part of the output, while pq
+// contains cells that we may still subdivide further. Cells that are
+// entirely contained within the region are immediately added to the output,
+// while cells that do not intersect the region are immediately discarded.
+// Therefore pq only contains cells that partially intersect the region.
+// Candidates are prioritized first according to cell size (larger cells
+// first), then by the number of intersecting children they have (fewest
+// children first), and then by the number of fully contained children
+// (fewest children first).
+func (c *coverer) coveringInternal(region Region) {
+ c.region = region
+
+ c.initialCandidates()
+ for c.pq.Len() > 0 && (!c.interiorCovering || len(c.result) < c.maxCells) {
+ cand := heap.Pop(&c.pq).(*candidate)
+
+ // For interior covering we keep subdividing no matter how many children
+ // candidate has. If we reach MaxCells before expanding all children,
+ // we will just use some of them.
+ // For exterior covering we cannot do this, because result has to cover the
+ // whole region, so all children have to be used.
+		// The cand.numChildren == 1 case takes care of the situation where we
+		// already have more than MaxCells in result (minLevel is too high).
+		// Subdividing a candidate with one child does no harm in this case.
+ if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells {
+ for _, child := range cand.children {
+ if !c.interiorCovering || len(c.result) < c.maxCells {
+ c.addCandidate(child)
+ }
+ }
+ } else {
+ cand.terminal = true
+ c.addCandidate(cand)
+ }
+ }
+ c.pq.Reset()
+ c.region = nil
+}
+
+// newCoverer returns an instance of coverer.
+func (rc *RegionCoverer) newCoverer() *coverer {
+ return &coverer{
+ minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)),
+ maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)),
+ levelMod: maxInt(1, minInt(3, rc.LevelMod)),
+ maxCells: rc.MaxCells,
+ }
+}
+
+// Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
+func (rc *RegionCoverer) Covering(region Region) CellUnion {
+ covering := rc.CellUnion(region)
+ covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
+ return covering
+}
+
+// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
+func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
+ intCovering := rc.InteriorCellUnion(region)
+ intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
+ return intCovering
+}
+
+// CellUnion returns a normalized CellUnion that covers the given region and
+// satisfies the restrictions except for MinLevel and LevelMod. These criteria
+// cannot be satisfied using a cell union because cell unions are
+// automatically normalized by replacing four child cells with their parent
+// whenever possible. (Note that the list of cell ids passed to the CellUnion
+// constructor does in fact satisfy all the given restrictions.)
+func (rc *RegionCoverer) CellUnion(region Region) CellUnion {
+ c := rc.newCoverer()
+ c.coveringInternal(region)
+ cu := c.result
+ cu.Normalize()
+ return cu
+}
+
+// InteriorCellUnion returns a normalized CellUnion that is contained within the given region and
+// satisfies the restrictions except for MinLevel and LevelMod. These criteria
+// cannot be satisfied using a cell union because cell unions are
+// automatically normalized by replacing four child cells with their parent
+// whenever possible. (Note that the list of cell ids passed to the CellUnion
+// constructor does in fact satisfy all the given restrictions.)
+func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion {
+ c := rc.newCoverer()
+ c.interiorCovering = true
+ c.coveringInternal(region)
+ cu := c.result
+ cu.Normalize()
+ return cu
+}
+
+// FastCovering returns a CellUnion that covers the given region similar to Covering,
+// except that this method is much faster and the coverings are not as tight.
+// All of the usual parameters are respected (MaxCells, MinLevel, MaxLevel, and LevelMod),
+// except that the implementation makes no attempt to take advantage of large values of
+// MaxCells. (A small number of cells will always be returned.)
+//
+// This function is useful as a starting point for algorithms that
+// recursively subdivide cells.
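+//
+// A hedged usage sketch (region is an assumed value):
+//
+//	rc := &RegionCoverer{MaxLevel: 16, LevelMod: 1, MaxCells: 4}
+//	rough := rc.FastCovering(region)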
+func (rc *RegionCoverer) FastCovering(region Region) CellUnion {
+ c := rc.newCoverer()
+ cu := CellUnion(region.CellUnionBound())
+ c.normalizeCovering(&cu)
+ return cu
+}
+
+// normalizeCovering normalizes the "covering" so that it conforms to the current covering
+// parameters (MaxCells, minLevel, maxLevel, and levelMod).
+// This method makes no attempt to be optimal. In particular, if
+// minLevel > 0 or levelMod > 1 then it may return more than the
+// desired number of cells even when this isn't necessary.
+//
+// Note that when the covering parameters have their default values, almost
+// all of the code in this function is skipped.
+func (c *coverer) normalizeCovering(covering *CellUnion) {
+ // If any cells are too small, or don't satisfy levelMod, then replace them with ancestors.
+ if c.maxLevel < maxLevel || c.levelMod > 1 {
+ for i, ci := range *covering {
+ level := ci.Level()
+ newLevel := c.adjustLevel(minInt(level, c.maxLevel))
+ if newLevel != level {
+ (*covering)[i] = ci.Parent(newLevel)
+ }
+ }
+ }
+ // Sort the cells and simplify them.
+ covering.Normalize()
+
+ // If there are still too many cells, then repeatedly replace two adjacent
+ // cells in CellID order by their lowest common ancestor.
+ for len(*covering) > c.maxCells {
+ bestIndex := -1
+ bestLevel := -1
+ for i := 0; i+1 < len(*covering); i++ {
+ level, ok := (*covering)[i].CommonAncestorLevel((*covering)[i+1])
+ if !ok {
+ continue
+ }
+ level = c.adjustLevel(level)
+ if level > bestLevel {
+ bestLevel = level
+ bestIndex = i
+ }
+ }
+
+ if bestLevel < c.minLevel {
+ break
+ }
+ (*covering)[bestIndex] = (*covering)[bestIndex].Parent(bestLevel)
+ covering.Normalize()
+ }
+ // Make sure that the covering satisfies minLevel and levelMod,
+ // possibly at the expense of satisfying MaxCells.
+ if c.minLevel > 0 || c.levelMod > 1 {
+ covering.Denormalize(c.minLevel, c.levelMod)
+ }
+}
+
+// SimpleRegionCovering returns a set of cells at the given level that cover
+// the connected region and a starting point on the boundary or inside the
+// region. The cells are returned in arbitrary order.
+//
+// Note that this method is not faster than the regular Covering
+// method for most region types, such as Cap or Polygon, and in fact it
+// can be much slower when the output consists of a large number of cells.
+// Currently it can be faster at generating coverings of long narrow regions
+// such as polylines, but this may change in the future.
+func SimpleRegionCovering(region Region, start Point, level int) []CellID {
+ return FloodFillRegionCovering(region, cellIDFromPoint(start).Parent(level))
+}
+
+// FloodFillRegionCovering returns all edge-connected cells at the same level as
+// the given CellID that intersect the given region, in arbitrary order.
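+//
+// A hedged usage sketch (region and p are assumed values):
+//
+//	start := cellIDFromPoint(p).Parent(12)
+//	cells := FloodFillRegionCovering(region, start)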
+func FloodFillRegionCovering(region Region, start CellID) []CellID {
+ var output []CellID
+ all := map[CellID]bool{
+ start: true,
+ }
+ frontier := []CellID{start}
+ for len(frontier) > 0 {
+ id := frontier[len(frontier)-1]
+ frontier = frontier[:len(frontier)-1]
+ if !region.IntersectsCell(CellFromCellID(id)) {
+ continue
+ }
+ output = append(output, id)
+ for _, nbr := range id.EdgeNeighbors() {
+ if !all[nbr] {
+ all[nbr] = true
+ frontier = append(frontier, nbr)
+ }
+ }
+ }
+
+ return output
+}
+
+// TODO(roberts): The differences from the C++ version
+// finish up FastCovering to match C++
+// IsCanonical
+// CanonicalizeCovering
+// containsAllChildren
+// replaceCellsWithAncestor
diff --git a/vendor/github.com/golang/geo/s2/shape.go b/vendor/github.com/golang/geo/s2/shape.go
new file mode 100644
index 000000000..2cbf170c3
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shape.go
@@ -0,0 +1,263 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "sort"
+)
+
+// Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
+// allowed, and can be used to represent points.
+type Edge struct {
+ V0, V1 Point
+}
+
+// Cmp compares the two edges using the underlying Points Cmp method and returns
+//
+// -1 if e < other
+// 0 if e == other
+// +1 if e > other
+//
+// The two edges are compared by first vertex, and then by the second vertex.
+func (e Edge) Cmp(other Edge) int {
+ if v0cmp := e.V0.Cmp(other.V0.Vector); v0cmp != 0 {
+ return v0cmp
+ }
+ return e.V1.Cmp(other.V1.Vector)
+}
+
+// sortEdges sorts the slice of Edges in place.
+func sortEdges(e []Edge) {
+ sort.Sort(edges(e))
+}
+
+// edges implements the Sort interface for slices of Edge.
+type edges []Edge
+
+func (e edges) Len() int { return len(e) }
+func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 }
+
+// ShapeEdgeID is a unique identifier for an Edge within a ShapeIndex,
+// consisting of a (shapeID, edgeID) pair.
+type ShapeEdgeID struct {
+ ShapeID int32
+ EdgeID int32
+}
+
+// Cmp compares the two ShapeEdgeIDs and returns
+//
+// -1 if s < other
+// 0 if s == other
+// +1 if s > other
+//
+// The two are compared first by shape id and then by edge id.
+func (s ShapeEdgeID) Cmp(other ShapeEdgeID) int {
+ switch {
+ case s.ShapeID < other.ShapeID:
+ return -1
+ case s.ShapeID > other.ShapeID:
+ return 1
+ }
+ switch {
+ case s.EdgeID < other.EdgeID:
+ return -1
+ case s.EdgeID > other.EdgeID:
+ return 1
+ }
+ return 0
+}
+
+// ShapeEdge represents a ShapeEdgeID with the two endpoints of that Edge.
+type ShapeEdge struct {
+ ID ShapeEdgeID
+ Edge Edge
+}
+
+// Chain represents a range of edge IDs corresponding to a chain of connected
+// edges, specified as a (start, length) pair. The chain is defined to consist of
+// edge IDs {start, start + 1, ..., start + length - 1}.
+type Chain struct {
+ Start, Length int
+}
+
+// ChainPosition represents the position of an edge within a given edge chain,
+// specified as a (chainID, offset) pair. Chains are numbered sequentially
+// starting from zero, and offsets are measured from the start of each chain.
+type ChainPosition struct {
+ ChainID, Offset int
+}
+
+// A ReferencePoint consists of a point and a boolean indicating whether the point
+// is contained by a particular shape.
+type ReferencePoint struct {
+ Point Point
+ Contained bool
+}
+
+// OriginReferencePoint returns a ReferencePoint with the given value for
+// contained and the origin point. It should be used when all points or no
+// points are contained.
+func OriginReferencePoint(contained bool) ReferencePoint {
+ return ReferencePoint{Point: OriginPoint(), Contained: contained}
+}
+
+// typeTag is a 32-bit tag that can be used to identify the type of an encoded
+// Shape. All encodable types have a non-zero type tag. The tag associated with
+// a given shape type can be accessed via the shape's typeTag method.
+
+const (
+ // Indicates that a given Shape type cannot be encoded.
+ typeTagNone typeTag = 0
+ typeTagPolygon typeTag = 1
+ typeTagPolyline typeTag = 2
+ typeTagPointVector typeTag = 3
+ typeTagLaxPolyline typeTag = 4
+ typeTagLaxPolygon typeTag = 5
+
+ // The minimum allowable tag for future user-defined Shape types.
+ typeTagMinUser typeTag = 8192
+)
+
+// Shape represents polygonal geometry in a flexible way. It is organized as a
+// collection of edges that optionally defines an interior. All geometry
+// represented by a given Shape must have the same dimension, which means that
+// a Shape can represent either a set of points, a set of polylines, or a set
+// of polygons.
+//
+// Shape is defined as an interface in order to give clients control over the
+// underlying data representation. Sometimes a Shape does not have any data of
+// its own, but instead wraps some other type.
+//
+// Shape operations are typically defined on a ShapeIndex rather than
+// individual shapes. A ShapeIndex is simply a collection of Shapes,
+// possibly of different dimensions (e.g. 10 points and 3 polygons), organized
+// into a data structure for efficient edge access.
+//
+// The edges of a Shape are indexed by a contiguous range of edge IDs
+// starting at 0. The edges are further subdivided into chains, where each
+// chain consists of a sequence of edges connected end-to-end (a polyline).
+// For example, a Shape representing two polylines AB and CDE would have
+// three edges (AB, CD, DE) grouped into two chains: (AB) and (CD, DE).
+// Similarly, a Shape representing 5 points would have 5 chains consisting
+// of one edge each.
+//
+// Shape has methods that allow edges to be accessed either using the global
+// numbering (edge ID) or within a particular chain. The global numbering is
+// sufficient for most purposes, but the chain representation is useful for
+// certain algorithms such as intersection (see BooleanOperation).
+type Shape interface {
+ // NumEdges returns the number of edges in this shape.
+ NumEdges() int
+
+ // Edge returns the edge for the given edge index.
+ Edge(i int) Edge
+
+ // ReferencePoint returns an arbitrary reference point for the shape. (The
+ // containment boolean value must be false for shapes that do not have an interior.)
+ //
+ // This reference point may then be used to compute the containment of other
+ // points by counting edge crossings.
+ ReferencePoint() ReferencePoint
+
+ // NumChains reports the number of contiguous edge chains in the shape.
+ // For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
+	// of two chains (AB,BC,CD and AE,EF). Every chain is assigned a chain ID
+ // numbered sequentially starting from zero.
+ //
+ // Note that it is always acceptable to implement this method by returning
+ // NumEdges, i.e. every chain consists of a single edge, but this may
+ // reduce the efficiency of some algorithms.
+ NumChains() int
+
+ // Chain returns the range of edge IDs corresponding to the given edge chain.
+ // Edge chains must form contiguous, non-overlapping ranges that cover
+ // the entire range of edge IDs. This is spelled out more formally below:
+ //
+	//	0 <= i < NumChains()
+	//	Chain(i).Length > 0, for all i
+	//	Chain(0).Start == 0
+	//	Chain(i).Start + Chain(i).Length == Chain(i+1).Start, for i < NumChains()-1
+	//	Chain(i).Start + Chain(i).Length == NumEdges(), for i == NumChains()-1
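+	//
+	// For example, a shape whose three edges (AB, CD, DE) form the chains
+	// (AB) and (CD, DE) would have Chain(0) == Chain{0, 1} and
+	// Chain(1) == Chain{1, 2}.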
+ Chain(chainID int) Chain
+
+	// ChainEdge returns the edge at offset "offset" within edge chain "chainID".
+	// Equivalent to "shape.Edge(shape.Chain(chainID).Start + offset)"
+ // but more efficient.
+ ChainEdge(chainID, offset int) Edge
+
+ // ChainPosition finds the chain containing the given edge, and returns the
+ // position of that edge as a ChainPosition(chainID, offset) pair.
+ //
+	//	shape.Chain(pos.ChainID).Start + pos.Offset == edgeID
+	//	shape.Chain(pos.ChainID+1).Start > edgeID
+ //
+ // where pos == shape.ChainPosition(edgeID).
+ ChainPosition(edgeID int) ChainPosition
+
+ // Dimension returns the dimension of the geometry represented by this shape,
+ // either 0, 1 or 2 for point, polyline and polygon geometry respectively.
+ //
+ // 0 - Point geometry. Each point is represented as a degenerate edge.
+ //
+ // 1 - Polyline geometry. Polyline edges may be degenerate. A shape may
+	//     represent any number of polylines. Polyline edges may intersect.
+ //
+ // 2 - Polygon geometry. Edges should be oriented such that the polygon
+ // interior is always on the left. In theory the edges may be returned
+ // in any order, but typically the edges are organized as a collection
+ // of edge chains where each chain represents one polygon loop.
+ // Polygons may have degeneracies (e.g., degenerate edges or sibling
+ // pairs consisting of an edge and its corresponding reversed edge).
+ // A polygon loop may also be full (containing all points on the
+ // sphere); by convention this is represented as a chain with no edges.
+ // (See laxPolygon for details.)
+ //
+ // This method allows degenerate geometry of different dimensions
+ // to be distinguished, e.g. it allows a point to be distinguished from a
+ // polyline or polygon that has been simplified to a single point.
+ Dimension() int
+
+ // IsEmpty reports whether the Shape contains no points. (Note that the full
+ // polygon is represented as a chain with zero edges.)
+ IsEmpty() bool
+
+ // IsFull reports whether the Shape contains all points on the sphere.
+ IsFull() bool
+
+ // typeTag returns a value that can be used to identify the type of an
+ // encoded Shape.
+ typeTag() typeTag
+
+ // We do not support implementations of this interface outside this package.
+ privateInterface()
+}
+
+// defaultShapeIsEmpty reports whether this shape contains no points.
+func defaultShapeIsEmpty(s Shape) bool {
+ return s.NumEdges() == 0 && (s.Dimension() != 2 || s.NumChains() == 0)
+}
+
+// defaultShapeIsFull reports whether this shape contains all points on the sphere.
+func defaultShapeIsFull(s Shape) bool {
+ return s.NumEdges() == 0 && s.Dimension() == 2 && s.NumChains() > 0
+}
+
+// A minimal check for types that should satisfy the Shape interface.
+var (
+ _ Shape = &Loop{}
+ _ Shape = &Polygon{}
+ _ Shape = &Polyline{}
+)
diff --git a/vendor/github.com/golang/geo/s2/shapeindex.go b/vendor/github.com/golang/geo/s2/shapeindex.go
new file mode 100644
index 000000000..8da299d06
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeindex.go
@@ -0,0 +1,1507 @@
+// Copyright 2016 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+ "sort"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+)
+
+// CellRelation describes the possible relationships between a target cell
+// and the cells of the ShapeIndex. If the target is an index cell or is
+// contained by an index cell, it is Indexed. If the target is subdivided
+// into one or more index cells, it is Subdivided. Otherwise it is Disjoint.
+type CellRelation int
+
+// The possible CellRelations for a ShapeIndex.
+const (
+ Indexed CellRelation = iota
+ Subdivided
+ Disjoint
+)
+
+const (
+ // cellPadding defines the total error when clipping an edge which comes
+ // from two sources:
+ // (1) Clipping the original spherical edge to a cube face (the face edge).
+ // The maximum error in this step is faceClipErrorUVCoord.
+ // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary.
+ // The maximum error in this step is edgeClipErrorUVCoord.
+ // Finally, since we encounter the same errors when clipping query edges, we
+ // double the total error so that we only need to pad edges during indexing
+ // and not at query time.
+ cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord)
+
+ // cellSizeToLongEdgeRatio defines the cell size relative to the length of an
+ // edge at which it is first considered to be long. Long edges do not
+ // contribute toward the decision to subdivide a cell further. For example,
+ // a value of 2.0 means that the cell must be at least twice the size of the
+ // edge in order for that edge to be counted. There are two reasons for not
+ // counting long edges: (1) such edges typically need to be propagated to
+ // several children, which increases time and memory costs without much benefit,
+ // and (2) in pathological cases, many long edges close together could force
+ // subdivision to continue all the way to the leaf cell level.
+ cellSizeToLongEdgeRatio = 1.0
+)
+
+// clippedShape represents the part of a shape that intersects a Cell.
+// It consists of the set of edge IDs that intersect that cell and a boolean
+// indicating whether the center of the cell is inside the shape (for shapes
+// that have an interior).
+//
+// Note that the edges themselves are not clipped; we always use the original
+// edges for intersection tests so that the results will be the same as the
+// original shape.
+type clippedShape struct {
+ // shapeID is the index of the shape this clipped shape is a part of.
+ shapeID int32
+
+ // containsCenter indicates if the center of the CellID this shape has been
+ // clipped to falls inside this shape. This is false for shapes that do not
+ // have an interior.
+ containsCenter bool
+
+ // edges is the ordered set of ShapeIndex original edge IDs. Edges
+ // are stored in increasing order of edge ID.
+ edges []int
+}
+
+// newClippedShape returns a new clipped shape for the given shapeID and number of expected edges.
+func newClippedShape(id int32, numEdges int) *clippedShape {
+ return &clippedShape{
+ shapeID: id,
+ edges: make([]int, numEdges),
+ }
+}
+
+// numEdges returns the number of edges that intersect the CellID of the Cell this was clipped to.
+func (c *clippedShape) numEdges() int {
+ return len(c.edges)
+}
+
+// containsEdge reports if this clipped shape contains the given edge ID.
+func (c *clippedShape) containsEdge(id int) bool {
+ // Linear search is fast because the number of edges per shape is typically
+ // very small (less than 10).
+ for _, e := range c.edges {
+ if e == id {
+ return true
+ }
+ }
+ return false
+}
+
+// ShapeIndexCell stores the index contents for a particular CellID.
+type ShapeIndexCell struct {
+ shapes []*clippedShape
+}
+
+// NewShapeIndexCell creates a new cell that is sized to hold the given number of shapes.
+func NewShapeIndexCell(numShapes int) *ShapeIndexCell {
+ return &ShapeIndexCell{
+ shapes: make([]*clippedShape, numShapes),
+ }
+}
+
+// numEdges reports the total number of edges in all clipped shapes in this cell.
+func (s *ShapeIndexCell) numEdges() int {
+ var e int
+ for _, cs := range s.shapes {
+ e += cs.numEdges()
+ }
+ return e
+}
+
+// add adds the given clipped shape to this index cell.
+func (s *ShapeIndexCell) add(c *clippedShape) {
+ // C++ uses a set, so it's ordered and unique. We don't currently catch
+ // the case when a duplicate value is added.
+ s.shapes = append(s.shapes, c)
+}
+
+// findByShapeID returns the clipped shape that contains the given shapeID,
+// or nil if none of the clipped shapes contain it.
+func (s *ShapeIndexCell) findByShapeID(shapeID int32) *clippedShape {
+ // Linear search is fine because the number of shapes per cell is typically
+ // very small (most often 1), and is large only for pathological inputs
+ // (e.g. very deeply nested loops).
+ for _, clipped := range s.shapes {
+ if clipped.shapeID == shapeID {
+ return clipped
+ }
+ }
+ return nil
+}
+
+// faceEdge and clippedEdge store temporary edge data while the index is being
+// updated.
+//
+// While it would be possible to combine all the edge information into one
+// structure, there are two good reasons for separating it:
+//
+// - Memory usage. Separating the two means that we only need to
+// store one copy of the per-face data no matter how many times an edge is
+// subdivided, and it also lets us delay computing bounding boxes until
+// they are needed for processing each face (when the dataset spans
+// multiple faces).
+//
+// - Performance. UpdateEdges is significantly faster on large polygons when
+// the data is separated, because it often only needs to access the data in
+// clippedEdge and this data is cached more successfully.
+
+// faceEdge represents an edge that has been projected onto a given face.
+type faceEdge struct {
+ shapeID int32 // The ID of shape that this edge belongs to
+ edgeID int // Edge ID within that shape
+ maxLevel int // Not desirable to subdivide this edge beyond this level
+ hasInterior bool // Belongs to a shape that has a dimension of 2
+ a, b r2.Point // The edge endpoints, clipped to a given face
+ edge Edge // The original edge.
+}
+
+// clippedEdge represents the portion of a faceEdge that has been clipped to a given Cell.
+type clippedEdge struct {
+ faceEdge *faceEdge // The original unclipped edge
+ bound r2.Rect // Bounding box for the clipped portion
+}
+
+// ShapeIndexIteratorPos defines the set of possible iterator starting positions. By
+// default iterators are unpositioned, since this avoids an extra seek in the
+// common situation where one of the seek methods (such as LocatePoint) is
+// immediately called.
+type ShapeIndexIteratorPos int
+
+const (
+ // IteratorBegin specifies the iterator should be positioned at the beginning of the index.
+ IteratorBegin ShapeIndexIteratorPos = iota
+ // IteratorEnd specifies the iterator should be positioned at the end of the index.
+ IteratorEnd
+)
+
+// ShapeIndexIterator is an iterator that provides low-level access to
+// the cells of the index. Cells are returned in increasing order of CellID.
+//
+// for it := index.Iterator(); !it.Done(); it.Next() {
+// fmt.Print(it.CellID())
+// }
+//
+type ShapeIndexIterator struct {
+ index *ShapeIndex
+ position int
+ id CellID
+ cell *ShapeIndexCell
+}
+
+// NewShapeIndexIterator creates a new iterator for the given index. If a starting
+// position is specified, the iterator is positioned at the given spot.
+func NewShapeIndexIterator(index *ShapeIndex, pos ...ShapeIndexIteratorPos) *ShapeIndexIterator {
+ s := &ShapeIndexIterator{
+ index: index,
+ }
+
+ if len(pos) > 0 {
+ if len(pos) > 1 {
+ panic("too many ShapeIndexIteratorPos arguments")
+ }
+ switch pos[0] {
+ case IteratorBegin:
+ s.Begin()
+ case IteratorEnd:
+ s.End()
+ default:
+ panic("unknown ShapeIndexIteratorPos value")
+ }
+ }
+
+ return s
+}
+
+// CellID returns the CellID of the current index cell.
+// If s.Done() is true, a value larger than any valid CellID is returned.
+func (s *ShapeIndexIterator) CellID() CellID {
+ return s.id
+}
+
+// IndexCell returns the current index cell.
+func (s *ShapeIndexIterator) IndexCell() *ShapeIndexCell {
+ // TODO(roberts): C++ has this call a virtual method to allow subclasses
+ // of ShapeIndexIterator to do other work before returning the cell. Do
+ // we need such a thing?
+ return s.cell
+}
+
+// Center returns the Point at the center of the current position of the iterator.
+func (s *ShapeIndexIterator) Center() Point {
+ return s.CellID().Point()
+}
+
+// Begin positions the iterator at the beginning of the index.
+func (s *ShapeIndexIterator) Begin() {
+ if !s.index.IsFresh() {
+ s.index.maybeApplyUpdates()
+ }
+ s.position = 0
+ s.refresh()
+}
+
+// Next positions the iterator at the next index cell.
+func (s *ShapeIndexIterator) Next() {
+ s.position++
+ s.refresh()
+}
+
+// Prev advances the iterator to the previous cell in the index and returns true to
+// indicate it was not yet at the beginning of the index. If the iterator is at the
+// first cell the call does nothing and returns false.
+func (s *ShapeIndexIterator) Prev() bool {
+ if s.position <= 0 {
+ return false
+ }
+
+ s.position--
+ s.refresh()
+ return true
+}
+
+// End positions the iterator at the end of the index.
+func (s *ShapeIndexIterator) End() {
+ s.position = len(s.index.cells)
+ s.refresh()
+}
+
+// Done reports if the iterator is positioned at or after the last index cell.
+func (s *ShapeIndexIterator) Done() bool {
+ return s.id == SentinelCellID
+}
+
+// refresh updates the stored internal iterator values.
+func (s *ShapeIndexIterator) refresh() {
+ if s.position < len(s.index.cells) {
+ s.id = s.index.cells[s.position]
+ s.cell = s.index.cellMap[s.CellID()]
+ } else {
+ s.id = SentinelCellID
+ s.cell = nil
+ }
+}
+
+// seek positions the iterator at the first cell whose ID >= target, or at the
+// end of the index if no such cell exists.
+func (s *ShapeIndexIterator) seek(target CellID) {
+ s.position = sort.Search(len(s.index.cells), func(i int) bool {
+ return s.index.cells[i] >= target
+ })
+ s.refresh()
+}
+
+// LocatePoint positions the iterator at the cell that contains the given Point.
+// If no such cell exists, the iterator position is unspecified, and false is returned.
+// The cell at the matched position is guaranteed to contain all edges that might
+// intersect the line segment between target and the cell's center.
+func (s *ShapeIndexIterator) LocatePoint(p Point) bool {
+	// Let I = cellMap.LowerBound(T), where T is the leaf cell containing
+	// point P, and let I' be the predecessor of I. If T is contained by an
+	// index cell, then the containing cell is either I or I'. We test for
+	// containment by comparing the ranges of leaf cells spanned by T, I, and I'.
+ target := cellIDFromPoint(p)
+ s.seek(target)
+ if !s.Done() && s.CellID().RangeMin() <= target {
+ return true
+ }
+
+ if s.Prev() && s.CellID().RangeMax() >= target {
+ return true
+ }
+ return false
+}
+
+// LocateCellID attempts to position the iterator at the first matching index cell
+// in the index that has some relation to the given CellID. Let T be the target CellID.
+// If T is contained by (or equal to) some index cell I, then the iterator is positioned
+// at I and returns Indexed. Otherwise if T contains one or more (smaller) index cells,
+// then the iterator is positioned at the first such cell I and returns Subdivided.
+// Otherwise Disjoint is returned and the iterator position is undefined.
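+//
+// A hedged usage sketch (index and target are assumed values):
+//
+//	it := NewShapeIndexIterator(index)
+//	if it.LocateCellID(target) == Indexed {
+//		_ = it.IndexCell() // the cell containing (or equal to) target
+//	}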
+func (s *ShapeIndexIterator) LocateCellID(target CellID) CellRelation {
+ // Let T be the target, let I = cellMap.LowerBound(T.RangeMin()), and
+ // let I' be the predecessor of I. If T contains any index cells, then T
+ // contains I. Similarly, if T is contained by an index cell, then the
+ // containing cell is either I or I'. We test for containment by comparing
+ // the ranges of leaf cells spanned by T, I, and I'.
+ s.seek(target.RangeMin())
+ if !s.Done() {
+ if s.CellID() >= target && s.CellID().RangeMin() <= target {
+ return Indexed
+ }
+ if s.CellID() <= target.RangeMax() {
+ return Subdivided
+ }
+ }
+ if s.Prev() && s.CellID().RangeMax() >= target {
+ return Indexed
+ }
+ return Disjoint
+}
+
+// tracker keeps track of which shapes in a given set contain a particular point
+// (the focus). It provides an efficient way to move the focus from one point
+// to another and incrementally update the set of shapes which contain it. We use
+// this to compute which shapes contain the center of every CellID in the index,
+// by advancing the focus from one cell center to the next.
+//
+// Initially the focus is at the start of the CellID space-filling curve. We then
+// visit all the cells that are being added to the ShapeIndex in increasing order
+// of CellID. For each cell, we draw two edges: one from the entry vertex to the
+// center, and another from the center to the exit vertex (where entry and exit
+// refer to the points where the space-filling curve enters and exits the cell).
+// By counting edge crossings we can incrementally compute which shapes contain
+// the cell center. Note that the same set of shapes will always contain the exit
+// point of one cell and the entry point of the next cell in the index, because
+// either (a) these two points are actually the same, or (b) the intervening
+// cells in CellID order are all empty, and therefore there are no edge crossings
+// if we follow this path from one cell to the other.
+//
+// In C++, this is S2ShapeIndex::InteriorTracker.
+type tracker struct {
+ isActive bool
+ a Point
+ b Point
+ nextCellID CellID
+ crosser *EdgeCrosser
+ shapeIDs []int32
+
+ // Shape ids saved by saveAndClearStateBefore. The state is never saved
+ // recursively so we don't need to worry about maintaining a stack.
+ savedIDs []int32
+}
+
+// newTracker returns a new tracker with the appropriate defaults.
+func newTracker() *tracker {
+ // As shapes are added, we compute which ones contain the start of the
+ // CellID space-filling curve by drawing an edge from OriginPoint to this
+ // point and counting how many shape edges cross this edge.
+ t := &tracker{
+ isActive: false,
+ b: trackerOrigin(),
+ nextCellID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel),
+ }
+	t.drawTo(trackerOrigin()) // CellID curve start
+
+ return t
+}
+
+// trackerOrigin returns the initial focus point when the tracker is created
+// (corresponding to the start of the CellID space-filling curve).
+func trackerOrigin() Point {
+ // The start of the S2CellId space-filling curve.
+ return Point{faceUVToXYZ(0, -1, -1).Normalize()}
+}
+
+// focus returns the current focus point of the tracker.
+func (t *tracker) focus() Point { return t.b }
+
+// addShape adds a shape whose interior should be tracked. containsFocus indicates
+// whether the current focus point is inside the shape. Alternatively, if
+// the focus point is in the process of being moved (via moveTo/drawTo), you
+// can specify containsFocus at the old focus point and call testEdge
+// for every edge of the shape that might cross the current drawTo line.
+// This updates the state to correspond to the new focus point.
+//
+// This requires the shape to have an interior.
+func (t *tracker) addShape(shapeID int32, containsFocus bool) {
+ t.isActive = true
+ if containsFocus {
+ t.toggleShape(shapeID)
+ }
+}
+
+// moveTo moves the focus of the tracker to the given point. This method should
+// only be used when it is known that there are no edge crossings between the old
+// and new focus locations; otherwise use drawTo.
+func (t *tracker) moveTo(b Point) { t.b = b }
+
+// drawTo moves the focus of the tracker to the given point. After this method is
+// called, testEdge should be called with all edges that may cross the line
+// segment between the old and new focus locations.
+func (t *tracker) drawTo(b Point) {
+ t.a = t.b
+ t.b = b
+ // TODO: the edge crosser may need an in-place Init method if this gets expensive
+ t.crosser = NewEdgeCrosser(t.a, t.b)
+}
+
+// testEdge checks whether the given edge crosses the current edge, and if so,
+// toggles the state of the given shapeID.
+// This requires the shape to have an interior.
+func (t *tracker) testEdge(shapeID int32, edge Edge) {
+ if t.crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) {
+ t.toggleShape(shapeID)
+ }
+}
+
+// setNextCellID is used to indicate that the last argument to moveTo or drawTo
+// was the entry vertex of the given CellID, i.e. the tracker is positioned at the
+// start of this cell. By using this method together with atCellID, the caller
+// can avoid calling moveTo in cases where the exit vertex of the previous cell
+// is the same as the entry vertex of the current cell.
+func (t *tracker) setNextCellID(nextCellID CellID) {
+ t.nextCellID = nextCellID.RangeMin()
+}
+
+// atCellID reports if the focus is already at the entry vertex of the given
+// CellID (provided that the caller calls setNextCellID as each cell is processed).
+func (t *tracker) atCellID(cellid CellID) bool {
+ return cellid.RangeMin() == t.nextCellID
+}
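+
+// Taken together, these methods form the per-cell protocol used during index
+// construction. A hedged sketch, where entry, center, and exit stand for the
+// corresponding PaddedCell vertices (see makeIndexCell below for the real use):
+//
+//	if !t.atCellID(cellID) {
+//		t.moveTo(entry) // no crossings are possible between cells
+//	}
+//	t.drawTo(center)
+//	t.testEdge(shapeID, edge) // for each edge that might cross the segment
+//	t.drawTo(exit)
+//	t.testEdge(shapeID, edge) // again for the second half of the path
+//	t.setNextCellID(cellID.Next())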
+
+// toggleShape adds or removes the given shapeID from the set of IDs it is tracking.
+func (t *tracker) toggleShape(shapeID int32) {
+	// Most shapeIDs slices are small, so special-case the common cases.
+
+ // If there is nothing here, add it.
+ if len(t.shapeIDs) == 0 {
+ t.shapeIDs = append(t.shapeIDs, shapeID)
+ return
+ }
+
+ // If it's the first element, drop it from the slice.
+ if t.shapeIDs[0] == shapeID {
+ t.shapeIDs = t.shapeIDs[1:]
+ return
+ }
+
+ for i, s := range t.shapeIDs {
+ if s < shapeID {
+ continue
+ }
+
+ // If it's in the set, cut it out.
+ if s == shapeID {
+ copy(t.shapeIDs[i:], t.shapeIDs[i+1:]) // overwrite the ith element
+ t.shapeIDs = t.shapeIDs[:len(t.shapeIDs)-1]
+ return
+ }
+
+		// We've reached a point in the slice where the given shapeID is
+		// less than the current position's ID, so insert it here.
+		t.shapeIDs = append(t.shapeIDs[0:i],
+			append([]int32{shapeID}, t.shapeIDs[i:]...)...)
+ return
+ }
+
+ // We got to the end and didn't find it, so add it to the list.
+ t.shapeIDs = append(t.shapeIDs, shapeID)
+}
+
+// saveAndClearStateBefore makes an internal copy of the state for shape ids below
+// the given limit, and then clears the state for those shapes. This is used during
+// incremental updates to track the state of added and removed shapes separately.
+func (t *tracker) saveAndClearStateBefore(limitShapeID int32) {
+ limit := t.lowerBound(limitShapeID)
+ t.savedIDs = append([]int32(nil), t.shapeIDs[:limit]...)
+ t.shapeIDs = t.shapeIDs[limit:]
+}
+
+// restoreStateBefore restores the state previously saved by saveAndClearStateBefore.
+// This only affects the state for shapeIDs below "limitShapeID".
+func (t *tracker) restoreStateBefore(limitShapeID int32) {
+ limit := t.lowerBound(limitShapeID)
+ t.shapeIDs = append(append([]int32(nil), t.savedIDs...), t.shapeIDs[limit:]...)
+ t.savedIDs = nil
+}
+
+// lowerBound returns the index of the first entry x in shapeIDs where x >= shapeID.
+func (t *tracker) lowerBound(shapeID int32) int32 {
+	return int32(sort.Search(len(t.shapeIDs), func(i int) bool {
+		return t.shapeIDs[i] >= shapeID
+	}))
+}
+
+// removedShape records the edges of a shape that has been queued for removal.
+type removedShape struct {
+ shapeID int32
+ hasInterior bool
+ containsTrackerOrigin bool
+ edges []Edge
+}
+
+// There are three basic states the index can be in.
+const (
+ stale int32 = iota // There are pending updates.
+ updating // Updates are currently being applied.
+ fresh // There are no pending updates.
+)
+
+// ShapeIndex indexes a set of Shapes, where a Shape is some collection of edges
+// that optionally defines an interior. It can be used to represent a set of
+// points, a set of polylines, or a set of polygons. For Shapes that have
+// interiors, the index makes it very fast to determine which Shape(s) contain
+// a given point or region.
+//
+// The index can be updated incrementally by adding or removing shapes. It is
+// designed to handle up to hundreds of millions of edges. All data structures
+// are designed to be small, so the index is compact; generally it is smaller
+// than the underlying data being indexed. The index is also fast to construct.
+//
+// Polygon, Loop, and Polyline implement Shape which allows these objects to
+// be indexed easily. You can find useful query methods in CrossingEdgeQuery
+// and ClosestEdgeQuery (not yet implemented in Go).
+//
+// Example showing how to build an index of Polylines:
+//
+// index := NewShapeIndex()
+// for _, polyline := range polylines {
+//		index.Add(polyline)
+// }
+// // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here.
+//
+type ShapeIndex struct {
+ // shapes is a map of shape ID to shape.
+ shapes map[int32]Shape
+
+ // The maximum number of edges per cell.
+ // TODO(roberts): Update the comments when the usage of this is implemented.
+ maxEdgesPerCell int
+
+ // nextID tracks the next ID to hand out. IDs are not reused when shapes
+ // are removed from the index.
+ nextID int32
+
+ // cellMap is a map from CellID to the set of clipped shapes that intersect that
+ // cell. The cell IDs cover a set of non-overlapping regions on the sphere.
+ // In C++, this is a BTree, so the cells are ordered naturally by the data structure.
+ cellMap map[CellID]*ShapeIndexCell
+ // Track the ordered list of cell IDs.
+ cells []CellID
+
+ // The current status of the index; accessed atomically.
+ status int32
+
+ // Additions and removals are queued and processed on the first subsequent
+ // query. There are several reasons to do this:
+ //
+	//  - It is significantly more efficient to process updates in batches,
+	//    especially as the number of added entities grows.
+ // - Often the index will never be queried, in which case we can save both
+ // the time and memory required to build it. Examples:
+	//    + Loops that are created simply to pass to a Polygon. (We don't
+ // need the Loop index, because Polygon builds its own index.)
+ // + Applications that load a database of geometry and then query only
+ // a small fraction of it.
+ //
+	// The main drawback is that we need to do some extra work to ensure that
+ // some methods are still thread-safe. Note that the goal is *not* to
+ // make this thread-safe in general, but simply to hide the fact that
+ // we defer some of the indexing work until query time.
+ //
+	// This mutex protects all of the following fields in the index.
+ mu sync.RWMutex
+
+ // pendingAdditionsPos is the index of the first entry that has not been processed
+ // via applyUpdatesInternal.
+ pendingAdditionsPos int32
+
+ // The set of shapes that have been queued for removal but not processed yet by
+ // applyUpdatesInternal.
+ pendingRemovals []*removedShape
+}
+
+// NewShapeIndex creates a new ShapeIndex.
+func NewShapeIndex() *ShapeIndex {
+ return &ShapeIndex{
+ maxEdgesPerCell: 10,
+ shapes: make(map[int32]Shape),
+ cellMap: make(map[CellID]*ShapeIndexCell),
+ cells: nil,
+ status: fresh,
+ }
+}
+
+// Iterator returns an iterator for this index.
+func (s *ShapeIndex) Iterator() *ShapeIndexIterator {
+ s.maybeApplyUpdates()
+ return NewShapeIndexIterator(s, IteratorBegin)
+}
+
+// Begin positions the iterator at the first cell in the index.
+func (s *ShapeIndex) Begin() *ShapeIndexIterator {
+ s.maybeApplyUpdates()
+ return NewShapeIndexIterator(s, IteratorBegin)
+}
+
+// End positions the iterator at the last cell in the index.
+func (s *ShapeIndex) End() *ShapeIndexIterator {
+	// TODO(roberts): It's possible that updates could happen to the index between
+	// the time this is called and the time the iterator's position is used, and
+	// the position would then be invalid or not the end. For now, things will be
+	// undefined if this happens. See about referencing IsFresh to guard against
+	// this in the future.
+ s.maybeApplyUpdates()
+ return NewShapeIndexIterator(s, IteratorEnd)
+}
+
+// Len reports the number of Shapes in this index.
+func (s *ShapeIndex) Len() int {
+ return len(s.shapes)
+}
+
+// Reset resets the index to its original state.
+func (s *ShapeIndex) Reset() {
+ s.shapes = make(map[int32]Shape)
+ s.nextID = 0
+ s.cellMap = make(map[CellID]*ShapeIndexCell)
+ s.cells = nil
+ atomic.StoreInt32(&s.status, fresh)
+}
+
+// NumEdges returns the number of edges in this index.
+func (s *ShapeIndex) NumEdges() int {
+ numEdges := 0
+ for _, shape := range s.shapes {
+ numEdges += shape.NumEdges()
+ }
+ return numEdges
+}
+
+// NumEdgesUpTo returns the number of edges in the given index, up to the given
+// limit. If the limit is encountered, the current running total is returned,
+// which may be more than the limit.
+func (s *ShapeIndex) NumEdgesUpTo(limit int) int {
+ var numEdges int
+	// We iterate over the shapes in ID order to match the counting
+	// behavior in C++ (and for test compatibility), instead of using a
+	// more idiomatic range over the shape map.
+ for i := int32(0); i <= s.nextID; i++ {
+ s := s.Shape(i)
+ if s == nil {
+ continue
+ }
+ numEdges += s.NumEdges()
+ if numEdges >= limit {
+ break
+ }
+ }
+
+ return numEdges
+}
+
+// Shape returns the shape with the given ID, or nil if the shape has been removed from the index.
+func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] }
+
+// idForShape returns the id of the given shape in this index, or -1 if it is
+// not in the index.
+//
+// TODO(roberts): Need to figure out an appropriate way to expose this on a Shape.
+// C++ allows a given S2 type (Loop, Polygon, etc) to be part of multiple indexes.
+// By having each type extend S2Shape which has an id element, they all inherit their
+// own id field rather than having to track it themselves.
+func (s *ShapeIndex) idForShape(shape Shape) int32 {
+ for k, v := range s.shapes {
+ if v == shape {
+ return k
+ }
+ }
+ return -1
+}
+
+// Add adds the given shape to the index and returns the assigned ID.
+func (s *ShapeIndex) Add(shape Shape) int32 {
+ s.shapes[s.nextID] = shape
+ s.nextID++
+ atomic.StoreInt32(&s.status, stale)
+ return s.nextID - 1
+}
+
+// Remove removes the given shape from the index.
+func (s *ShapeIndex) Remove(shape Shape) {
+ // The index updates itself lazily because it is much more efficient to
+ // process additions and removals in batches.
+ id := s.idForShape(shape)
+
+ // If the shape wasn't found, it's already been removed or was not in the index.
+ if s.shapes[id] == nil {
+ return
+ }
+
+ // Remove the shape from the shapes map.
+ delete(s.shapes, id)
+
+ // We are removing a shape that has not yet been added to the index,
+ // so there is nothing else to do.
+ if id >= s.pendingAdditionsPos {
+ return
+ }
+
+ numEdges := shape.NumEdges()
+ removed := &removedShape{
+ shapeID: id,
+ hasInterior: shape.Dimension() == 2,
+ containsTrackerOrigin: shape.ReferencePoint().Contained,
+ edges: make([]Edge, numEdges),
+ }
+
+ for e := 0; e < numEdges; e++ {
+ removed.edges[e] = shape.Edge(e)
+ }
+
+ s.pendingRemovals = append(s.pendingRemovals, removed)
+ atomic.StoreInt32(&s.status, stale)
+}
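+
+// A hedged sketch of the add/remove round trip (removal is applied lazily on
+// the next query):
+//
+//	id := index.Add(shape)
+//	// ... queries ...
+//	index.Remove(index.Shape(id))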
+
+// IsFresh reports if there are no pending updates that need to be applied.
+// This can be useful to avoid building the index unnecessarily, or for
+// choosing between two different algorithms depending on whether the index
+// is available.
+//
+// The returned index status may be slightly out of date if the index was
+// built in a different thread. This is fine for the intended use (as an
+// efficiency hint), but it should not be used by internal methods.
+func (s *ShapeIndex) IsFresh() bool {
+ return atomic.LoadInt32(&s.status) == fresh
+}
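+
+// For example, a hedged sketch of using IsFresh as an efficiency hint:
+//
+//	if index.IsFresh() {
+//		// The index is built; indexed queries are cheap.
+//	} else {
+//		// A brute-force check may be cheaper than forcing a full build.
+//	}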
+
+// isFirstUpdate reports if this is the first update to the index.
+func (s *ShapeIndex) isFirstUpdate() bool {
+ // Note that it is not sufficient to check whether cellMap is empty, since
+ // entries are added to it during the update process.
+ return s.pendingAdditionsPos == 0
+}
+
+// isShapeBeingRemoved reports if the shape with the given ID is currently slated for removal.
+func (s *ShapeIndex) isShapeBeingRemoved(shapeID int32) bool {
+ // All shape ids being removed fall below the index position of shapes being added.
+ return shapeID < s.pendingAdditionsPos
+}
+
+// maybeApplyUpdates checks if the index pieces have changed, and if so, applies pending updates.
+func (s *ShapeIndex) maybeApplyUpdates() {
+ // TODO(roberts): To avoid acquiring and releasing the mutex on every
+ // query, we should use atomic operations when testing whether the status
+ // is fresh and when updating the status to be fresh. This guarantees
+ // that any thread that sees a status of fresh will also see the
+ // corresponding index updates.
+ if atomic.LoadInt32(&s.status) != fresh {
+ s.mu.Lock()
+ s.applyUpdatesInternal()
+ atomic.StoreInt32(&s.status, fresh)
+ s.mu.Unlock()
+ }
+}
+
+// applyUpdatesInternal does the actual work of updating the index by applying all
+// pending additions and removals. It does *not* update the index's status.
+func (s *ShapeIndex) applyUpdatesInternal() {
+ // TODO(roberts): Building the index can use up to 20x as much memory per
+ // edge as the final index memory size. If this causes issues, add in
+ // batched updating to limit the amount of items per batch to a
+ // configurable memory footprint overhead.
+ t := newTracker()
+
+ // allEdges maps a Face to a collection of faceEdges.
+ allEdges := make([][]faceEdge, 6)
+
+ for _, p := range s.pendingRemovals {
+ s.removeShapeInternal(p, allEdges, t)
+ }
+
+ for id := s.pendingAdditionsPos; id < int32(len(s.shapes)); id++ {
+ s.addShapeInternal(id, allEdges, t)
+ }
+
+ for face := 0; face < 6; face++ {
+ s.updateFaceEdges(face, allEdges[face], t)
+ }
+
+ s.pendingRemovals = s.pendingRemovals[:0]
+ s.pendingAdditionsPos = int32(len(s.shapes))
+ // It is the caller's responsibility to update the index status.
+}
+
+// addShapeInternal clips all edges of the given shape to the six cube faces,
+// adds the clipped edges to the set of allEdges, and starts tracking its
+// interior if necessary.
+func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *tracker) {
+ shape, ok := s.shapes[shapeID]
+ if !ok {
+ // This shape has already been removed.
+ return
+ }
+
+ faceEdge := faceEdge{
+ shapeID: shapeID,
+ hasInterior: shape.Dimension() == 2,
+ }
+
+ if faceEdge.hasInterior {
+ t.addShape(shapeID, containsBruteForce(shape, t.focus()))
+ }
+
+ numEdges := shape.NumEdges()
+ for e := 0; e < numEdges; e++ {
+ edge := shape.Edge(e)
+
+ faceEdge.edgeID = e
+ faceEdge.edge = edge
+ faceEdge.maxLevel = maxLevelForEdge(edge)
+ s.addFaceEdge(faceEdge, allEdges)
+ }
+}
+
+// addFaceEdge adds the given faceEdge into the collection of all edges.
+func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) {
+ aFace := face(fe.edge.V0.Vector)
+ // See if both endpoints are on the same face, and are far enough from
+ // the edge of the face that they don't intersect any (padded) adjacent face.
+ if aFace == face(fe.edge.V1.Vector) {
+ x, y := validFaceXYZToUV(aFace, fe.edge.V0.Vector)
+ fe.a = r2.Point{x, y}
+ x, y = validFaceXYZToUV(aFace, fe.edge.V1.Vector)
+ fe.b = r2.Point{x, y}
+
+ maxUV := 1 - cellPadding
+ if math.Abs(fe.a.X) <= maxUV && math.Abs(fe.a.Y) <= maxUV &&
+ math.Abs(fe.b.X) <= maxUV && math.Abs(fe.b.Y) <= maxUV {
+ allEdges[aFace] = append(allEdges[aFace], fe)
+ return
+ }
+ }
+
+ // Otherwise, we simply clip the edge to all six faces.
+ for face := 0; face < 6; face++ {
+ if aClip, bClip, intersects := ClipToPaddedFace(fe.edge.V0, fe.edge.V1, face, cellPadding); intersects {
+ fe.a = aClip
+ fe.b = bClip
+ allEdges[face] = append(allEdges[face], fe)
+ }
+ }
+}
+
+// updateFaceEdges adds or removes the various edges from the index.
+// An edge is added if shapes[id] is not nil, and removed otherwise.
+func (s *ShapeIndex) updateFaceEdges(face int, faceEdges []faceEdge, t *tracker) {
+ numEdges := len(faceEdges)
+ if numEdges == 0 && len(t.shapeIDs) == 0 {
+ return
+ }
+
+	// Create the initial clippedEdge for each faceEdge. Additional clipped
+	// edges are created when edges are split between child cells. We store
+	// pointers to the edges, so that during the recursion we only need to copy
+	// pointers in order to propagate an edge to the correct child.
+ clippedEdges := make([]*clippedEdge, numEdges)
+ bound := r2.EmptyRect()
+ for e := 0; e < numEdges; e++ {
+ clipped := &clippedEdge{
+ faceEdge: &faceEdges[e],
+ }
+ clipped.bound = r2.RectFromPoints(faceEdges[e].a, faceEdges[e].b)
+ clippedEdges[e] = clipped
+ bound = bound.AddRect(clipped.bound)
+ }
+
+ // Construct the initial face cell containing all the edges, and then update
+ // all the edges in the index recursively.
+ faceID := CellIDFromFace(face)
+ pcell := PaddedCellFromCellID(faceID, cellPadding)
+
+ disjointFromIndex := s.isFirstUpdate()
+ if numEdges > 0 {
+ shrunkID := s.shrinkToFit(pcell, bound)
+ if shrunkID != pcell.id {
+ // All the edges are contained by some descendant of the face cell. We
+ // can save a lot of work by starting directly with that cell, but if we
+ // are in the interior of at least one shape then we need to create
+ // index entries for the cells we are skipping over.
+ s.skipCellRange(faceID.RangeMin(), shrunkID.RangeMin(), t, disjointFromIndex)
+ pcell = PaddedCellFromCellID(shrunkID, cellPadding)
+ s.updateEdges(pcell, clippedEdges, t, disjointFromIndex)
+ s.skipCellRange(shrunkID.RangeMax().Next(), faceID.RangeMax().Next(), t, disjointFromIndex)
+ return
+ }
+ }
+
+ // Otherwise (no edges, or no shrinking is possible), subdivide normally.
+ s.updateEdges(pcell, clippedEdges, t, disjointFromIndex)
+}
+
+// shrinkToFit shrinks the PaddedCell to fit within the given bounds.
+func (s *ShapeIndex) shrinkToFit(pcell *PaddedCell, bound r2.Rect) CellID {
+ shrunkID := pcell.ShrinkToFit(bound)
+
+ if !s.isFirstUpdate() && shrunkID != pcell.CellID() {
+ // Don't shrink any smaller than the existing index cells, since we need
+ // to combine the new edges with those cells.
+ iter := s.Iterator()
+ if iter.LocateCellID(shrunkID) == Indexed {
+ shrunkID = iter.CellID()
+ }
+ }
+ return shrunkID
+}
+
+// skipCellRange skips over the cells in the given range, creating index cells if we are
+// currently in the interior of at least one shape.
+func (s *ShapeIndex) skipCellRange(begin, end CellID, t *tracker, disjointFromIndex bool) {
+ // If we aren't in the interior of a shape, then skipping over cells is easy.
+ if len(t.shapeIDs) == 0 {
+ return
+ }
+
+ // Otherwise generate the list of cell ids that we need to visit, and create
+ // an index entry for each one.
+ skipped := CellUnionFromRange(begin, end)
+ for _, cell := range skipped {
+ var clippedEdges []*clippedEdge
+ s.updateEdges(PaddedCellFromCellID(cell, cellPadding), clippedEdges, t, disjointFromIndex)
+ }
+}
+
+// updateEdges adds or removes the given edges whose bounding boxes intersect a
+// given cell. disjointFromIndex is an optimization hint indicating that cellMap
+// does not contain any entries that overlap the given cell.
+func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tracker, disjointFromIndex bool) {
+ // This function is recursive with a maximum recursion depth of 30 (maxLevel).
+
+ // Incremental updates are handled as follows. All edges being added or
+ // removed are combined together in edges, and all shapes with interiors
+ // are tracked using tracker. We subdivide recursively as usual until we
+ // encounter an existing index cell. At this point we absorb the index
+ // cell as follows:
+ //
+ // - Edges and shapes that are being removed are deleted from edges and
+ // tracker.
+ // - All remaining edges and shapes from the index cell are added to
+ // edges and tracker.
+ // - Continue subdividing recursively, creating new index cells as needed.
+ // - When the recursion gets back to the cell that was absorbed, we
+ // restore edges and tracker to their previous state.
+ //
+ // Note that the only reason that we include removed shapes in the recursive
+ // subdivision process is so that we can find all of the index cells that
+ // contain those shapes efficiently, without maintaining an explicit list of
+ // index cells for each shape (which would be expensive in terms of memory).
+ indexCellAbsorbed := false
+ if !disjointFromIndex {
+ // There may be existing index cells contained inside pcell. If we
+ // encounter such a cell, we need to combine the edges being updated with
+ // the existing cell contents by absorbing the cell.
+ iter := s.Iterator()
+ r := iter.LocateCellID(pcell.id)
+ if r == Disjoint {
+ disjointFromIndex = true
+ } else if r == Indexed {
+ // Absorb the index cell by transferring its contents to edges and
+ // deleting it. We also start tracking the interior of any new shapes.
+ s.absorbIndexCell(pcell, iter, edges, t)
+ indexCellAbsorbed = true
+ disjointFromIndex = true
+ } else {
+ // DCHECK_EQ(SUBDIVIDED, r)
+ }
+ }
+
+ // If there are existing index cells below us, then we need to keep
+ // subdividing so that we can merge with those cells. Otherwise,
+ // makeIndexCell checks if the number of edges is small enough, and creates
+ // an index cell if possible (returning true when it does so).
+ if !disjointFromIndex || !s.makeIndexCell(pcell, edges, t) {
+ // TODO(roberts): If it turns out to have memory problems when there
+ // are 10M+ edges in the index, look into pre-allocating space so we
+ // are not always appending.
+ childEdges := [2][2][]*clippedEdge{} // [i][j]
+
+ // Compute the middle of the padded cell, defined as the rectangle in
+ // (u,v)-space that belongs to all four (padded) children. By comparing
+ // against the four boundaries of middle we can determine which children
+ // each edge needs to be propagated to.
+ middle := pcell.Middle()
+
+		// Build up a list of edges to be passed to each child cell. The (i,j)
+ // directions are left (i=0), right (i=1), lower (j=0), and upper (j=1).
+ // Note that the vast majority of edges are propagated to a single child.
+ for _, edge := range edges {
+ if edge.bound.X.Hi <= middle.X.Lo {
+ // Edge is entirely contained in the two left children.
+ a, b := s.clipVAxis(edge, middle.Y)
+ if a != nil {
+ childEdges[0][0] = append(childEdges[0][0], a)
+ }
+ if b != nil {
+ childEdges[0][1] = append(childEdges[0][1], b)
+ }
+ } else if edge.bound.X.Lo >= middle.X.Hi {
+ // Edge is entirely contained in the two right children.
+ a, b := s.clipVAxis(edge, middle.Y)
+ if a != nil {
+ childEdges[1][0] = append(childEdges[1][0], a)
+ }
+ if b != nil {
+ childEdges[1][1] = append(childEdges[1][1], b)
+ }
+ } else if edge.bound.Y.Hi <= middle.Y.Lo {
+ // Edge is entirely contained in the two lower children.
+ if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil {
+ childEdges[0][0] = append(childEdges[0][0], a)
+ }
+ if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil {
+ childEdges[1][0] = append(childEdges[1][0], b)
+ }
+ } else if edge.bound.Y.Lo >= middle.Y.Hi {
+ // Edge is entirely contained in the two upper children.
+ if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil {
+ childEdges[0][1] = append(childEdges[0][1], a)
+ }
+ if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil {
+ childEdges[1][1] = append(childEdges[1][1], b)
+ }
+ } else {
+ // The edge bound spans all four children. The edge
+ // itself intersects either three or four padded children.
+ left := s.clipUBound(edge, 1, middle.X.Hi)
+ a, b := s.clipVAxis(left, middle.Y)
+ if a != nil {
+ childEdges[0][0] = append(childEdges[0][0], a)
+ }
+ if b != nil {
+ childEdges[0][1] = append(childEdges[0][1], b)
+ }
+ right := s.clipUBound(edge, 0, middle.X.Lo)
+ a, b = s.clipVAxis(right, middle.Y)
+ if a != nil {
+ childEdges[1][0] = append(childEdges[1][0], a)
+ }
+ if b != nil {
+ childEdges[1][1] = append(childEdges[1][1], b)
+ }
+ }
+ }
+
+ // Now recursively update the edges in each child. We call the children in
+ // increasing order of CellID so that when the index is first constructed,
+ // all insertions into cellMap are at the end (which is much faster).
+ for pos := 0; pos < 4; pos++ {
+ i, j := pcell.ChildIJ(pos)
+ if len(childEdges[i][j]) > 0 || len(t.shapeIDs) > 0 {
+ s.updateEdges(PaddedCellFromParentIJ(pcell, i, j), childEdges[i][j],
+ t, disjointFromIndex)
+ }
+ }
+ }
+
+ if indexCellAbsorbed {
+ // Restore the state for any edges being removed that we are tracking.
+ t.restoreStateBefore(s.pendingAdditionsPos)
+ }
+}
+
+// makeIndexCell builds an indexCell from the given padded cell and set of edges
+// and adds it to the index. It returns true when a cell was added or none was
+// needed (both the edges and tracker state are empty), and false when there are
+// too many edges and subdivision must continue.
+func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *tracker) bool {
+ // If the cell is empty, no index cell is needed. (In most cases this
+ // situation is detected before we get to this point, but this can happen
+ // when all shapes in a cell are removed.)
+ if len(edges) == 0 && len(t.shapeIDs) == 0 {
+ return true
+ }
+
+ // Count the number of edges that have not reached their maximum level yet.
+ // Return false if there are too many such edges.
+ count := 0
+ for _, ce := range edges {
+ if p.Level() < ce.faceEdge.maxLevel {
+ count++
+ }
+
+ if count > s.maxEdgesPerCell {
+ return false
+ }
+ }
+
+ // Possible optimization: Continue subdividing as long as exactly one child
+ // of the padded cell intersects the given edges. This can be done by finding
+ // the bounding box of all the edges and calling ShrinkToFit:
+ //
+ // cellID = p.ShrinkToFit(RectBound(edges));
+ //
+ // Currently this is not beneficial; it slows down construction by 4-25%
+ // (mainly computing the union of the bounding rectangles) and also slows
+ // down queries (since more recursive clipping is required to get down to
+ // the level of a spatial index cell). But it may be worth trying again
+ // once containsCenter is computed and all algorithms are modified to
+ // take advantage of it.
+
+ // We update the InteriorTracker as follows. For every Cell in the index
+ // we construct two edges: one edge from entry vertex of the cell to its
+ // center, and one from the cell center to its exit vertex. Here entry
+	// and exit refer to the CellID ordering, i.e. the order in which points
+	// are encountered along the S2 space-filling curve. The exit vertex then
+ // becomes the entry vertex for the next cell in the index, unless there are
+ // one or more empty intervening cells, in which case the InteriorTracker
+ // state is unchanged because the intervening cells have no edges.
+
+ // Shift the InteriorTracker focus point to the center of the current cell.
+ if t.isActive && len(edges) != 0 {
+ if !t.atCellID(p.id) {
+ t.moveTo(p.EntryVertex())
+ }
+ t.drawTo(p.Center())
+ s.testAllEdges(edges, t)
+ }
+
+ // Allocate and fill a new index cell. To get the total number of shapes we
+ // need to merge the shapes associated with the intersecting edges together
+ // with the shapes that happen to contain the cell center.
+ cshapeIDs := t.shapeIDs
+ numShapes := s.countShapes(edges, cshapeIDs)
+ cell := NewShapeIndexCell(numShapes)
+
+ // To fill the index cell we merge the two sources of shapes: edge shapes
+ // (those that have at least one edge that intersects this cell), and
+ // containing shapes (those that contain the cell center). We keep track
+ // of the index of the next intersecting edge and the next containing shape
+ // as we go along. Both sets of shape ids are already sorted.
+ eNext := 0
+ cNextIdx := 0
+ for i := 0; i < numShapes; i++ {
+ var clipped *clippedShape
+		// Default both ids to a sentinel value so that the comparisons
+		// below prefer whichever real candidate id comes first.
+		eshapeID := int32(s.Len())
+		cshapeID := eshapeID
+
+ if eNext != len(edges) {
+ eshapeID = edges[eNext].faceEdge.shapeID
+ }
+ if cNextIdx < len(cshapeIDs) {
+ cshapeID = cshapeIDs[cNextIdx]
+ }
+ eBegin := eNext
+ if cshapeID < eshapeID {
+ // The entire cell is in the shape interior.
+ clipped = newClippedShape(cshapeID, 0)
+ clipped.containsCenter = true
+ cNextIdx++
+ } else {
+ // Count the number of edges for this shape and allocate space for them.
+ for eNext < len(edges) && edges[eNext].faceEdge.shapeID == eshapeID {
+ eNext++
+ }
+ clipped = newClippedShape(eshapeID, eNext-eBegin)
+ for e := eBegin; e < eNext; e++ {
+ clipped.edges[e-eBegin] = edges[e].faceEdge.edgeID
+ }
+ if cshapeID == eshapeID {
+ clipped.containsCenter = true
+ cNextIdx++
+ }
+ }
+ cell.shapes[i] = clipped
+ }
+
+ // Add this cell to the map.
+ s.cellMap[p.id] = cell
+ s.cells = append(s.cells, p.id)
+
+ // Shift the tracker focus point to the exit vertex of this cell.
+ if t.isActive && len(edges) != 0 {
+ t.drawTo(p.ExitVertex())
+ s.testAllEdges(edges, t)
+ t.setNextCellID(p.id.Next())
+ }
+ return true
+}
+
+// updateBound returns a new clippedEdge based on the given edge, with the
+// specified endpoints of its u- and v-axis bounds replaced by the given values.
+func (s *ShapeIndex) updateBound(edge *clippedEdge, uEnd int, u float64, vEnd int, v float64) *clippedEdge {
+ c := &clippedEdge{faceEdge: edge.faceEdge}
+ if uEnd == 0 {
+ c.bound.X.Lo = u
+ c.bound.X.Hi = edge.bound.X.Hi
+ } else {
+ c.bound.X.Lo = edge.bound.X.Lo
+ c.bound.X.Hi = u
+ }
+
+ if vEnd == 0 {
+ c.bound.Y.Lo = v
+ c.bound.Y.Hi = edge.bound.Y.Hi
+ } else {
+ c.bound.Y.Lo = edge.bound.Y.Lo
+ c.bound.Y.Hi = v
+ }
+
+ return c
+}
+
+// clipUBound clips the u-axis bound of the given edge at the given endpoint
+// (lo=0, hi=1) so that it does not extend past the given value.
+func (s *ShapeIndex) clipUBound(edge *clippedEdge, uEnd int, u float64) *clippedEdge {
+ // First check whether the edge actually requires any clipping. (Sometimes
+ // this method is called when clipping is not necessary, e.g. when one edge
+ // endpoint is in the overlap area between two padded child cells.)
+ if uEnd == 0 {
+ if edge.bound.X.Lo >= u {
+ return edge
+ }
+ } else {
+ if edge.bound.X.Hi <= u {
+ return edge
+ }
+ }
+ // We interpolate the new v-value from the endpoints of the original edge.
+ // This has two advantages: (1) we don't need to store the clipped endpoints
+ // at all, just their bounding box; and (2) it avoids the accumulation of
+ // roundoff errors due to repeated interpolations. The result needs to be
+ // clamped to ensure that it is in the appropriate range.
+ e := edge.faceEdge
+ v := edge.bound.Y.ClampPoint(interpolateFloat64(u, e.a.X, e.b.X, e.a.Y, e.b.Y))
+
+ // Determine which endpoint of the v-axis bound to update. If the edge
+ // slope is positive we update the same endpoint, otherwise we update the
+ // opposite endpoint.
+ var vEnd int
+ positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+ if (uEnd == 1) == positiveSlope {
+ vEnd = 1
+ }
+ return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVBound clips the v-axis bound of the given edge at the given endpoint
+// (lo=0, hi=1) so that it does not extend past the given value.
+func (s *ShapeIndex) clipVBound(edge *clippedEdge, vEnd int, v float64) *clippedEdge {
+ if vEnd == 0 {
+ if edge.bound.Y.Lo >= v {
+ return edge
+ }
+ } else {
+ if edge.bound.Y.Hi <= v {
+ return edge
+ }
+ }
+
+	// We interpolate the new u-value from the endpoints of the original edge.
+ // This has two advantages: (1) we don't need to store the clipped endpoints
+ // at all, just their bounding box; and (2) it avoids the accumulation of
+ // roundoff errors due to repeated interpolations. The result needs to be
+ // clamped to ensure that it is in the appropriate range.
+ e := edge.faceEdge
+ u := edge.bound.X.ClampPoint(interpolateFloat64(v, e.a.Y, e.b.Y, e.a.X, e.b.X))
+
+	// Determine which endpoint of the u-axis bound to update. If the edge
+ // slope is positive we update the same endpoint, otherwise we update the
+ // opposite endpoint.
+ var uEnd int
+ positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y)
+ if (vEnd == 1) == positiveSlope {
+ uEnd = 1
+ }
+ return s.updateBound(edge, uEnd, u, vEnd, v)
+}
+
+// clipVAxis returns the given edge clipped against the boundaries of the middle
+// interval along the v-axis: the portions that extend into the lower and upper
+// children, or nil where the edge does not extend into that child.
+func (s *ShapeIndex) clipVAxis(edge *clippedEdge, middle r1.Interval) (a, b *clippedEdge) {
+ if edge.bound.Y.Hi <= middle.Lo {
+ // Edge is entirely contained in the lower child.
+ return edge, nil
+ } else if edge.bound.Y.Lo >= middle.Hi {
+ // Edge is entirely contained in the upper child.
+ return nil, edge
+ }
+ // The edge bound spans both children.
+ return s.clipVBound(edge, 1, middle.Hi), s.clipVBound(edge, 0, middle.Lo)
+}
+
+// absorbIndexCell absorbs an index cell by transferring its contents to edges
+// and/or the tracker, and then deletes this cell from the index. If edges
+// includes any edges that are being removed, this method also updates their
+// tracker state to correspond to the exit vertex of this cell.
+func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, edges []*clippedEdge, t *tracker) {
+ // When we absorb a cell, we erase all the edges that are being removed.
+ // However when we are finished with this cell, we want to restore the state
+ // of those edges (since that is how we find all the index cells that need
+ // to be updated). The edges themselves are restored automatically when
+ // UpdateEdges returns from its recursive call, but the InteriorTracker
+ // state needs to be restored explicitly.
+ //
+ // Here we first update the InteriorTracker state for removed edges to
+ // correspond to the exit vertex of this cell, and then save the
+ // InteriorTracker state. This state will be restored by UpdateEdges when
+ // it is finished processing the contents of this cell.
+ if t.isActive && len(edges) != 0 && s.isShapeBeingRemoved(edges[0].faceEdge.shapeID) {
+ // We probably need to update the tracker. ("Probably" because
+ // it's possible that all shapes being removed do not have interiors.)
+ if !t.atCellID(p.id) {
+ t.moveTo(p.EntryVertex())
+ }
+ t.drawTo(p.ExitVertex())
+ t.setNextCellID(p.id.Next())
+ for _, edge := range edges {
+ fe := edge.faceEdge
+ if !s.isShapeBeingRemoved(fe.shapeID) {
+ break // All shapes being removed come first.
+ }
+ if fe.hasInterior {
+ t.testEdge(fe.shapeID, fe.edge)
+ }
+ }
+ }
+
+ // Save the state of the edges being removed, so that it can be restored
+ // when we are finished processing this cell and its children. We don't
+ // need to save the state of the edges being added because they aren't being
+ // removed from "edges" and will therefore be updated normally as we visit
+ // this cell and its children.
+ t.saveAndClearStateBefore(s.pendingAdditionsPos)
+
+ // Create a faceEdge for each edge in this cell that isn't being removed.
+ var faceEdges []*faceEdge
+ trackerMoved := false
+
+ cell := iter.IndexCell()
+ for _, clipped := range cell.shapes {
+ shapeID := clipped.shapeID
+ shape := s.Shape(shapeID)
+ if shape == nil {
+ continue // This shape is being removed.
+ }
+
+ numClipped := clipped.numEdges()
+
+ // If this shape has an interior, start tracking whether we are inside the
+ // shape. updateEdges wants to know whether the entry vertex of this
+ // cell is inside the shape, but we only know whether the center of the
+ // cell is inside the shape, so we need to test all the edges against the
+ // line segment from the cell center to the entry vertex.
+ edge := &faceEdge{
+ shapeID: shapeID,
+ hasInterior: shape.Dimension() == 2,
+ }
+
+ if edge.hasInterior {
+ t.addShape(shapeID, clipped.containsCenter)
+ // There might not be any edges in this entire cell (i.e., it might be
+ // in the interior of all shapes), so we delay updating the tracker
+ // until we see the first edge.
+ if !trackerMoved && numClipped > 0 {
+ t.moveTo(p.Center())
+ t.drawTo(p.EntryVertex())
+ t.setNextCellID(p.id)
+ trackerMoved = true
+ }
+ }
+		for i := 0; i < numClipped; i++ {
+			edgeID := clipped.edges[i]
+			edge.edgeID = edgeID
+			edge.edge = shape.Edge(edgeID)
+			edge.maxLevel = maxLevelForEdge(edge.edge)
+			if edge.hasInterior {
+				t.testEdge(shapeID, edge.edge)
+			}
+			var ok bool
+			edge.a, edge.b, ok = ClipToPaddedFace(edge.edge.V0, edge.edge.V1, p.id.Face(), cellPadding)
+			if !ok {
+				panic("invariant failure in ShapeIndex")
+			}
+			// Append a copy of edge: it is reused on each iteration, so
+			// appending the shared pointer would alias every entry for this
+			// shape to its last edge.
+			fe := *edge
+			faceEdges = append(faceEdges, &fe)
+		}
+ }
+	// Now create a clippedEdge for each faceEdge, and put them in newEdges.
+ var newEdges []*clippedEdge
+ for _, faceEdge := range faceEdges {
+ clipped := &clippedEdge{
+ faceEdge: faceEdge,
+ bound: clippedEdgeBound(faceEdge.a, faceEdge.b, p.bound),
+ }
+ newEdges = append(newEdges, clipped)
+ }
+
+	// Discard any edges from "edges" that are being removed, and append the
+	// remainder to newEdges. (This keeps the edges sorted by shape id.)
+ for i, clipped := range edges {
+ if !s.isShapeBeingRemoved(clipped.faceEdge.shapeID) {
+ newEdges = append(newEdges, edges[i:]...)
+ break
+ }
+ }
+
+ // Update the edge list and delete this cell from the index.
+ edges, newEdges = newEdges, edges
+ delete(s.cellMap, p.id)
+	// TODO(roberts): delete from s.cells.
+}
+
+// testAllEdges calls the trackers testEdge on all edges from shapes that have interiors.
+func (s *ShapeIndex) testAllEdges(edges []*clippedEdge, t *tracker) {
+ for _, edge := range edges {
+ if edge.faceEdge.hasInterior {
+ t.testEdge(edge.faceEdge.shapeID, edge.faceEdge.edge)
+ }
+ }
+}
+
+// countShapes reports the number of distinct shapes that are either associated with the
+// given edges, or that are currently stored in the InteriorTracker.
+func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int {
+ count := 0
+ lastShapeID := int32(-1)
+
+ // next clipped shape id in the shapeIDs list.
+ clippedNext := int32(0)
+ // index of the current element in the shapeIDs list.
+ shapeIDidx := 0
+ for _, edge := range edges {
+ if edge.faceEdge.shapeID == lastShapeID {
+ continue
+ }
+
+ count++
+ lastShapeID = edge.faceEdge.shapeID
+
+ // Skip over any containing shapes up to and including this one,
+ // updating count as appropriate.
+ for ; shapeIDidx < len(shapeIDs); shapeIDidx++ {
+ clippedNext = shapeIDs[shapeIDidx]
+ if clippedNext > lastShapeID {
+ break
+ }
+ if clippedNext < lastShapeID {
+ count++
+ }
+ }
+ }
+
+ // Count any remaining containing shapes.
+ count += len(shapeIDs) - shapeIDidx
+ return count
+}
+
+// maxLevelForEdge reports the maximum level for a given edge.
+func maxLevelForEdge(edge Edge) int {
+ // Compute the maximum cell size for which this edge is considered long.
+ // The calculation does not need to be perfectly accurate, so we use Norm
+ // rather than Angle for speed.
+ cellSize := edge.V0.Sub(edge.V1.Vector).Norm() * cellSizeToLongEdgeRatio
+ // Now return the first level encountered during subdivision where the
+ // average cell size is at most cellSize.
+ return AvgEdgeMetric.MinLevel(cellSize)
+}
+
+// removeShapeInternal does the actual work for removing a given shape from the index.
+func (s *ShapeIndex) removeShapeInternal(removed *removedShape, allEdges [][]faceEdge, t *tracker) {
+ // TODO(roberts): finish the implementation of this.
+}
diff --git a/vendor/github.com/golang/geo/s2/shapeutil.go b/vendor/github.com/golang/geo/s2/shapeutil.go
new file mode 100644
index 000000000..64245dfa1
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeutil.go
@@ -0,0 +1,228 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// CrossingType defines different ways of reporting edge intersections.
+type CrossingType int
+
+const (
+ // CrossingTypeInterior reports intersections that occur at a point
+ // interior to both edges (i.e., not at a vertex).
+ CrossingTypeInterior CrossingType = iota
+
+ // CrossingTypeAll reports all intersections, even those where two edges
+ // intersect only because they share a common vertex.
+ CrossingTypeAll
+
+ // CrossingTypeNonAdjacent reports all intersections except for pairs of
+ // the form (AB, BC) where both edges are from the same ShapeIndex.
+ CrossingTypeNonAdjacent
+)
+
+// rangeIterator is a wrapper over ShapeIndexIterator with extra methods
+// that are useful for merging the contents of two or more ShapeIndexes.
+type rangeIterator struct {
+ it *ShapeIndexIterator
+	// The min and max leaf cell ids covered by the current cell. If done() is
+	// true, these values are larger than any valid cell id.
+ rangeMin CellID
+ rangeMax CellID
+}
+
+// newRangeIterator creates a new rangeIterator positioned at the first cell of the given index.
+func newRangeIterator(index *ShapeIndex) *rangeIterator {
+ r := &rangeIterator{
+ it: index.Iterator(),
+ }
+ r.refresh()
+ return r
+}
+
+func (r *rangeIterator) cellID() CellID { return r.it.CellID() }
+func (r *rangeIterator) indexCell() *ShapeIndexCell { return r.it.IndexCell() }
+func (r *rangeIterator) next() { r.it.Next(); r.refresh() }
+func (r *rangeIterator) done() bool { return r.it.Done() }
+
+// seekTo positions the iterator at the first cell that overlaps or follows
+// the current range minimum of the target iterator, i.e. such that its
+// rangeMax >= target.rangeMin.
+func (r *rangeIterator) seekTo(target *rangeIterator) {
+ r.it.seek(target.rangeMin)
+ // If the current cell does not overlap target, it is possible that the
+ // previous cell is the one we are looking for. This can only happen when
+ // the previous cell contains target but has a smaller CellID.
+ if r.it.Done() || r.it.CellID().RangeMin() > target.rangeMax {
+ if r.it.Prev() && r.it.CellID().RangeMax() < target.cellID() {
+ r.it.Next()
+ }
+ }
+ r.refresh()
+}
+
+// seekBeyond positions the iterator at the first cell that follows the current
+// range of the target iterator, i.e. the first cell such that its
+// rangeMin > target.rangeMax.
+func (r *rangeIterator) seekBeyond(target *rangeIterator) {
+ r.it.seek(target.rangeMax.Next())
+ if !r.it.Done() && r.it.CellID().RangeMin() <= target.rangeMax {
+ r.it.Next()
+ }
+ r.refresh()
+}
+
+// refresh updates the iterator's min and max values.
+func (r *rangeIterator) refresh() {
+ r.rangeMin = r.cellID().RangeMin()
+ r.rangeMax = r.cellID().RangeMax()
+}
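+
+// A hedged sketch of walking two indexes in tandem with these methods
+// (simplified; real callers also inspect the cell contents):
+//
+//	a, b := newRangeIterator(indexA), newRangeIterator(indexB)
+//	for !a.done() && !b.done() {
+//		if a.rangeMax < b.rangeMin {
+//			a.seekTo(b) // all of a's range precedes b's; skip ahead
+//		} else if b.rangeMax < a.rangeMin {
+//			b.seekTo(a)
+//		} else {
+//			// The cells overlap; process the pair, then advance the
+//			// iterator whose range ends first.
+//			if a.rangeMax < b.rangeMax {
+//				a.next()
+//			} else {
+//				b.next()
+//			}
+//		}
+//	}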
+
+// referencePointForShape is a helper function for implementing various Shapes
+// ReferencePoint functions.
+//
+// Given a shape consisting of closed polygonal loops, the interior of the
+// shape is defined as the region to the left of all edges (which must be
+// oriented consistently). This function then chooses an arbitrary point and
+// returns a ReferencePoint recording whether that point is contained by the shape.
+//
+// Unlike Loop and Polygon, this method allows duplicate vertices and
+// edges, which requires some extra care with definitions. The rule that we
+// apply is that an edge and its reverse edge cancel each other: the result
+// is the same as if that edge pair were not present. Therefore shapes that
+// consist only of degenerate loop(s) are either empty or full; by convention,
+// the shape is considered full if and only if it contains an empty loop (see
+// laxPolygon for details).
+//
+// Determining whether a loop on the sphere contains a point is harder than
+// the corresponding problem in 2D plane geometry. It cannot be implemented
+// just by counting edge crossings because there is no such thing as a point
+// at infinity that is guaranteed to be outside the loop.
+//
+// This function requires that the given Shape have an interior.
+func referencePointForShape(shape Shape) ReferencePoint {
+ if shape.NumEdges() == 0 {
+ // A shape with no edges is defined to be full if and only if it
+ // contains at least one chain.
+ return OriginReferencePoint(shape.NumChains() > 0)
+ }
+ // Define a "matched" edge as one that can be paired with a corresponding
+ // reversed edge. Define a vertex as "balanced" if all of its edges are
+ // matched. In order to determine containment, we must find an unbalanced
+ // vertex. Often every vertex is unbalanced, so we start by trying an
+ // arbitrary vertex.
+ edge := shape.Edge(0)
+
+ if ref, ok := referencePointAtVertex(shape, edge.V0); ok {
+ return ref
+ }
+
+ // That didn't work, so now we do some extra work to find an unbalanced
+ // vertex (if any). Essentially we gather a list of edges and a list of
+ // reversed edges, and then sort them. The first edge that appears in one
+ // list but not the other is guaranteed to be unmatched.
+ n := shape.NumEdges()
+ var edges = make([]Edge, n)
+ var revEdges = make([]Edge, n)
+ for i := 0; i < n; i++ {
+ edge := shape.Edge(i)
+ edges[i] = edge
+ revEdges[i] = Edge{V0: edge.V1, V1: edge.V0}
+ }
+
+ sortEdges(edges)
+ sortEdges(revEdges)
+
+ for i := 0; i < n; i++ {
+ if edges[i].Cmp(revEdges[i]) == -1 { // edges[i] is unmatched
+ if ref, ok := referencePointAtVertex(shape, edges[i].V0); ok {
+ return ref
+ }
+ }
+ if revEdges[i].Cmp(edges[i]) == -1 { // revEdges[i] is unmatched
+ if ref, ok := referencePointAtVertex(shape, revEdges[i].V0); ok {
+ return ref
+ }
+ }
+ }
+
+ // All vertices are balanced, so this polygon is either empty or full except
+ // for degeneracies. By convention it is defined to be full if it contains
+ // any chain with no edges.
+ for i := 0; i < shape.NumChains(); i++ {
+ if shape.Chain(i).Length == 0 {
+ return OriginReferencePoint(true)
+ }
+ }
+
+ return OriginReferencePoint(false)
+}
+
+// referencePointAtVertex reports whether the given vertex is unbalanced, and
+// if so returns a ReferencePoint indicating whether the vertex is contained.
+// If the vertex is balanced, the boolean result is false.
+func referencePointAtVertex(shape Shape, vTest Point) (ReferencePoint, bool) {
+ var ref ReferencePoint
+
+ // Let P be an unbalanced vertex. Vertex P is defined to be inside the
+ // region if the region contains a particular direction vector starting from
+ // P, namely the direction p.Ortho(). This can be calculated using
+ // ContainsVertexQuery.
+
+ containsQuery := NewContainsVertexQuery(vTest)
+ n := shape.NumEdges()
+ for e := 0; e < n; e++ {
+ edge := shape.Edge(e)
+ if edge.V0 == vTest {
+ containsQuery.AddEdge(edge.V1, 1)
+ }
+ if edge.V1 == vTest {
+ containsQuery.AddEdge(edge.V0, -1)
+ }
+ }
+ containsSign := containsQuery.ContainsVertex()
+ if containsSign == 0 {
+ return ref, false // There are no unmatched edges incident to this vertex.
+ }
+ ref.Point = vTest
+ ref.Contained = containsSign > 0
+
+ return ref, true
+}
+
+// containsBruteForce reports whether the given shape contains the given point.
+// Most clients should not use this method, since its running time is linear in
+// the number of shape edges. Instead clients should create a ShapeIndex and use
+// ContainsPointQuery, since this strategy is much more efficient when many
+// points need to be tested.
+//
+// Polygon boundaries are treated as being semi-open (see ContainsPointQuery
+// and VertexModel for other options).
+func containsBruteForce(shape Shape, point Point) bool {
+ if shape.Dimension() != 2 {
+ return false
+ }
+
+ refPoint := shape.ReferencePoint()
+ if refPoint.Point == point {
+ return refPoint.Contained
+ }
+
+ crosser := NewEdgeCrosser(refPoint.Point, point)
+ inside := refPoint.Contained
+ for e := 0; e < shape.NumEdges(); e++ {
+ edge := shape.Edge(e)
+ inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1)
+ }
+ return inside
+}
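+
+// For comparison, a hedged sketch of the indexed alternative recommended
+// above (ContainsPointQuery and its vertex models are defined in
+// contains_point_query.go):
+//
+//	index := NewShapeIndex()
+//	index.Add(shape)
+//	query := NewContainsPointQuery(index, VertexModelSemiOpen)
+//	inside := query.Contains(point)
+//	_ = inside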
diff --git a/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go b/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go
new file mode 100644
index 000000000..2a0d82361
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go
@@ -0,0 +1,72 @@
+// Copyright 2020 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// EdgeIterator is an iterator that advances through all edges in a ShapeIndex.
+// This is different from the ShapeIndexIterator, which advances through the
+// cells in the ShapeIndex.
+type EdgeIterator struct {
+ index *ShapeIndex
+ shapeID int32
+ numEdges int32
+ edgeID int32
+}
+
+// NewEdgeIterator creates a new edge iterator for the given index.
+func NewEdgeIterator(index *ShapeIndex) *EdgeIterator {
+ e := &EdgeIterator{
+ index: index,
+ shapeID: -1,
+ edgeID: -1,
+ }
+
+ e.Next()
+ return e
+}
+
+// ShapeID returns the current shape ID.
+func (e *EdgeIterator) ShapeID() int32 { return e.shapeID }
+
+// EdgeID returns the current edge ID.
+func (e *EdgeIterator) EdgeID() int32 { return e.edgeID }
+
+// ShapeEdgeID returns the current (shapeID, edgeID).
+func (e *EdgeIterator) ShapeEdgeID() ShapeEdgeID { return ShapeEdgeID{e.shapeID, e.edgeID} }
+
+// Edge returns the current edge.
+func (e *EdgeIterator) Edge() Edge {
+ return e.index.Shape(e.shapeID).Edge(int(e.edgeID))
+}
+
+// Done reports if the iterator is positioned at or after the last index edge.
+func (e *EdgeIterator) Done() bool { return e.shapeID >= int32(len(e.index.shapes)) }
+
+// Next positions the iterator at the next index edge.
+func (e *EdgeIterator) Next() {
+ e.edgeID++
+ for ; e.edgeID >= e.numEdges; e.edgeID++ {
+ e.shapeID++
+ if e.shapeID >= int32(len(e.index.shapes)) {
+ break
+ }
+ shape := e.index.Shape(e.shapeID)
+ if shape == nil {
+ e.numEdges = 0
+ } else {
+ e.numEdges = int32(shape.NumEdges())
+ }
+ e.edgeID = -1
+ }
+}
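+
+// A minimal usage sketch, assuming an existing index:
+//
+//	for it := NewEdgeIterator(index); !it.Done(); it.Next() {
+//		e := it.Edge()
+//		_ = e // process edge e of shape it.ShapeID()
+//	}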
diff --git a/vendor/github.com/golang/geo/s2/stuv.go b/vendor/github.com/golang/geo/s2/stuv.go
new file mode 100644
index 000000000..7663bb398
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/stuv.go
@@ -0,0 +1,427 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r3"
+)
+
+//
+// This file contains documentation of the various coordinate systems used
+// throughout the library. Most importantly, S2 defines a framework for
+// decomposing the unit sphere into a hierarchy of "cells". Each cell is a
+// quadrilateral bounded by four geodesics. The top level of the hierarchy is
+// obtained by projecting the six faces of a cube onto the unit sphere, and
+// lower levels are obtained by subdividing each cell into four children
+// recursively. Cells are numbered such that sequentially increasing cells
+// follow a continuous space-filling curve over the entire sphere. The
+// transformation is designed to make the cells at each level fairly uniform
+// in size.
+//
+////////////////////////// S2 Cell Decomposition /////////////////////////
+//
+// The following methods define the cube-to-sphere projection used by
+// the Cell decomposition.
+//
+// In the process of converting a latitude-longitude pair to a 64-bit cell
+// id, the following coordinate systems are used:
+//
+// (id)
+//   A CellID is a 64-bit encoding of a face and a Hilbert curve position
+// on that face. The Hilbert curve position implicitly encodes both the
+// position of a cell and its subdivision level (see s2cellid.go).
+//
+// (face, i, j)
+// Leaf-cell coordinates. "i" and "j" are integers in the range
+// [0,(2**30)-1] that identify a particular leaf cell on the given face.
+// The (i, j) coordinate system is right-handed on each face, and the
+// faces are oriented such that Hilbert curves connect continuously from
+// one face to the next.
+//
+// (face, s, t)
+// Cell-space coordinates. "s" and "t" are real numbers in the range
+// [0,1] that identify a point on the given face. For example, the point
+// (s, t) = (0.5, 0.5) corresponds to the center of the top-level face
+// cell. This point is also a vertex of exactly four cells at each
+// subdivision level greater than zero.
+//
+// (face, si, ti)
+// Discrete cell-space coordinates. These are obtained by multiplying
+// "s" and "t" by 2**31 and rounding to the nearest unsigned integer.
+// Discrete coordinates lie in the range [0,2**31]. This coordinate
+// system can represent the edge and center positions of all cells with
+// no loss of precision (including non-leaf cells). In binary, each
+// coordinate of a level-k cell center ends with a 1 followed by
+// (30 - k) 0s. The coordinates of its edges end with (at least)
+// (31 - k) 0s.
+//
+// (face, u, v)
+// Cube-space coordinates in the range [-1,1]. To make the cells at each
+// level more uniform in size after they are projected onto the sphere,
+// we apply a nonlinear transformation of the form u=f(s), v=f(t).
+// The (u, v) coordinates after this transformation give the actual
+// coordinates on the cube face (modulo some 90 degree rotations) before
+// it is projected onto the unit sphere.
+//
+// (face, u, v, w)
+// Per-face coordinate frame. This is an extension of the (face, u, v)
+// cube-space coordinates that adds a third axis "w" in the direction of
+// the face normal. It is always a right-handed 3D coordinate system.
+// Cube-space coordinates can be converted to this frame by setting w=1,
+// while (u,v,w) coordinates can be projected onto the cube face by
+// dividing by w, i.e. (face, u/w, v/w).
+//
+// (x, y, z)
+// Direction vector (Point). Direction vectors are not necessarily unit
+// length, and are often chosen to be points on the biunit cube
+// [-1,+1]x[-1,+1]x[-1,+1]. They can be normalized to obtain the
+// corresponding point on the unit sphere.
+//
+// (lat, lng)
+// Latitude and longitude (LatLng). Latitudes must be between -90 and
+// 90 degrees inclusive, and longitudes must be between -180 and 180
+// degrees inclusive.
+//
+// Note that the (i, j), (s, t), (si, ti), and (u, v) coordinate systems are
+// right-handed on all six faces.
+//
+//
+// There are a number of different projections from cell-space (s,t) to
+// cube-space (u,v): linear, quadratic, and tangent. They have the following
+// tradeoffs:
+//
+// Linear - This is the fastest transformation, but also produces the least
+// uniform cell sizes. Cell areas vary by a factor of about 5.2, with the
+// largest cells at the center of each face and the smallest cells in
+// the corners.
+//
+// Tangent - Transforming the coordinates via Atan makes the cell sizes
+// more uniform. The areas vary by a maximum ratio of 1.4 as opposed to a
+// maximum ratio of 5.2. However, each call to Atan is about as expensive
+// as all of the other calculations combined when converting from points to
+// cell ids, i.e. it reduces performance by a factor of 3.
+//
+// Quadratic - This is an approximation of the tangent projection that
+// is much faster and produces cells that are almost as uniform in size.
+// It is about 3 times faster than the tangent projection for converting
+// cell ids to points or vice versa. Cell areas vary by a maximum ratio of
+// about 2.1.
+//
+// Here is a table comparing the cell uniformity using each projection. Area
+// Ratio is the maximum ratio over all subdivision levels of the largest cell
+// area to the smallest cell area at that level, Edge Ratio is the maximum
+// ratio of the longest edge of any cell to the shortest edge of any cell at
+// the same level, and Diag Ratio is the ratio of the longest diagonal of
+// any cell to the shortest diagonal of any cell at the same level.
+//
+// Area Edge Diag
+// Ratio Ratio Ratio
+// -----------------------------------
+// Linear: 5.200 2.117 2.959
+// Tangent: 1.414 1.414 1.704
+// Quadratic: 2.082 1.802 1.932
+//
+// The worst-case cell aspect ratios are about the same with all three
+// projections. The maximum ratio of the longest edge to the shortest edge
+// within the same cell is about 1.4 and the maximum ratio of the diagonals
+// within the same cell is about 1.7.
+//
+// For Go we have chosen to use only the Quadratic approach. Other language
+// implementations may offer other choices.
+
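+// To make the chain above concrete, here is an illustrative sketch (not
+// part of the original source; PointFromLatLng and LatLngFromDegrees are
+// assumed from latlng.go in this package) walking a point from (lat, lng)
+// down to discrete cell-space coordinates:
+//
+//	p := PointFromLatLng(LatLngFromDegrees(30, 40)) // (lat,lng) -> (x,y,z)
+//	f, u, v := xyzToFaceUV(p.Vector)                // (x,y,z) -> (face,u,v)
+//	s, t := uvToST(u), uvToST(v)                    // (face,u,v) -> (face,s,t)
+//	si, ti := stToSiTi(s), stToSiTi(t)              // (face,s,t) -> (face,si,ti)
+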
+const (
+ // maxSiTi is the maximum value of an si- or ti-coordinate.
+ // It is one shift more than maxSize. The range of valid (si,ti)
+ // values is [0..maxSiTi].
+ maxSiTi = maxSize << 1
+)
+
+// siTiToST converts an si- or ti-value to the corresponding s- or t-value.
+// The value is capped at 1.0 because Go has no equivalent of C++'s DCHECK
+// to reject out-of-range inputs.
+func siTiToST(si uint32) float64 {
+ if si > maxSiTi {
+ return 1.0
+ }
+ return float64(si) / float64(maxSiTi)
+}
+
+// stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate.
+// The result may be outside the range of valid (si,ti)-values. A value of
+// 0.49999999999999994 (math.Nextafter(0.5, -1)) will be incorrectly rounded up.
+func stToSiTi(s float64) uint32 {
+ if s < 0 {
+ return uint32(s*maxSiTi - 0.5)
+ }
+ return uint32(s*maxSiTi + 0.5)
+}
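+
+// For example (illustrative): the cell-space midpoint s = 0.5 maps to the
+// discrete coordinate 1<<30 (maxSize, from cellid.go), and siTiToST maps
+// it back exactly:
+//
+//	si := stToSiTi(0.5) // == 1 << 30
+//	s := siTiToST(si)   // == 0.5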
+
+// stToUV converts an s or t value to the corresponding u or v value.
+// This is a non-linear transformation from [-1,1] to [-1,1] that
+// attempts to make the cell sizes more uniform.
+// This uses what the C++ version calls 'the quadratic transform'.
+func stToUV(s float64) float64 {
+ if s >= 0.5 {
+ return (1 / 3.) * (4*s*s - 1)
+ }
+ return (1 / 3.) * (1 - 4*(1-s)*(1-s))
+}
+
+// uvToST is the inverse of the stToUV transformation. Note that it
+// is not always true that uvToST(stToUV(x)) == x due to numerical
+// errors.
+func uvToST(u float64) float64 {
+ if u >= 0 {
+ return 0.5 * math.Sqrt(1+3*u)
+ }
+ return 1 - 0.5*math.Sqrt(1-3*u)
+}
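+
+// For example (illustrative), one round trip through the quadratic
+// transform and its inverse:
+//
+//	u := stToUV(0.75) // == (1/3)*(4*0.5625-1) ≈ 0.4167
+//	s := uvToST(u)    // ≈ 0.75, up to the rounding noted above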
+
+// face returns the face ID from 0 to 5 that contains the point r. For points
+// on the boundary between faces, the result is arbitrary but deterministic.
+func face(r r3.Vector) int {
+ f := r.LargestComponent()
+ switch {
+ case f == r3.XAxis && r.X < 0:
+ f += 3
+ case f == r3.YAxis && r.Y < 0:
+ f += 3
+ case f == r3.ZAxis && r.Z < 0:
+ f += 3
+ }
+ return int(f)
+}
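+
+// For example (illustrative): face(r3.Vector{0.5, -2, 1}) returns 4,
+// because the largest-magnitude component is Y and it is negative
+// (r3.YAxis + 3).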
+
+// validFaceXYZToUV, given a valid face for the point r (meaning that the
+// dot product of r with the face normal is positive), returns the
+// corresponding u and v values, which may lie outside the range [-1,1].
+func validFaceXYZToUV(face int, r r3.Vector) (float64, float64) {
+ switch face {
+ case 0:
+ return r.Y / r.X, r.Z / r.X
+ case 1:
+ return -r.X / r.Y, r.Z / r.Y
+ case 2:
+ return -r.X / r.Z, -r.Y / r.Z
+ case 3:
+ return r.Z / r.X, r.Y / r.X
+ case 4:
+ return r.Z / r.Y, -r.X / r.Y
+ }
+ return -r.Y / r.Z, -r.X / r.Z
+}
+
+// xyzToFaceUV converts a direction vector (not necessarily unit length) to
+// (face, u, v) coordinates.
+func xyzToFaceUV(r r3.Vector) (f int, u, v float64) {
+ f = face(r)
+ u, v = validFaceXYZToUV(f, r)
+ return f, u, v
+}
+
+// faceUVToXYZ turns face and UV coordinates into an unnormalized 3 vector.
+func faceUVToXYZ(face int, u, v float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{1, u, v}
+ case 1:
+ return r3.Vector{-u, 1, v}
+ case 2:
+ return r3.Vector{-u, -v, 1}
+ case 3:
+ return r3.Vector{-1, -v, -u}
+ case 4:
+ return r3.Vector{v, -1, -u}
+ default:
+ return r3.Vector{v, u, -1}
+ }
+}
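+
+// For example (illustrative): faceUVToXYZ(0, 0, 0) is r3.Vector{1, 0, 0},
+// the center of face 0, and xyzToFaceUV of that vector yields (0, 0, 0)
+// on face 0 again.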
+
+// faceXYZToUV returns the u and v values (which may lie outside the range
+// [-1,1]) if the dot product of the point p with the given face normal is
+// positive; otherwise it returns ok == false.
+func faceXYZToUV(face int, p Point) (u, v float64, ok bool) {
+ switch face {
+ case 0:
+ if p.X <= 0 {
+ return 0, 0, false
+ }
+ case 1:
+ if p.Y <= 0 {
+ return 0, 0, false
+ }
+ case 2:
+ if p.Z <= 0 {
+ return 0, 0, false
+ }
+ case 3:
+ if p.X >= 0 {
+ return 0, 0, false
+ }
+ case 4:
+ if p.Y >= 0 {
+ return 0, 0, false
+ }
+ default:
+ if p.Z >= 0 {
+ return 0, 0, false
+ }
+ }
+
+ u, v = validFaceXYZToUV(face, p.Vector)
+ return u, v, true
+}
+
+// faceXYZtoUVW transforms the given point P to the (u,v,w) coordinate frame of the given
+// face where the w-axis represents the face normal.
+func faceXYZtoUVW(face int, p Point) Point {
+ // The result coordinates are simply the dot products of P with the (u,v,w)
+ // axes for the given face (see faceUVWAxes).
+ switch face {
+ case 0:
+ return Point{r3.Vector{p.Y, p.Z, p.X}}
+ case 1:
+ return Point{r3.Vector{-p.X, p.Z, p.Y}}
+ case 2:
+ return Point{r3.Vector{-p.X, -p.Y, p.Z}}
+ case 3:
+ return Point{r3.Vector{-p.Z, -p.Y, -p.X}}
+ case 4:
+ return Point{r3.Vector{-p.Z, p.X, -p.Y}}
+ default:
+ return Point{r3.Vector{p.Y, p.X, -p.Z}}
+ }
+}
+
+// faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily
+// unit length) Point on the given face.
+func faceSiTiToXYZ(face int, si, ti uint32) Point {
+ return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))}
+}
+
+// xyzToFaceSiTi transforms the (not necessarily unit length) Point to
+// (face, si, ti) coordinates and the level the Point is at.
+func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) {
+ face, u, v := xyzToFaceUV(p.Vector)
+ si = stToSiTi(uvToST(u))
+ ti = stToSiTi(uvToST(v))
+
+ // If the levels corresponding to si,ti are not equal, then p is not a cell
+ // center. The si,ti values of 0 and maxSiTi need to be handled specially
+ // because they do not correspond to cell centers at any valid level; they
+ // are mapped to level -1 by the code at the end.
+ level = maxLevel - findLSBSetNonZero64(uint64(si|maxSiTi))
+ if level < 0 || level != maxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) {
+ return face, si, ti, -1
+ }
+
+ // In infinite precision, this test could be changed to ST == SiTi. However,
+ // due to rounding errors, uvToST(xyzToFaceUV(faceUVToXYZ(stToUV(...)))) is
+ // not idempotent. On the other hand, the center is computed exactly the same
+ // way p was originally computed (if it is indeed the center of a Cell);
+ // the comparison can be exact.
+ if p.Vector == faceSiTiToXYZ(face, si, ti).Normalize() {
+ return face, si, ti, level
+ }
+
+ return face, si, ti, -1
+}
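+
+// For example (a sketch, assuming CellIDFromFacePosLevel and CellID.Point
+// from cellid.go): the center of a valid cell is recognized at that cell's
+// level, while an arbitrary point yields level -1:
+//
+//	id := CellIDFromFacePosLevel(3, 0x12345678, 20)
+//	_, _, _, level := xyzToFaceSiTi(id.Point()) // level == 20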
+
+// uNorm returns the right-handed normal (not necessarily unit length) for an
+// edge in the direction of the positive v-axis at the given u-value on
+// the given face. (This vector is perpendicular to the plane through
+// the sphere origin that contains the given edge.)
+func uNorm(face int, u float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{u, -1, 0}
+ case 1:
+ return r3.Vector{1, u, 0}
+ case 2:
+ return r3.Vector{1, 0, u}
+ case 3:
+ return r3.Vector{-u, 0, 1}
+ case 4:
+ return r3.Vector{0, -u, 1}
+ default:
+ return r3.Vector{0, -1, -u}
+ }
+}
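+
+// For example (illustrative): uNorm(0, 0) is r3.Vector{0, -1, 0}, which is
+// orthogonal to both endpoints of the u=0 edge on face 0, namely
+// faceUVToXYZ(0, 0, -1) == (1, 0, -1) and faceUVToXYZ(0, 0, 1) == (1, 0, 1),
+// and hence normal to the plane through the origin containing that edge.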
+
+// vNorm returns the right-handed normal (not necessarily unit length) for an
+// edge in the direction of the positive u-axis at the given v-value on
+// the given face.
+func vNorm(face int, v float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{-v, 0, 1}
+ case 1:
+ return r3.Vector{0, -v, 1}
+ case 2:
+ return r3.Vector{0, -1, -v}
+ case 3:
+ return r3.Vector{v, -1, 0}
+ case 4:
+ return r3.Vector{1, v, 0}
+ default:
+ return r3.Vector{1, 0, v}
+ }
+}
+
+// faceUVWAxes are the U, V, and W axes for each face.
+var faceUVWAxes = [6][3]Point{
+ {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{1, 0, 0}}},
+ {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{0, 1, 0}}},
+ {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{0, 0, 1}}},
+ {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{-1, 0, 0}}},
+ {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, -1, 0}}},
+ {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 0, -1}}},
+}
+
+// faceUVWFaces are the precomputed neighbors of each face.
+var faceUVWFaces = [6][3][2]int{
+ {{4, 1}, {5, 2}, {3, 0}},
+ {{0, 3}, {5, 2}, {4, 1}},
+ {{0, 3}, {1, 4}, {5, 2}},
+ {{2, 5}, {1, 4}, {0, 3}},
+ {{2, 5}, {3, 0}, {1, 4}},
+ {{4, 1}, {3, 0}, {2, 5}},
+}
+
+// uvwAxis returns the given axis of the given face.
+func uvwAxis(face, axis int) Point {
+ return faceUVWAxes[face][axis]
+}
+
+// uvwFace returns the face in the (u,v,w) coordinate system on the given axis
+// in the given direction.
+func uvwFace(face, axis, direction int) int {
+ return faceUVWFaces[face][axis][direction]
+}
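+
+// For example (illustrative): uvwFace(0, 0, 1) == 1, the neighbor of face 0
+// in the positive u direction, and uvwFace(0, 0, 0) == 4, its neighbor in
+// the negative u direction.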
+
+// uAxis returns the u-axis for the given face.
+func uAxis(face int) Point {
+ return uvwAxis(face, 0)
+}
+
+// vAxis returns the v-axis for the given face.
+func vAxis(face int) Point {
+ return uvwAxis(face, 1)
+}
+
+// unitNorm returns the unit-length normal for the given face.
+func unitNorm(face int) Point {
+ return uvwAxis(face, 2)
+}
diff --git a/vendor/github.com/golang/geo/s2/util.go b/vendor/github.com/golang/geo/s2/util.go
new file mode 100644
index 000000000..7cab746d8
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/util.go
@@ -0,0 +1,125 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+import "github.com/golang/geo/s1"
+
+// roundAngle returns the value rounded to nearest as an int32.
+// This does not match C++ exactly for the case of x.5.
+func roundAngle(val s1.Angle) int32 {
+ if val < 0 {
+ return int32(val - 0.5)
+ }
+ return int32(val + 0.5)
+}
+
+// minAngle returns the smallest of the given values.
+func minAngle(x s1.Angle, others ...s1.Angle) s1.Angle {
+ min := x
+ for _, y := range others {
+ if y < min {
+ min = y
+ }
+ }
+ return min
+}
+
+// maxAngle returns the largest of the given values.
+func maxAngle(x s1.Angle, others ...s1.Angle) s1.Angle {
+ max := x
+ for _, y := range others {
+ if y > max {
+ max = y
+ }
+ }
+ return max
+}
+
+// minChordAngle returns the smallest of the given values.
+func minChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle {
+ min := x
+ for _, y := range others {
+ if y < min {
+ min = y
+ }
+ }
+ return min
+}
+
+// maxChordAngle returns the largest of the given values.
+func maxChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle {
+ max := x
+ for _, y := range others {
+ if y > max {
+ max = y
+ }
+ }
+ return max
+}
+
+// minFloat64 returns the smallest of the given values.
+func minFloat64(x float64, others ...float64) float64 {
+ min := x
+ for _, y := range others {
+ if y < min {
+ min = y
+ }
+ }
+ return min
+}
+
+// maxFloat64 returns the largest of the given values.
+func maxFloat64(x float64, others ...float64) float64 {
+ max := x
+ for _, y := range others {
+ if y > max {
+ max = y
+ }
+ }
+ return max
+}
+
+// minInt returns the smallest of the given values.
+func minInt(x int, others ...int) int {
+ min := x
+ for _, y := range others {
+ if y < min {
+ min = y
+ }
+ }
+ return min
+}
+
+// maxInt returns the largest of the given values.
+func maxInt(x int, others ...int) int {
+ max := x
+ for _, y := range others {
+ if y > max {
+ max = y
+ }
+ }
+ return max
+}
+
+// clampInt returns the number closest to x within the range min..max.
+func clampInt(x, min, max int) int {
+ if x < min {
+ return min
+ }
+ if x > max {
+ return max
+ }
+ return x
+}
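+
+// Usage sketch (illustrative, not part of the original source):
+//
+//	lo := minInt(3, 1, 4)      // == 1
+//	hi := maxFloat64(2.7, 3.1) // == 3.1
+//	c := clampInt(7, 0, 5)     // == 5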
diff --git a/vendor/github.com/golang/geo/s2/wedge_relations.go b/vendor/github.com/golang/geo/s2/wedge_relations.go
new file mode 100644
index 000000000..d637bb68c
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/wedge_relations.go
@@ -0,0 +1,97 @@
+// Copyright 2017 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s2
+
+// WedgeRel enumerates the possible relations between two wedges A and B.
+type WedgeRel int
+
+// Define the different possible relationships between two wedges.
+//
+// Given an edge chain (x0, x1, x2), the wedge at x1 is the region to the
+// left of the edges. More precisely, it is the set of all rays from x1x0
+// (inclusive) to x1x2 (exclusive) in the *clockwise* direction.
+const (
+ WedgeEquals WedgeRel = iota // A and B are equal.
+ WedgeProperlyContains // A is a strict superset of B.
+ WedgeIsProperlyContained // A is a strict subset of B.
+ WedgeProperlyOverlaps // A-B, B-A, and A intersect B are non-empty.
+ WedgeIsDisjoint // A and B are disjoint.
+)
+
+// WedgeRelation reports the relation between two non-empty wedges
+// A=(a0, ab1, a2) and B=(b0, ab1, b2).
+func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel {
+ // There are 6 possible edge orderings at a shared vertex (all
+ // of these orderings are circular, i.e. abcd == bcda):
+ //
+ // (1) a2 b2 b0 a0: A contains B
+ // (2) a2 a0 b0 b2: B contains A
+ // (3) a2 a0 b2 b0: A and B are disjoint
+ // (4) a2 b0 a0 b2: A and B intersect in one wedge
+ // (5) a2 b2 a0 b0: A and B intersect in one wedge
+ // (6) a2 b0 b2 a0: A and B intersect in two wedges
+ //
+ // We do not distinguish between 4, 5, and 6.
+ // We pay extra attention when some of the edges overlap. When edges
+ // overlap, several of these orderings can be satisfied, and we take
+ // the most specific.
+ if a0 == b0 && a2 == b2 {
+ return WedgeEquals
+ }
+
+ // Cases 1, 2, 5, and 6
+ if OrderedCCW(a0, a2, b2, ab1) {
+		// The cases with this vertex ordering are 1, 5, and 6.
+ if OrderedCCW(b2, b0, a0, ab1) {
+ return WedgeProperlyContains
+ }
+
+ // We are in case 5 or 6, or case 2 if a2 == b2.
+ if a2 == b2 {
+ return WedgeIsProperlyContained
+ }
+		return WedgeProperlyOverlaps
+	}
+ // We are in case 2, 3, or 4.
+ if OrderedCCW(a0, b0, b2, ab1) {
+ return WedgeIsProperlyContained
+ }
+
+ if OrderedCCW(a0, b0, a2, ab1) {
+ return WedgeIsDisjoint
+ }
+ return WedgeProperlyOverlaps
+}
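+
+// For example (a sketch, assuming PointFromCoords from point.go): with the
+// shared vertex ab1 at the north pole, wedge A below spans three quarters
+// of a turn and properly contains wedge B, which spans one quarter:
+//
+//	ab1 := PointFromCoords(0, 0, 1)
+//	a0, a2 := PointFromCoords(1, 0, 0), PointFromCoords(0, 1, 0)
+//	b0, b2 := PointFromCoords(0, -1, 0), PointFromCoords(-1, 0, 0)
+//	rel := WedgeRelation(a0, ab1, a2, b0, b2) // == WedgeProperlyContains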
+
+// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2).
+// Equivalent to WedgeRelation returning WedgeProperlyContains or WedgeEquals.
+func WedgeContains(a0, ab1, a2, b0, b2 Point) bool {
+ // For A to contain B (where each loop interior is defined to be its left
+ // side), the CCW edge order around ab1 must be a2 b2 b0 a0. We split
+ // this test into two parts that test three vertices each.
+ return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1)
+}
+
+// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2).
+// It is equivalent to, but faster than, WedgeRelation != WedgeIsDisjoint.
+func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool {
+ // For A not to intersect B (where each loop interior is defined to be
+ // its left side), the CCW edge order around ab1 must be a0 b2 b0 a2.
+ // Note that it's important to write these conditions as negatives
+ // (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct
+ // results when two vertices are the same.
+ return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1))
+}