author      dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>  2024-09-16 09:26:41 +0200
committer   GitHub <noreply@github.com>  2024-09-16 09:26:41 +0200
commit      ca729aa4a06b5d7e5587e2b2e2a0b87bfb17513c (patch)
tree        15147a61f3a12edba73c2add3aabf620b49c5d36
parent      [bugfix] Hoist filterable text field extraction out of loop (#3297) (diff)
download    gotosocial-ca729aa4a06b5d7e5587e2b2e2a0b87bfb17513c.tar.xz
[chore]: Bump github.com/jackc/pgx/v5 from 5.6.0 to 5.7.1 (#3302)
Bumps [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) from 5.6.0 to 5.7.1.
- [Changelog](https://github.com/jackc/pgx/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jackc/pgx/compare/v5.6.0...v5.7.1)

---
updated-dependencies:
- dependency-name: github.com/jackc/pgx/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
-rw-r--r--  go.mod | 6
-rw-r--r--  go.sum | 12
-rw-r--r--  vendor/github.com/jackc/pgservicefile/.travis.yml | 9
-rw-r--r--  vendor/github.com/jackc/pgservicefile/README.md | 5
-rw-r--r--  vendor/github.com/jackc/pgservicefile/pgservicefile.go | 4
-rw-r--r--  vendor/github.com/jackc/pgx/v5/CHANGELOG.md | 22
-rw-r--r--  vendor/github.com/jackc/pgx/v5/batch.go | 44
-rw-r--r--  vendor/github.com/jackc/pgx/v5/conn.go | 26
-rw-r--r--  vendor/github.com/jackc/pgx/v5/derived_types.go | 262
-rw-r--r--  vendor/github.com/jackc/pgx/v5/doc.go | 2
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgconn/config.go | 64
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/doc.go | 3
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/interval.go | 32
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/json.go | 9
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go | 44
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go | 3
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/time.go | 8
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/uint32.go | 22
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/xml.go | 198
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgxpool/tx.go | 13
-rw-r--r--  vendor/github.com/jackc/pgx/v5/rows.go | 21
-rw-r--r--  vendor/github.com/jackc/pgx/v5/stdlib/sql.go | 24
-rw-r--r--  vendor/github.com/jackc/puddle/v2/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/jackc/puddle/v2/README.md | 2
-rw-r--r--  vendor/github.com/jackc/puddle/v2/nanotime.go | 16
-rw-r--r--  vendor/github.com/jackc/puddle/v2/nanotime_time.go | 13
-rw-r--r--  vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go | 12
-rw-r--r--  vendor/github.com/jackc/puddle/v2/pool.go | 20
-rw-r--r--  vendor/modules.txt | 8
29 files changed, 749 insertions, 160 deletions
diff --git a/go.mod b/go.mod
index 624486a41..c7aa112b2 100644
--- a/go.mod
+++ b/go.mod
@@ -39,7 +39,7 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/feeds v1.2.0
github.com/gorilla/websocket v1.5.2
- github.com/jackc/pgx/v5 v5.6.0
+ github.com/jackc/pgx/v5 v5.7.1
github.com/microcosm-cc/bluemonday v1.0.27
github.com/miekg/dns v1.1.62
github.com/minio/minio-go/v7 v7.0.76
@@ -154,8 +154,8 @@ require (
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
- github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
- github.com/jackc/puddle/v2 v2.2.1 // indirect
+ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
+ github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index d0826e1b9..6a353ce43 100644
--- a/go.sum
+++ b/go.sum
@@ -364,12 +364,12 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
-github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
-github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
-github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
+github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
diff --git a/vendor/github.com/jackc/pgservicefile/.travis.yml b/vendor/github.com/jackc/pgservicefile/.travis.yml
deleted file mode 100644
index e176228e8..000000000
--- a/vendor/github.com/jackc/pgservicefile/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/jackc/pgservicefile/README.md b/vendor/github.com/jackc/pgservicefile/README.md
index e50ca1262..2fc7e012c 100644
--- a/vendor/github.com/jackc/pgservicefile/README.md
+++ b/vendor/github.com/jackc/pgservicefile/README.md
@@ -1,5 +1,6 @@
-[![](https://godoc.org/github.com/jackc/pgservicefile?status.svg)](https://godoc.org/github.com/jackc/pgservicefile)
-[![Build Status](https://travis-ci.org/jackc/pgservicefile.svg)](https://travis-ci.org/jackc/pgservicefile)
+[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgservicefile.svg)](https://pkg.go.dev/github.com/jackc/pgservicefile)
+[![Build Status](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)
+
# pgservicefile
diff --git a/vendor/github.com/jackc/pgservicefile/pgservicefile.go b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
index 797bbab9e..c62caa7fe 100644
--- a/vendor/github.com/jackc/pgservicefile/pgservicefile.go
+++ b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
@@ -57,7 +57,7 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
} else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
servicefile.Services = append(servicefile.Services, service)
- } else {
+ } else if service != nil {
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("unable to parse line %d", lineNum)
@@ -67,6 +67,8 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
value := strings.TrimSpace(parts[1])
service.Settings[key] = value
+ } else {
+ return nil, fmt.Errorf("line %d is not in a section", lineNum)
}
}
diff --git a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
index 61b4695fd..a0ff9ba3b 100644
--- a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
+++ b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
@@ -1,3 +1,25 @@
+# 5.7.1 (September 10, 2024)
+
+* Fix data race in tracelog.TraceLog
+* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
+* Update golang.org/x/crypto and golang.org/x/text
+
+# 5.7.0 (September 7, 2024)
+
+* Add support for sslrootcert=system (Yann Soubeyrand)
+* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
+* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
+* Add MultiTrace (Stepan Rabotkin)
+* Add TraceLogConfig with customizable TimeKey (stringintech)
+* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
+* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
+* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
+* Update pgservicefile - fixes panic when parsing invalid file
+* Better error message when reading past end of batch
+* Don't print url when url.Parse returns an error (Kevin Biju)
+* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
+* Fix: Scan and encode types with underlying types of arrays
+
# 5.6.0 (May 25, 2024)
* Add StrictNamedArgs (Tomas Zahradnicek)
diff --git a/vendor/github.com/jackc/pgx/v5/batch.go b/vendor/github.com/jackc/pgx/v5/batch.go
index 3540f57f5..c3c2834f2 100644
--- a/vendor/github.com/jackc/pgx/v5/batch.go
+++ b/vendor/github.com/jackc/pgx/v5/batch.go
@@ -60,9 +60,13 @@ type Batch struct {
QueuedQueries []*QueuedQuery
}
-// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
-// The only pgx option argument that is supported is QueryRewriter. Queries are executed using the
-// connection's DefaultQueryExecMode.
+// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
+// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
+//
+// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
+// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
+// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
+// include the current query may reference the wrong query.
func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
qq := &QueuedQuery{
SQL: query,
@@ -128,7 +132,7 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
if !br.mrr.NextResult() {
err := br.mrr.Close()
if err == nil {
- err = errors.New("no result")
+ err = errors.New("no more results in batch")
}
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
@@ -180,7 +184,7 @@ func (br *batchResults) Query() (Rows, error) {
if !br.mrr.NextResult() {
rows.err = br.mrr.Close()
if rows.err == nil {
- rows.err = errors.New("no result")
+ rows.err = errors.New("no more results in batch")
}
rows.closed = true
@@ -287,7 +291,10 @@ func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
return pgconn.CommandTag{}, br.err
}
- query, arguments, _ := br.nextQueryAndArgs()
+ query, arguments, err := br.nextQueryAndArgs()
+ if err != nil {
+ return pgconn.CommandTag{}, err
+ }
results, err := br.pipeline.GetResults()
if err != nil {
@@ -330,9 +337,9 @@ func (br *pipelineBatchResults) Query() (Rows, error) {
return &baseRows{err: br.err, closed: true}, br.err
}
- query, arguments, ok := br.nextQueryAndArgs()
- if !ok {
- query = "batch query"
+ query, arguments, err := br.nextQueryAndArgs()
+ if err != nil {
+ return &baseRows{err: err, closed: true}, err
}
rows := br.conn.getRows(br.ctx, query, arguments)
@@ -421,13 +428,16 @@ func (br *pipelineBatchResults) earlyError() error {
return br.err
}
-func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
- if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
- bi := br.b.QueuedQueries[br.qqIdx]
- query = bi.SQL
- args = bi.Arguments
- ok = true
- br.qqIdx++
+func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
+ if br.b == nil {
+ return "", nil, errors.New("no reference to batch")
}
- return
+
+ if br.qqIdx >= len(br.b.QueuedQueries) {
+ return "", nil, errors.New("no more results in batch")
+ }
+
+ bi := br.b.QueuedQueries[br.qqIdx]
+ br.qqIdx++
+ return bi.SQL, bi.Arguments, nil
}
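
Note on the batch.go changes above: reading more results than were queued now fails with "no more results in batch" instead of the old "no result", and the pipeline path returns that error rather than a placeholder query. A minimal sketch of how this surfaces to callers, assuming a DATABASE_URL environment variable (not part of this change):

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed env var
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	b := &pgx.Batch{}
	b.Queue("select 1")
	b.Queue("select 2")

	br := conn.SendBatch(ctx, b)
	defer br.Close()

	for i := 0; i < 2; i++ {
		var n int
		if err := br.QueryRow().Scan(&n); err != nil {
			log.Fatal(err)
		}
		fmt.Println(n)
	}

	// A third read goes past the queued queries and now reports
	// "no more results in batch" rather than the vaguer "no result".
	if _, err := br.Exec(); err != nil {
		fmt.Println(err)
	}
}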
diff --git a/vendor/github.com/jackc/pgx/v5/conn.go b/vendor/github.com/jackc/pgx/v5/conn.go
index 311721459..187b3dd57 100644
--- a/vendor/github.com/jackc/pgx/v5/conn.go
+++ b/vendor/github.com/jackc/pgx/v5/conn.go
@@ -3,6 +3,7 @@ package pgx
import (
"context"
"crypto/sha256"
+ "database/sql"
"encoding/hex"
"errors"
"fmt"
@@ -102,13 +103,31 @@ func (ident Identifier) Sanitize() string {
var (
// ErrNoRows occurs when rows are expected but none are returned.
- ErrNoRows = errors.New("no rows in result set")
+ ErrNoRows = newProxyErr(sql.ErrNoRows, "no rows in result set")
// ErrTooManyRows occurs when more rows than expected are returned.
ErrTooManyRows = errors.New("too many rows in result set")
)
-var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
-var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+func newProxyErr(background error, msg string) error {
+ return &proxyError{
+ msg: msg,
+ background: background,
+ }
+}
+
+type proxyError struct {
+ msg string
+ background error
+}
+
+func (err *proxyError) Error() string { return err.msg }
+
+func (err *proxyError) Unwrap() error { return err.background }
+
+var (
+ errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
+ errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+)
// Connect establishes a connection with a PostgreSQL server with a connection string. See
// pgconn.Connect for details.
@@ -843,7 +862,6 @@ func (c *Conn) getStatementDescription(
mode QueryExecMode,
sql string,
) (sd *pgconn.StatementDescription, err error) {
-
switch mode {
case QueryExecModeCacheStatement:
if c.statementCache == nil {
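
Note on the conn.go change above: ErrNoRows is now a proxyError that unwraps to sql.ErrNoRows, so database/sql-style sentinel checks also match native pgx calls. A small sketch, assuming a DATABASE_URL environment variable and a hypothetical accounts table:

package main

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	var name string
	err = conn.QueryRow(ctx, "select name from accounts where id = $1", -1).Scan(&name)
	if errors.Is(err, pgx.ErrNoRows) { // still matches the pgx sentinel
		fmt.Println("no rows (pgx)")
	}
	if errors.Is(err, sql.ErrNoRows) { // now also matches via proxyError.Unwrap
		fmt.Println("no rows (database/sql)")
	}
}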
diff --git a/vendor/github.com/jackc/pgx/v5/derived_types.go b/vendor/github.com/jackc/pgx/v5/derived_types.go
new file mode 100644
index 000000000..22ab069cf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/derived_types.go
@@ -0,0 +1,262 @@
+package pgx
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/v5/pgtype"
+)
+
+/*
+buildLoadDerivedTypesSQL generates the correct query for retrieving type information.
+
+ pgVersion: the major version of the PostgreSQL server
+ typeNames: the names of the types to load. If nil, load all types.
+*/
+func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
+ supportsMultirange := (pgVersion >= 14)
+ var typeNamesClause string
+
+ if typeNames == nil {
+ // This should not occur; this will not return any types
+ typeNamesClause = "= ''"
+ } else {
+ typeNamesClause = "= ANY($1)"
+ }
+ parts := make([]string, 0, 10)
+
+ // Each of the type names provided might be found in pg_class or pg_type.
+ // Additionally, it may or may not include a schema portion.
+ parts = append(parts, `
+WITH RECURSIVE
+-- find the OIDs in pg_class which match one of the provided type names
+selected_classes(oid,reltype) AS (
+ -- this query uses the namespace search path, so will match type names without a schema prefix
+ SELECT pg_class.oid, pg_class.reltype
+ FROM pg_catalog.pg_class
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
+ WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
+ AND relname `, typeNamesClause, `
+UNION ALL
+ -- this query will only match type names which include the schema prefix
+ SELECT pg_class.oid, pg_class.reltype
+ FROM pg_class
+ INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
+ WHERE nspname || '.' || relname `, typeNamesClause, `
+),
+selected_types(oid) AS (
+ -- collect the OIDs from pg_types which correspond to the selected classes
+ SELECT reltype AS oid
+ FROM selected_classes
+UNION ALL
+ -- as well as any other type names which match our criteria
+ SELECT pg_type.oid
+ FROM pg_type
+ LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
+ WHERE typname `, typeNamesClause, `
+ OR nspname || '.' || typname `, typeNamesClause, `
+),
+-- this builds a parent/child mapping of objects, allowing us to know
+-- all the child (ie: dependent) types that a parent (type) requires
+-- As can be seen, there are 3 ways this can occur (the last of which
+-- is due to being a composite class, where the composite fields are children)
+pc(parent, child) AS (
+ SELECT parent.oid, parent.typelem
+ FROM pg_type parent
+ WHERE parent.typtype = 'b' AND parent.typelem != 0
+UNION ALL
+ SELECT parent.oid, parent.typbasetype
+ FROM pg_type parent
+ WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
+UNION ALL
+ SELECT pg_type.oid, atttypid
+ FROM pg_attribute
+ INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
+ INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
+ WHERE NOT attisdropped
+ AND attnum > 0
+),
+-- Now construct a recursive query which includes a 'depth' element.
+-- This is used to ensure that the "youngest" children are registered before
+-- their parents.
+relationships(parent, child, depth) AS (
+ SELECT DISTINCT 0::OID, selected_types.oid, 0
+ FROM selected_types
+UNION ALL
+ SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
+ FROM selected_classes c
+ inner join pg_type ON (c.reltype = pg_type.oid)
+ inner join pg_attribute on (c.oid = pg_attribute.attrelid)
+UNION ALL
+ SELECT pc.parent, pc.child, relationships.depth + 1
+ FROM pc
+ INNER JOIN relationships ON (pc.parent = relationships.child)
+),
+-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
+composite AS (
+ SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
+ FROM pg_attribute
+ INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
+ INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
+ WHERE NOT attisdropped
+ AND attnum > 0
+ GROUP BY pg_type.oid
+)
+-- Bring together this information, showing all the information which might possibly be required
+-- to complete the registration, applying filters to only show the items which relate to the selected
+-- types/classes.
+SELECT typname,
+ pg_namespace.nspname,
+ typtype,
+ typbasetype,
+ typelem,
+ pg_type.oid,`)
+ if supportsMultirange {
+ parts = append(parts, `
+ COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
+ } else {
+ parts = append(parts, `
+ 0 AS rngtypid,`)
+ }
+ parts = append(parts, `
+ COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
+ attnames, atttypids
+ FROM relationships
+ INNER JOIN pg_type ON (pg_type.oid = relationships.child)
+ LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
+ if supportsMultirange {
+ parts = append(parts, `
+ LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
+ }
+
+ parts = append(parts, `
+ LEFT OUTER JOIN composite USING (oid)
+ LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
+ WHERE NOT (typtype = 'b' AND typelem = 0)`)
+ parts = append(parts, `
+ GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
+ if supportsMultirange {
+ parts = append(parts, `
+ multirange.rngtypid,`)
+ }
+ parts = append(parts, `
+ attnames, atttypids
+ ORDER BY MAX(depth) desc, typname;`)
+ return strings.Join(parts, "")
+}
+
+type derivedTypeInfo struct {
+ Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
+ TypeName, Typtype, NspName string
+ Attnames []string
+ Atttypids []uint32
+}
+
+// LoadTypes performs a single (complex) query, returning all the required
+// information to register the named types, as well as any other types directly
+// or indirectly required to complete the registration.
+// The result of this call can be passed into RegisterTypes to complete the process.
+func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
+ m := c.TypeMap()
+ if typeNames == nil || len(typeNames) == 0 {
+ return nil, fmt.Errorf("No type names were supplied.")
+ }
+
+ // Disregard server version errors. This will result in
+ // the SQL not support recent structures such as multirange
+ serverVersion, _ := serverVersion(c)
+ sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
+ var rows Rows
+ var err error
+ if typeNames == nil {
+ rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol)
+ } else {
+ rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("While generating load types query: %w", err)
+ }
+ defer rows.Close()
+ result := make([]*pgtype.Type, 0, 100)
+ for rows.Next() {
+ ti := derivedTypeInfo{}
+ err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
+ if err != nil {
+ return nil, fmt.Errorf("While scanning type information: %w", err)
+ }
+ var type_ *pgtype.Type
+ switch ti.Typtype {
+ case "b": // array
+ dt, ok := m.TypeForOID(ti.Typelem)
+ if !ok {
+ return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
+ }
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
+ case "c": // composite
+ var fields []pgtype.CompositeCodecField
+ for i, fieldName := range ti.Attnames {
+ dt, ok := m.TypeForOID(ti.Atttypids[i])
+ if !ok {
+ return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
+ }
+ fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
+ case "d": // domain
+ dt, ok := m.TypeForOID(ti.Typbasetype)
+ if !ok {
+ return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
+ case "e": // enum
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
+ case "r": // range
+ dt, ok := m.TypeForOID(ti.Rngsubtype)
+ if !ok {
+ return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
+ case "m": // multirange
+ dt, ok := m.TypeForOID(ti.Rngtypid)
+ if !ok {
+ return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
+ }
+
+ type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
+ default:
+ return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
+ }
+ if type_ != nil {
+ m.RegisterType(type_)
+ if ti.NspName != "" {
+ nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
+ m.RegisterType(nspType)
+ result = append(result, nspType)
+ }
+ result = append(result, type_)
+ }
+ }
+ return result, nil
+}
+
+// serverVersion returns the postgresql server version.
+func serverVersion(c *Conn) (int64, error) {
+ serverVersionStr := c.PgConn().ParameterStatus("server_version")
+ serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ // if not PostgreSQL do nothing
+ if serverVersionStr == "" {
+ return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
+ }
+
+ version, err := strconv.ParseInt(serverVersionStr, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("postgres version parsing failed: %w", err)
+ }
+ return version, nil
+}
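
Note on the new derived_types.go above: LoadTypes runs the recursive catalog query and returns the named types plus every dependent type in registration order; RegisterTypes (added to pgtype.Map in this same bump) applies them elsewhere. A sketch under assumptions — a DATABASE_URL environment variable and a hypothetical composite type public.my_composite:

package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// One query loads the named type plus its dependencies (array elements,
	// domain base types, composite fields, range subtypes, ...).
	types, err := conn.LoadTypes(ctx, []string{"public.my_composite"}) // hypothetical type name
	if err != nil {
		log.Fatal(err)
	}

	// LoadTypes already registered the types on conn's own map; reuse them on
	// a second connection by registering them in the returned order.
	other, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer other.Close(ctx)
	other.TypeMap().RegisterTypes(types)
}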
diff --git a/vendor/github.com/jackc/pgx/v5/doc.go b/vendor/github.com/jackc/pgx/v5/doc.go
index bc0391dde..0e91d64e8 100644
--- a/vendor/github.com/jackc/pgx/v5/doc.go
+++ b/vendor/github.com/jackc/pgx/v5/doc.go
@@ -175,7 +175,7 @@ notification is received or the context is canceled.
Tracing and Logging
-pgx supports tracing by setting ConnConfig.Tracer.
+pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
diff --git a/vendor/github.com/jackc/pgx/v5/pgconn/config.go b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
index 598917f55..6a198e675 100644
--- a/vendor/github.com/jackc/pgx/v5/pgconn/config.go
+++ b/vendor/github.com/jackc/pgx/v5/pgconn/config.go
@@ -467,14 +467,17 @@ func parseEnvSettings() map[string]string {
func parseURLSettings(connString string) (map[string]string, error) {
settings := make(map[string]string)
- url, err := url.Parse(connString)
+ parsedURL, err := url.Parse(connString)
if err != nil {
+ if urlErr := new(url.Error); errors.As(err, &urlErr) {
+ return nil, urlErr.Err
+ }
return nil, err
}
- if url.User != nil {
- settings["user"] = url.User.Username()
- if password, present := url.User.Password(); present {
+ if parsedURL.User != nil {
+ settings["user"] = parsedURL.User.Username()
+ if password, present := parsedURL.User.Password(); present {
settings["password"] = password
}
}
@@ -482,7 +485,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
// Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.
var hosts []string
var ports []string
- for _, host := range strings.Split(url.Host, ",") {
+ for _, host := range strings.Split(parsedURL.Host, ",") {
if host == "" {
continue
}
@@ -508,7 +511,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
settings["port"] = strings.Join(ports, ",")
}
- database := strings.TrimLeft(url.Path, "/")
+ database := strings.TrimLeft(parsedURL.Path, "/")
if database != "" {
settings["database"] = database
}
@@ -517,7 +520,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
"dbname": "database",
}
- for k, v := range url.Query() {
+ for k, v := range parsedURL.Query() {
if k2, present := nameMap[k]; present {
k = k2
}
@@ -654,6 +657,36 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
tlsConfig := &tls.Config{}
+ if sslrootcert != "" {
+ var caCertPool *x509.CertPool
+
+ if sslrootcert == "system" {
+ var err error
+
+ caCertPool, err = x509.SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
+ }
+
+ sslmode = "verify-full"
+ } else {
+ caCertPool = x509.NewCertPool()
+
+ caPath := sslrootcert
+ caCert, err := os.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read CA file: %w", err)
+ }
+
+ if !caCertPool.AppendCertsFromPEM(caCert) {
+ return nil, errors.New("unable to add CA to cert pool")
+ }
+ }
+
+ tlsConfig.RootCAs = caCertPool
+ tlsConfig.ClientCAs = caCertPool
+ }
+
switch sslmode {
case "disable":
return []*tls.Config{nil}, nil
@@ -711,23 +744,6 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
return nil, errors.New("sslmode is invalid")
}
- if sslrootcert != "" {
- caCertPool := x509.NewCertPool()
-
- caPath := sslrootcert
- caCert, err := os.ReadFile(caPath)
- if err != nil {
- return nil, fmt.Errorf("unable to read CA file: %w", err)
- }
-
- if !caCertPool.AppendCertsFromPEM(caCert) {
- return nil, errors.New("unable to add CA to cert pool")
- }
-
- tlsConfig.RootCAs = caCertPool
- tlsConfig.ClientCAs = caCertPool
- }
-
if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
return nil, errors.New(`both "sslcert" and "sslkey" are required`)
}
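
Note on the pgconn/config.go change above: sslrootcert=system now loads the operating system certificate pool and forces verify-full, and the CA handling moved ahead of the sslmode switch. A sketch with a hypothetical host name:

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	// "system" is now a recognized value: the OS trust store is used and
	// sslmode is upgraded to verify-full.
	dsn := "postgres://gts:secret@db.example.org:5432/gotosocial?sslrootcert=system" // hypothetical DSN
	conn, err := pgx.Connect(ctx, dsn)
	if err != nil {
		log.Fatal(err) // e.g. a certificate not signed by a system-trusted CA
	}
	defer conn.Close(ctx)
}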
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
index d56c1dc70..7687ea8fe 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/doc.go
@@ -53,6 +53,9 @@ similar fashion to database/sql. The second is to use a pointer to a pointer.
return err
}
+When using nullable pgtype types as parameters for queries, one has to remember
+to explicitly set their Valid field to true, otherwise the parameter's value will be NULL.
+
JSON Support
pgtype automatically marshals and unmarshals data from json and jsonb PostgreSQL types.
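
Note on the pgtype/doc.go addition above: a nullable pgtype value passed as a query argument is sent as NULL unless its Valid field is set. A minimal sketch, assuming a DATABASE_URL environment variable and a hypothetical notes table:

package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// With Valid left false (the zero value) this parameter would be NULL,
	// not an empty string.
	body := pgtype.Text{String: "hello", Valid: true}
	if _, err := conn.Exec(ctx, "insert into notes(body) values($1)", body); err != nil {
		log.Fatal(err)
	}
}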
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
index 06703d4dc..4b5116295 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/interval.go
@@ -132,29 +132,25 @@ func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte,
if interval.Days != 0 {
buf = append(buf, strconv.FormatInt(int64(interval.Days), 10)...)
- buf = append(buf, " day"...)
+ buf = append(buf, " day "...)
}
- if interval.Microseconds != 0 {
- buf = append(buf, " "...)
-
- absMicroseconds := interval.Microseconds
- if absMicroseconds < 0 {
- absMicroseconds = -absMicroseconds
- buf = append(buf, '-')
- }
+ absMicroseconds := interval.Microseconds
+ if absMicroseconds < 0 {
+ absMicroseconds = -absMicroseconds
+ buf = append(buf, '-')
+ }
- hours := absMicroseconds / microsecondsPerHour
- minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
- seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
+ hours := absMicroseconds / microsecondsPerHour
+ minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
+ seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
- timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
- buf = append(buf, timeStr...)
+ timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
+ buf = append(buf, timeStr...)
- microseconds := absMicroseconds % microsecondsPerSecond
- if microseconds != 0 {
- buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
- }
+ microseconds := absMicroseconds % microsecondsPerSecond
+ if microseconds != 0 {
+ buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
}
return buf, nil
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/json.go b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
index e71dcb9bf..c2aa0d3bf 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/json.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/json.go
@@ -37,7 +37,7 @@ func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Enco
//
// https://github.com/jackc/pgx/issues/1430
//
- // Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to beused
+ // Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to be used
// when both are implemented https://github.com/jackc/pgx/issues/1805
case driver.Valuer:
return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
@@ -177,13 +177,6 @@ func (scanPlanJSONToByteSlice) Scan(src []byte, dst any) error {
return nil
}
-type scanPlanJSONToBytesScanner struct{}
-
-func (scanPlanJSONToBytesScanner) Scan(src []byte, dst any) error {
- scanner := (dst).(BytesScanner)
- return scanner.ScanBytes(src)
-}
-
type scanPlanJSONToJSONUnmarshal struct {
unmarshal func(data []byte, v any) error
}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
index 408295683..bdd9f05ca 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
@@ -26,6 +26,8 @@ const (
XIDOID = 28
CIDOID = 29
JSONOID = 114
+ XMLOID = 142
+ XMLArrayOID = 143
JSONArrayOID = 199
PointOID = 600
LsegOID = 601
@@ -214,6 +216,15 @@ type Map struct {
TryWrapScanPlanFuncs []TryWrapScanPlanFunc
}
+// Copy returns a new Map containing the same registered types.
+func (m *Map) Copy() *Map {
+ newMap := NewMap()
+ for _, type_ := range m.oidToType {
+ newMap.RegisterType(type_)
+ }
+ return newMap
+}
+
func NewMap() *Map {
defaultMapInitOnce.Do(initDefaultMap)
@@ -248,6 +259,13 @@ func NewMap() *Map {
}
}
+// RegisterTypes registers multiple data types in the sequence they are provided.
+func (m *Map) RegisterTypes(types []*Type) {
+ for _, t := range types {
+ m.RegisterType(t)
+ }
+}
+
// RegisterType registers a data type with the Map. t must not be mutated after it is registered.
func (m *Map) RegisterType(t *Type) {
m.oidToType[t.OID] = t
@@ -555,17 +573,24 @@ func TryFindUnderlyingTypeScanPlan(dst any) (plan WrappedScanPlanNextSetter, nex
elemValue = dstValue.Elem()
}
nextDstType := elemKindToPointerTypes[elemValue.Kind()]
- if nextDstType == nil && elemValue.Kind() == reflect.Slice {
- if elemValue.Type().Elem().Kind() == reflect.Uint8 {
- var v *[]byte
- nextDstType = reflect.TypeOf(v)
+ if nextDstType == nil {
+ if elemValue.Kind() == reflect.Slice {
+ if elemValue.Type().Elem().Kind() == reflect.Uint8 {
+ var v *[]byte
+ nextDstType = reflect.TypeOf(v)
+ }
+ }
+
+ // Get underlying type of any array.
+ // https://github.com/jackc/pgx/issues/2107
+ if elemValue.Kind() == reflect.Array {
+ nextDstType = reflect.PointerTo(reflect.ArrayOf(elemValue.Len(), elemValue.Type().Elem()))
}
}
if nextDstType != nil && dstValue.Type() != nextDstType && dstValue.CanConvert(nextDstType) {
return &underlyingTypeScanPlan{dstType: dstValue.Type(), nextDstType: nextDstType}, dstValue.Convert(nextDstType).Interface(), true
}
-
}
return nil, nil, false
@@ -1405,6 +1430,15 @@ func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextS
return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
}
+ // Get underlying type of any array.
+ // https://github.com/jackc/pgx/issues/2107
+ if refValue.Kind() == reflect.Array {
+ underlyingArrayType := reflect.ArrayOf(refValue.Len(), refValue.Type().Elem())
+ if refValue.Type() != underlyingArrayType {
+ return &underlyingTypeEncodePlan{nextValueType: underlyingArrayType}, refValue.Convert(underlyingArrayType).Interface(), true
+ }
+ }
+
return nil, nil, false
}
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
index 9525f37c9..c81257311 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go
@@ -2,6 +2,7 @@ package pgtype
import (
"encoding/json"
+ "encoding/xml"
"net"
"net/netip"
"reflect"
@@ -89,6 +90,7 @@ func initDefaultMap() {
defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
+ defaultMap.RegisterType(&Type{Name: "xml", OID: XMLOID, Codec: &XMLCodec{Marshal: xml.Marshal, Unmarshal: xml.Unmarshal}})
// Range types
defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
@@ -153,6 +155,7 @@ func initDefaultMap() {
defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
+ defaultMap.RegisterType(&Type{Name: "_xml", OID: XMLArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XMLOID]}})
// Integer types that directly map to a PostgreSQL type
registerDefaultPgTypeVariants[int16](defaultMap, "int2")
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/time.go b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
index 61a3abdfd..f8fd94891 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/time.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/time.go
@@ -19,9 +19,11 @@ type TimeValuer interface {
// Time represents the PostgreSQL time type. The PostgreSQL time is a time of day without time zone.
//
-// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time
-// and date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due
-// to needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
+// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time and
+// date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due to
+// needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
+//
+// The time with time zone type is not supported. Use of time with time zone is discouraged by the PostgreSQL documentation.
type Time struct {
Microseconds int64 // Number of microseconds since midnight
Valid bool
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
index 098c516c1..f2b2fa6d4 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uint32.go
@@ -205,6 +205,8 @@ func (Uint32Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPl
return scanPlanBinaryUint32ToUint32{}
case Uint32Scanner:
return scanPlanBinaryUint32ToUint32Scanner{}
+ case TextScanner:
+ return scanPlanBinaryUint32ToTextScanner{}
}
case TextFormatCode:
switch target.(type) {
@@ -282,6 +284,26 @@ func (scanPlanBinaryUint32ToUint32Scanner) Scan(src []byte, dst any) error {
return s.ScanUint32(Uint32{Uint32: n, Valid: true})
}
+type scanPlanBinaryUint32ToTextScanner struct{}
+
+func (scanPlanBinaryUint32ToTextScanner) Scan(src []byte, dst any) error {
+ s, ok := (dst).(TextScanner)
+ if !ok {
+ return ErrScanTargetTypeChanged
+ }
+
+ if src == nil {
+ return s.ScanText(Text{})
+ }
+
+ if len(src) != 4 {
+ return fmt.Errorf("invalid length for uint32: %v", len(src))
+ }
+
+ n := uint64(binary.BigEndian.Uint32(src))
+ return s.ScanText(Text{String: strconv.FormatUint(n, 10), Valid: true})
+}
+
type scanPlanTextAnyToUint32Scanner struct{}
func (scanPlanTextAnyToUint32Scanner) Scan(src []byte, dst any) error {
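
Note on the uint32.go change above: binary-format uint32 values (oid, cid, xid columns) can now be scanned into a TextScanner such as pgtype.Text. A sketch, assuming a DATABASE_URL environment variable:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// pg_class.oid typically arrives binary-encoded with the default exec mode;
	// the new scanPlanBinaryUint32ToTextScanner renders it as a decimal string.
	var oid pgtype.Text
	if err := conn.QueryRow(ctx, "select oid from pg_class limit 1").Scan(&oid); err != nil {
		log.Fatal(err)
	}
	fmt.Println(oid.String)
}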
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/xml.go b/vendor/github.com/jackc/pgx/v5/pgtype/xml.go
new file mode 100644
index 000000000..fb4c49ad9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/xml.go
@@ -0,0 +1,198 @@
+package pgtype
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+)
+
+type XMLCodec struct {
+ Marshal func(v any) ([]byte, error)
+ Unmarshal func(data []byte, v any) error
+}
+
+func (*XMLCodec) FormatSupported(format int16) bool {
+ return format == TextFormatCode || format == BinaryFormatCode
+}
+
+func (*XMLCodec) PreferredFormat() int16 {
+ return TextFormatCode
+}
+
+func (c *XMLCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+ switch value.(type) {
+ case string:
+ return encodePlanXMLCodecEitherFormatString{}
+ case []byte:
+ return encodePlanXMLCodecEitherFormatByteSlice{}
+
+ // Cannot rely on driver.Valuer being handled later because anything can be marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1430
+ //
+ // Check for driver.Valuer must come before xml.Marshaler so that it is guaranteed to be used
+ // when both are implemented https://github.com/jackc/pgx/issues/1805
+ case driver.Valuer:
+ return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
+
+ // Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
+ // marshalled.
+ //
+ // https://github.com/jackc/pgx/issues/1681
+ case xml.Marshaler:
+ return &encodePlanXMLCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+ }
+
+ // Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
+ // appropriate wrappers here.
+ for _, f := range []TryWrapEncodePlanFunc{
+ TryWrapDerefPointerEncodePlan,
+ TryWrapFindUnderlyingTypeEncodePlan,
+ } {
+ if wrapperPlan, nextValue, ok := f(value); ok {
+ if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ return &encodePlanXMLCodecEitherFormatMarshal{
+ marshal: c.Marshal,
+ }
+}
+
+type encodePlanXMLCodecEitherFormatString struct{}
+
+func (encodePlanXMLCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlString := value.(string)
+ buf = append(buf, xmlString...)
+ return buf, nil
+}
+
+type encodePlanXMLCodecEitherFormatByteSlice struct{}
+
+func (encodePlanXMLCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlBytes := value.([]byte)
+ if xmlBytes == nil {
+ return nil, nil
+ }
+
+ buf = append(buf, xmlBytes...)
+ return buf, nil
+}
+
+type encodePlanXMLCodecEitherFormatMarshal struct {
+ marshal func(v any) ([]byte, error)
+}
+
+func (e *encodePlanXMLCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
+ xmlBytes, err := e.marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ buf = append(buf, xmlBytes...)
+ return buf, nil
+}
+
+func (c *XMLCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+ switch target.(type) {
+ case *string:
+ return scanPlanAnyToString{}
+
+ case **string:
+ // This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
+ // solution would be.
+ //
+ // https://github.com/jackc/pgx/issues/1470 -- **string
+ // https://github.com/jackc/pgx/issues/1691 -- ** anything else
+
+ if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
+ if nextPlan := m.planScan(oid, format, nextDst); nextPlan != nil {
+ if _, failed := nextPlan.(*scanPlanFail); !failed {
+ wrapperPlan.SetNext(nextPlan)
+ return wrapperPlan
+ }
+ }
+ }
+
+ case *[]byte:
+ return scanPlanXMLToByteSlice{}
+ case BytesScanner:
+ return scanPlanBinaryBytesToBytesScanner{}
+
+ // Cannot rely on sql.Scanner being handled later because scanPlanXMLToXMLUnmarshal will take precedence.
+ //
+ // https://github.com/jackc/pgx/issues/1418
+ case sql.Scanner:
+ return &scanPlanSQLScanner{formatCode: format}
+ }
+
+ return &scanPlanXMLToXMLUnmarshal{
+ unmarshal: c.Unmarshal,
+ }
+}
+
+type scanPlanXMLToByteSlice struct{}
+
+func (scanPlanXMLToByteSlice) Scan(src []byte, dst any) error {
+ dstBuf := dst.(*[]byte)
+ if src == nil {
+ *dstBuf = nil
+ return nil
+ }
+
+ *dstBuf = make([]byte, len(src))
+ copy(*dstBuf, src)
+ return nil
+}
+
+type scanPlanXMLToXMLUnmarshal struct {
+ unmarshal func(data []byte, v any) error
+}
+
+func (s *scanPlanXMLToXMLUnmarshal) Scan(src []byte, dst any) error {
+ if src == nil {
+ dstValue := reflect.ValueOf(dst)
+ if dstValue.Kind() == reflect.Ptr {
+ el := dstValue.Elem()
+ switch el.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Struct:
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return fmt.Errorf("cannot scan NULL into %T", dst)
+ }
+
+ elem := reflect.ValueOf(dst).Elem()
+ elem.Set(reflect.Zero(elem.Type()))
+
+ return s.unmarshal(src, dst)
+}
+
+func (c *XMLCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ dstBuf := make([]byte, len(src))
+ copy(dstBuf, src)
+ return dstBuf, nil
+}
+
+func (c *XMLCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+ if src == nil {
+ return nil, nil
+ }
+
+ var dst any
+ err := c.Unmarshal(src, &dst)
+ return dst, err
+}
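
Note on the new pgtype/xml.go above: XMLCodec mirrors the JSON codec, so xml columns round-trip through encoding/xml by default. A sketch, assuming a DATABASE_URL environment variable and a hypothetical people table with an xml column named doc:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

// Person is marshalled/unmarshalled by encoding/xml, the codec's defaults.
type Person struct {
	Name string `xml:"name"`
	Age  int    `xml:"age"`
}

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Encoded via encodePlanXMLCodecEitherFormatMarshal (xml.Marshal).
	if _, err := conn.Exec(ctx, "insert into people(doc) values($1)", Person{Name: "ana", Age: 30}); err != nil {
		log.Fatal(err)
	}

	// Decoded via scanPlanXMLToXMLUnmarshal (xml.Unmarshal).
	var p Person
	if err := conn.QueryRow(ctx, "select doc from people limit 1").Scan(&p); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", p)
}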
diff --git a/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
index 74df8593a..b49e7f4d9 100644
--- a/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
+++ b/vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
@@ -18,9 +18,10 @@ func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
return tx.t.Begin(ctx)
}
-// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
-// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
-// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
+// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return an error
+// where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call multiple times. If
+// the commit fails with a rollback status (e.g. the transaction was already in a broken state) then ErrTxCommitRollback
+// will be returned.
func (tx *Tx) Commit(ctx context.Context) error {
err := tx.t.Commit(ctx)
if tx.c != nil {
@@ -30,9 +31,9 @@ func (tx *Tx) Commit(ctx context.Context) error {
return err
}
-// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
-// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
-// tx.Commit() will be called first in a non-error condition.
+// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return
+// where an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call
+// multiple times. Hence, defer tx.Rollback() is safe even if tx.Commit() will be called first in a non-error condition.
func (tx *Tx) Rollback(ctx context.Context) error {
err := tx.t.Rollback(ctx)
if tx.c != nil {
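
Note on the pgxpool/tx.go doc changes above: Commit and Rollback on a closed Tx return an error for which errors.Is(err, pgx.ErrTxClosed) is true, which keeps the usual defer-rollback pattern safe. A sketch, assuming a DATABASE_URL environment variable:

package main

import (
	"context"
	"errors"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()
	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	tx, err := pool.Begin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		// After a successful Commit this returns an error wrapping ErrTxClosed,
		// which is expected and ignored here.
		if err := tx.Rollback(ctx); err != nil && !errors.Is(err, pgx.ErrTxClosed) {
			log.Printf("rollback: %v", err)
		}
	}()

	if _, err := tx.Exec(ctx, "select 1"); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(ctx); err != nil {
		log.Fatal(err)
	}
}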
diff --git a/vendor/github.com/jackc/pgx/v5/rows.go b/vendor/github.com/jackc/pgx/v5/rows.go
index d4f7a9016..f23625d4c 100644
--- a/vendor/github.com/jackc/pgx/v5/rows.go
+++ b/vendor/github.com/jackc/pgx/v5/rows.go
@@ -797,7 +797,7 @@ func computeNamedStructFields(
if !dbTagPresent {
colName = sf.Name
}
- fpos := fieldPosByName(fldDescs, colName)
+ fpos := fieldPosByName(fldDescs, colName, !dbTagPresent)
if fpos == -1 {
if missingField == "" {
missingField = colName
@@ -816,16 +816,21 @@ func computeNamedStructFields(
const structTagKey = "db"
-func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
+func fieldPosByName(fldDescs []pgconn.FieldDescription, field string, normalize bool) (i int) {
i = -1
- for i, desc := range fldDescs {
- // Snake case support.
+ if normalize {
field = strings.ReplaceAll(field, "_", "")
- descName := strings.ReplaceAll(desc.Name, "_", "")
-
- if strings.EqualFold(descName, field) {
- return i
+ }
+ for i, desc := range fldDescs {
+ if normalize {
+ if strings.EqualFold(strings.ReplaceAll(desc.Name, "_", ""), field) {
+ return i
+ }
+ } else {
+ if desc.Name == field {
+ return i
+ }
}
}
return
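
Note on the rows.go change above: when a struct field carries a db tag, RowToStructByName now matches the column name exactly; only untagged fields keep the old case-insensitive, underscore-stripping normalization. A sketch, assuming a DATABASE_URL environment variable:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

type User struct {
	ID       int    `db:"id"`        // tagged: exact column name required
	LastName string `db:"last_name"` // no longer collides with a "lastname" column
	Nick     string                  // untagged: normalized matching still applies
}

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL")) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	rows, err := conn.Query(ctx, "select 1 as id, 'doe' as last_name, 'jd' as nick")
	if err != nil {
		log.Fatal(err)
	}
	users, err := pgx.CollectRows(rows, pgx.RowToStructByName[User])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", users)
}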
diff --git a/vendor/github.com/jackc/pgx/v5/stdlib/sql.go b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go
index 29cd3fbbf..c1d00ab40 100644
--- a/vendor/github.com/jackc/pgx/v5/stdlib/sql.go
+++ b/vendor/github.com/jackc/pgx/v5/stdlib/sql.go
@@ -75,6 +75,7 @@ import (
"math"
"math/rand"
"reflect"
+ "slices"
"strconv"
"strings"
"sync"
@@ -98,7 +99,7 @@ func init() {
// if pgx driver was already registered by different pgx major version then we
// skip registration under the default name.
- if !contains(sql.Drivers(), "pgx") {
+ if !slices.Contains(sql.Drivers(), "pgx") {
sql.Register("pgx", pgxDriver)
}
sql.Register("pgx/v5", pgxDriver)
@@ -120,17 +121,6 @@ func init() {
}
}
-// TODO replace by slices.Contains when experimental package will be merged to stdlib
-// https://pkg.go.dev/golang.org/x/exp/slices#Contains
-func contains(list []string, y string) bool {
- for _, x := range list {
- if x == y {
- return true
- }
- }
- return false
-}
-
// OptionOpenDB options for configuring the driver when opening a new db pool.
type OptionOpenDB func(*connector)
@@ -805,6 +795,16 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
+ case pgtype.XMLOID:
+ var d []byte
+ scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+ err := scanPlan.Scan(src, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ }
default:
var d string
scanPlan := m.PlanScan(dataTypeOID, format, &d)
diff --git a/vendor/github.com/jackc/puddle/v2/CHANGELOG.md b/vendor/github.com/jackc/puddle/v2/CHANGELOG.md
index a15991c58..d0d202c74 100644
--- a/vendor/github.com/jackc/puddle/v2/CHANGELOG.md
+++ b/vendor/github.com/jackc/puddle/v2/CHANGELOG.md
@@ -1,3 +1,8 @@
+# 2.2.2 (September 10, 2024)
+
+* Add empty acquire time to stats (Maxim Ivanov)
+* Stop importing nanotime from runtime via linkname (maypok86)
+
# 2.2.1 (July 15, 2023)
* Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,
diff --git a/vendor/github.com/jackc/puddle/v2/README.md b/vendor/github.com/jackc/puddle/v2/README.md
index 0ad07ec43..fa82a9d46 100644
--- a/vendor/github.com/jackc/puddle/v2/README.md
+++ b/vendor/github.com/jackc/puddle/v2/README.md
@@ -1,4 +1,4 @@
-[![](https://godoc.org/github.com/jackc/puddle?status.svg)](https://godoc.org/github.com/jackc/puddle)
+[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/puddle/v2.svg)](https://pkg.go.dev/github.com/jackc/puddle/v2)
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
# Puddle
diff --git a/vendor/github.com/jackc/puddle/v2/nanotime.go b/vendor/github.com/jackc/puddle/v2/nanotime.go
new file mode 100644
index 000000000..8a5351a0d
--- /dev/null
+++ b/vendor/github.com/jackc/puddle/v2/nanotime.go
@@ -0,0 +1,16 @@
+package puddle
+
+import "time"
+
+// nanotime returns the time in nanoseconds since process start.
+//
+// This approach, described at
+// https://github.com/golang/go/issues/61765#issuecomment-1672090302,
+// is fast, monotonic, and portable, and avoids the previous
+// dependence on runtime.nanotime using the (unsafe) linkname hack.
+// In particular, time.Since does less work than time.Now.
+func nanotime() int64 {
+ return time.Since(globalStart).Nanoseconds()
+}
+
+var globalStart = time.Now()
diff --git a/vendor/github.com/jackc/puddle/v2/nanotime_time.go b/vendor/github.com/jackc/puddle/v2/nanotime_time.go
deleted file mode 100644
index f8e759386..000000000
--- a/vendor/github.com/jackc/puddle/v2/nanotime_time.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build purego || appengine || js
-
-// This file contains the safe implementation of nanotime using time.Now().
-
-package puddle
-
-import (
- "time"
-)
-
-func nanotime() int64 {
- return time.Now().UnixNano()
-}
diff --git a/vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go b/vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go
deleted file mode 100644
index fc3b8a258..000000000
--- a/vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !purego && !appengine && !js
-
-// This file contains the implementation of nanotime using runtime.nanotime.
-
-package puddle
-
-import "unsafe"
-
-var _ = unsafe.Sizeof(0)
-
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
diff --git a/vendor/github.com/jackc/puddle/v2/pool.go b/vendor/github.com/jackc/puddle/v2/pool.go
index c8edc0fb6..c411d2f6e 100644
--- a/vendor/github.com/jackc/puddle/v2/pool.go
+++ b/vendor/github.com/jackc/puddle/v2/pool.go
@@ -139,6 +139,7 @@ type Pool[T any] struct {
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
+ emptyAcquireWaitTime time.Duration
canceledAcquireCount atomic.Int64
resetCount int
@@ -154,7 +155,7 @@ type Config[T any] struct {
MaxSize int32
}
-// NewPool creates a new pool. Panics if maxSize is less than 1.
+// NewPool creates a new pool. Returns an error iff MaxSize is less than 1.
func NewPool[T any](config *Config[T]) (*Pool[T], error) {
if config.MaxSize < 1 {
return nil, errors.New("MaxSize must be >= 1")
@@ -202,6 +203,7 @@ type Stat struct {
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
+ emptyAcquireWaitTime time.Duration
canceledAcquireCount int64
}
@@ -251,6 +253,13 @@ func (s *Stat) EmptyAcquireCount() int64 {
return s.emptyAcquireCount
}
+// EmptyAcquireWaitTime returns the cumulative time waited for successful acquires
+// from the pool for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireWaitTime() time.Duration {
+ return s.emptyAcquireWaitTime
+}
+
// CanceledAcquireCount returns the cumulative count of acquires from the pool
// that were canceled by a context.
func (s *Stat) CanceledAcquireCount() int64 {
@@ -266,6 +275,7 @@ func (p *Pool[T]) Stat() *Stat {
maxResources: p.maxSize,
acquireCount: p.acquireCount,
emptyAcquireCount: p.emptyAcquireCount,
+ emptyAcquireWaitTime: p.emptyAcquireWaitTime,
canceledAcquireCount: p.canceledAcquireCount.Load(),
acquireDuration: p.acquireDuration,
}
@@ -363,11 +373,13 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
// If a resource is available in the pool.
if res := p.tryAcquireIdleResource(); res != nil {
+ waitTime := time.Duration(nanotime() - startNano)
if waitedForLock {
p.emptyAcquireCount += 1
+ p.emptyAcquireWaitTime += waitTime
}
p.acquireCount += 1
- p.acquireDuration += time.Duration(nanotime() - startNano)
+ p.acquireDuration += waitTime
p.mux.Unlock()
return res, nil
}
@@ -391,7 +403,9 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
p.emptyAcquireCount += 1
p.acquireCount += 1
- p.acquireDuration += time.Duration(nanotime() - startNano)
+ waitTime := time.Duration(nanotime() - startNano)
+ p.acquireDuration += waitTime
+ p.emptyAcquireWaitTime += waitTime
return res, nil
}
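
Note on the puddle pool.go changes above: successful acquires that had to wait because the pool was empty now accumulate into EmptyAcquireWaitTime, exposed via Stat. A self-contained sketch using puddle directly (the constructor just hands out integers):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/puddle/v2"
)

func main() {
	ctx := context.Background()
	pool, err := puddle.NewPool(&puddle.Config[int]{
		Constructor: func(ctx context.Context) (int, error) { return 42, nil },
		Destructor:  func(int) {},
		MaxSize:     1,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	res, err := pool.Acquire(ctx)
	if err != nil {
		log.Fatal(err)
	}
	res.Release()

	stat := pool.Stat()
	// Cumulative time successful acquires spent waiting because the pool was empty.
	fmt.Println(stat.EmptyAcquireCount(), stat.EmptyAcquireWaitTime())
}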
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7cd7827d4..0b578c391 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -413,11 +413,11 @@ github.com/inconshreveable/mousetrap
# github.com/jackc/pgpassfile v1.0.0
## explicit; go 1.12
github.com/jackc/pgpassfile
-# github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a
+# github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761
## explicit; go 1.14
github.com/jackc/pgservicefile
-# github.com/jackc/pgx/v5 v5.6.0
-## explicit; go 1.20
+# github.com/jackc/pgx/v5 v5.7.1
+## explicit; go 1.21
github.com/jackc/pgx/v5
github.com/jackc/pgx/v5/internal/iobufpool
github.com/jackc/pgx/v5/internal/pgio
@@ -430,7 +430,7 @@ github.com/jackc/pgx/v5/pgproto3
github.com/jackc/pgx/v5/pgtype
github.com/jackc/pgx/v5/pgxpool
github.com/jackc/pgx/v5/stdlib
-# github.com/jackc/puddle/v2 v2.2.1
+# github.com/jackc/puddle/v2 v2.2.2
## explicit; go 1.19
github.com/jackc/puddle/v2
github.com/jackc/puddle/v2/internal/genstack