path: root/vendor/github.com/jackc/pgx
author    kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>    2022-09-28 18:30:40 +0100
committer GitHub <noreply@github.com>    2022-09-28 18:30:40 +0100
commit    a156188b3eb5cb3da44aa1b7452265f5fa38a607 (patch)
tree      7097fa48d56fbabc7c2c8750b1f3bc9321d71c0f /vendor/github.com/jackc/pgx
parent    [bugfix] Fix emphasis being added to emoji shortcodes with markdown parsing (... (diff)
download  gotosocial-a156188b3eb5cb3da44aa1b7452265f5fa38a607.tar.xz
[chore] update dependencies, bump to Go 1.19.1 (#826)
* update dependencies, bump Go version to 1.19
* bump test image Go version
* update golangci-lint
* update gotosocial-drone-build
* sign
* linting, go fmt
* update swagger docs
* update swagger docs
* whitespace
* update contributing.md
* fuckin whoopsie doopsie
* linterino, linteroni
* fix followrequest test not starting processor
* fix other api/client tests not starting processor
* fix remaining tests where processor not started
* bump go-runners version
* don't check last-webfingered-at, processor may have updated this
* update swagger command
* update bun to latest version
* fix embed to work the same as before with new bun

Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
Diffstat (limited to 'vendor/github.com/jackc/pgx')
-rw-r--r--  vendor/github.com/jackc/pgx/v4/CHANGELOG.md                    35
-rw-r--r--  vendor/github.com/jackc/pgx/v4/README.md                       33
-rw-r--r--  vendor/github.com/jackc/pgx/v4/conn.go                         85
-rw-r--r--  vendor/github.com/jackc/pgx/v4/copy_from.go                     4
-rw-r--r--  vendor/github.com/jackc/pgx/v4/internal/sanitize/sanitize.go    2
-rw-r--r--  vendor/github.com/jackc/pgx/v4/large_objects.go                12
-rw-r--r--  vendor/github.com/jackc/pgx/v4/logger.go                        9
-rw-r--r--  vendor/github.com/jackc/pgx/v4/rows.go                          5
-rw-r--r--  vendor/github.com/jackc/pgx/v4/stdlib/sql.go                    6
-rw-r--r--  vendor/github.com/jackc/pgx/v4/tx.go                           32
10 files changed, 127 insertions, 96 deletions
diff --git a/vendor/github.com/jackc/pgx/v4/CHANGELOG.md b/vendor/github.com/jackc/pgx/v4/CHANGELOG.md
index 4dd93b30e..e8f201295 100644
--- a/vendor/github.com/jackc/pgx/v4/CHANGELOG.md
+++ b/vendor/github.com/jackc/pgx/v4/CHANGELOG.md
@@ -1,3 +1,38 @@
+# 4.17.2 (September 3, 2022)
+
+* Fix panic when logging batch error (Tom Möller)
+
+# 4.17.1 (August 27, 2022)
+
+* Upgrade puddle to v1.3.0 - fixes context failing to cancel Acquire when acquire is creating resource which was introduced in v4.17.0 (James Hartig)
+* Fix atomic alignment on 32-bit platforms
+
+# 4.17.0 (August 6, 2022)
+
+* Upgrade pgconn to v1.13.0
+* Upgrade pgproto3 to v2.3.1
+* Upgrade pgtype to v1.12.0
+* Allow background pool connections to continue even if cause is canceled (James Hartig)
+* Add LoggerFunc (Gabor Szabad)
+* pgxpool: health check should avoid going below minConns (James Hartig)
+* Add pgxpool.Conn.Hijack()
+* Logging improvements (Stepan Rabotkin)
+
+# 4.16.1 (May 7, 2022)
+
+* Upgrade pgconn to v1.12.1
+* Fix explicitly prepared statements with describe statement cache mode
+
+# 4.16.0 (April 21, 2022)
+
+* Upgrade pgconn to v1.12.0
+* Upgrade pgproto3 to v2.3.0
+* Upgrade pgtype to v1.11.0
+* Fix: Do not panic when context cancelled while getting statement from cache.
+* Fix: Less memory pinning from old Rows.
+* Fix: Support '\r' line ending when sanitizing SQL comment.
+* Add pluggable GSSAPI support (Oliver Tan)
+
# 4.15.0 (February 7, 2022)
* Upgrade to pgconn v1.11.0
diff --git a/vendor/github.com/jackc/pgx/v4/README.md b/vendor/github.com/jackc/pgx/v4/README.md
index 110d4f02e..16d8f46f7 100644
--- a/vendor/github.com/jackc/pgx/v4/README.md
+++ b/vendor/github.com/jackc/pgx/v4/README.md
@@ -1,6 +1,11 @@
[![](https://godoc.org/github.com/jackc/pgx?status.svg)](https://pkg.go.dev/github.com/jackc/pgx/v4)
[![Build Status](https://travis-ci.org/jackc/pgx.svg)](https://travis-ci.org/jackc/pgx)
+---
+
+This is the stable `v4` release. `v5` is now in beta testing with final release expected in September. See https://github.com/jackc/pgx/issues/1273 for more information. Please consider testing `v5`.
+
+---
# pgx - PostgreSQL Driver and Toolkit
pgx is a pure Go driver and toolkit for PostgreSQL.
@@ -98,26 +103,6 @@ There are three areas in particular where pgx can provide a significant performa
perform nearly 3x the number of queries per second.
3. Batched queries - Multiple queries can be batched together to minimize network round trips.
-## Comparison with Alternatives
-
-* [pq](http://godoc.org/github.com/lib/pq)
-* [go-pg](https://github.com/go-pg/pg)
-
-For prepared queries with small sets of simple data types, all drivers will have have similar performance. However, if prepared statements aren't being explicitly used, pgx can have a significant performance advantage due to automatic statement preparation.
-pgx also can perform better when using PostgreSQL-specific data types or query batching. See
-[go_db_bench](https://github.com/jackc/go_db_bench) for some database driver benchmarks.
-
-### Compatibility with `database/sql`
-
-pq is exclusively used with `database/sql`. go-pg does not use `database/sql` at all. pgx supports `database/sql` as well as
-its own interface.
-
-### Level of access, ORM
-
-go-pg is a PostgreSQL client and ORM. It includes many features that traditionally sit above the database driver, such as ORM, struct mapping, soft deletes, schema migrations, and sharding support.
-
-pgx is "closer to the metal" and such abstractions are beyond the scope of the pgx project, which first and foremost, aims to be a performant driver and toolkit.
-
## Testing
pgx tests naturally require a PostgreSQL database. It will connect to the database specified in the `PGX_TEST_DATABASE` environment
@@ -201,3 +186,11 @@ pgerrcode contains constants for the PostgreSQL error codes.
### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
Library for scanning data from a database into Go structs and more.
+
+### [https://github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
+
+Adds GSSAPI / Kerberos authentication support.
+
+### [https://github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
+
+Adds support for [`github.com/google/uuid`](https://github.com/google/uuid).
diff --git a/vendor/github.com/jackc/pgx/v4/conn.go b/vendor/github.com/jackc/pgx/v4/conn.go
index 102158ab5..854561e02 100644
--- a/vendor/github.com/jackc/pgx/v4/conn.go
+++ b/vendor/github.com/jackc/pgx/v4/conn.go
@@ -73,9 +73,8 @@ type Conn struct {
connInfo *pgtype.ConnInfo
- wbuf []byte
- preallocatedRows []connRows
- eqb extendedQueryBuilder
+ wbuf []byte
+ eqb extendedQueryBuilder
}
// Identifier a PostgreSQL identifier or name. Identifiers can be composed of
@@ -117,14 +116,14 @@ func ConnectConfig(ctx context.Context, connConfig *ConnConfig) (*Conn, error) {
// ParseConfig creates a ConnConfig from a connection string. ParseConfig handles all options that pgconn.ParseConfig
// does. In addition, it accepts the following options:
//
-// statement_cache_capacity
-// The maximum size of the automatic statement cache. Set to 0 to disable automatic statement caching. Default: 512.
+// statement_cache_capacity
+// The maximum size of the automatic statement cache. Set to 0 to disable automatic statement caching. Default: 512.
//
-// statement_cache_mode
-// Possible values: "prepare" and "describe". "prepare" will create prepared statements on the PostgreSQL server.
-// "describe" will use the anonymous prepared statement to describe a statement without creating a statement on the
-// server. "describe" is primarily useful when the environment does not allow prepared statements such as when
-// running a connection pooler like PgBouncer. Default: "prepare"
+// statement_cache_mode
+// Possible values: "prepare" and "describe". "prepare" will create prepared statements on the PostgreSQL server.
+// "describe" will use the anonymous prepared statement to describe a statement without creating a statement on the
+// server. "describe" is primarily useful when the environment does not allow prepared statements such as when
+// running a connection pooler like PgBouncer. Default: "prepare"
//
// prefer_simple_protocol
// Possible values: "true" and "false". Use the simple protocol instead of extended protocol. Default: false
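The hunk above reflows the ParseConfig doc comment for Go 1.19's gofmt doc-comment style; the options it documents (statement_cache_capacity, statement_cache_mode) are passed in the connection string. A minimal sketch, assuming a placeholder DSN and database:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4"
)

func main() {
	// Placeholder DSN; the statement_cache_* options are the ones documented
	// in the ParseConfig comment above ("describe" mode suits PgBouncer-style
	// poolers that disallow server-side prepared statements).
	dsn := "postgres://user:pass@localhost:5432/app" +
		"?statement_cache_capacity=256&statement_cache_mode=describe"

	cfg, err := pgx.ParseConfig(dsn)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := pgx.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}
```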
@@ -366,30 +365,6 @@ func (c *Conn) Ping(ctx context.Context) error {
return err
}
-func connInfoFromRows(rows Rows, err error) (map[string]uint32, error) {
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- nameOIDs := make(map[string]uint32, 256)
- for rows.Next() {
- var oid uint32
- var name pgtype.Text
- if err = rows.Scan(&oid, &name); err != nil {
- return nil, err
- }
-
- nameOIDs[name.String] = oid
- }
-
- if err = rows.Err(); err != nil {
- return nil, err
- }
-
- return nameOIDs, err
-}
-
// PgConn returns the underlying *pgconn.PgConn. This is an escape hatch method that allows lower level access to the
// PostgreSQL connection than pgx exposes.
//
@@ -414,7 +389,8 @@ func (c *Conn) Exec(ctx context.Context, sql string, arguments ...interface{}) (
commandTag, err := c.exec(ctx, sql, arguments...)
if err != nil {
if c.shouldLog(LogLevelError) {
- c.log(ctx, LogLevelError, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "err": err})
+ endTime := time.Now()
+ c.log(ctx, LogLevelError, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "err": err, "time": endTime.Sub(startTime)})
}
return commandTag, err
}
@@ -537,12 +513,7 @@ func (c *Conn) execPrepared(ctx context.Context, sd *pgconn.StatementDescription
}
func (c *Conn) getRows(ctx context.Context, sql string, args []interface{}) *connRows {
- if len(c.preallocatedRows) == 0 {
- c.preallocatedRows = make([]connRows, 64)
- }
-
- r := &c.preallocatedRows[len(c.preallocatedRows)-1]
- c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+ r := &connRows{}
r.ctx = ctx
r.logger = c
@@ -674,7 +645,7 @@ optionLoop:
resultFormats = c.eqb.resultFormats
}
- if c.stmtcache != nil && c.stmtcache.Mode() == stmtcache.ModeDescribe {
+ if c.stmtcache != nil && c.stmtcache.Mode() == stmtcache.ModeDescribe && !ok {
rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.paramValues, sd.ParamOIDs, c.eqb.paramFormats, resultFormats)
} else {
rows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.paramValues, c.eqb.paramFormats, resultFormats)
@@ -739,6 +710,8 @@ func (c *Conn) QueryFunc(ctx context.Context, sql string, args []interface{}, sc
// explicit transaction control statements are executed. The returned BatchResults must be closed before the connection
// is used again.
func (c *Conn) SendBatch(ctx context.Context, b *Batch) BatchResults {
+ startTime := time.Now()
+
simpleProtocol := c.config.PreferSimpleProtocol
var sb strings.Builder
if simpleProtocol {
@@ -797,24 +770,23 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) BatchResults {
var err error
sd, err = stmtCache.Get(ctx, bi.query)
if err != nil {
- // the stmtCache was prefilled from distinctUnpreparedQueries above so we are guaranteed no errors
- panic("BUG: unexpected error from stmtCache")
+ return c.logBatchResults(ctx, startTime, &batchResults{ctx: ctx, conn: c, err: err})
}
}
if len(sd.ParamOIDs) != len(bi.arguments) {
- return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("mismatched param and argument count")}
+ return c.logBatchResults(ctx, startTime, &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("mismatched param and argument count")})
}
args, err := convertDriverValuers(bi.arguments)
if err != nil {
- return &batchResults{ctx: ctx, conn: c, err: err}
+ return c.logBatchResults(ctx, startTime, &batchResults{ctx: ctx, conn: c, err: err})
}
for i := range args {
err = c.eqb.AppendParam(c.connInfo, sd.ParamOIDs[i], args[i])
if err != nil {
- return &batchResults{ctx: ctx, conn: c, err: err}
+ return c.logBatchResults(ctx, startTime, &batchResults{ctx: ctx, conn: c, err: err})
}
}
@@ -833,13 +805,30 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) BatchResults {
mrr := c.pgConn.ExecBatch(ctx, batch)
- return &batchResults{
+ return c.logBatchResults(ctx, startTime, &batchResults{
ctx: ctx,
conn: c,
mrr: mrr,
b: b,
ix: 0,
+ })
+}
+
+func (c *Conn) logBatchResults(ctx context.Context, startTime time.Time, results *batchResults) BatchResults {
+ if results.err != nil {
+ if c.shouldLog(LogLevelError) {
+ endTime := time.Now()
+ c.log(ctx, LogLevelError, "SendBatch", map[string]interface{}{"err": results.err, "time": endTime.Sub(startTime)})
+ }
+ return results
}
+
+ if c.shouldLog(LogLevelInfo) {
+ endTime := time.Now()
+ c.log(ctx, LogLevelInfo, "SendBatch", map[string]interface{}{"batchLen": results.b.Len(), "time": endTime.Sub(startTime)})
+ }
+
+ return results
}
func (c *Conn) sanitizeForSimpleQuery(sql string, args ...interface{}) (string, error) {
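The conn.go changes above route SendBatch failures (statement-cache errors, mismatched parameter counts, argument conversion errors) through logBatchResults instead of panicking, and both the error and info log entries now carry a "time" field. A minimal sketch of a caller exercising that path; the connection string, table, and column names are placeholders, not from the diff:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	b := &pgx.Batch{}
	b.Queue("insert into widgets(name) values($1)", "a")
	b.Queue("insert into widgets(name) values($1)", "b")

	// With this change, failures while building the batch are returned via
	// BatchResults and logged by logBatchResults with the elapsed time,
	// rather than panicking inside SendBatch.
	br := conn.SendBatch(ctx, b)
	defer br.Close()

	for i := 0; i < b.Len(); i++ {
		if _, err := br.Exec(); err != nil {
			log.Printf("batch item %d failed: %v", i, err)
		}
	}
}
```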
diff --git a/vendor/github.com/jackc/pgx/v4/copy_from.go b/vendor/github.com/jackc/pgx/v4/copy_from.go
index 3494e28f9..49139d050 100644
--- a/vendor/github.com/jackc/pgx/v4/copy_from.go
+++ b/vendor/github.com/jackc/pgx/v4/copy_from.go
@@ -153,13 +153,13 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
<-doneChan
rowsAffected := commandTag.RowsAffected()
+ endTime := time.Now()
if err == nil {
if ct.conn.shouldLog(LogLevelInfo) {
- endTime := time.Now()
ct.conn.log(ctx, LogLevelInfo, "CopyFrom", map[string]interface{}{"tableName": ct.tableName, "columnNames": ct.columnNames, "time": endTime.Sub(startTime), "rowCount": rowsAffected})
}
} else if ct.conn.shouldLog(LogLevelError) {
- ct.conn.log(ctx, LogLevelError, "CopyFrom", map[string]interface{}{"err": err, "tableName": ct.tableName, "columnNames": ct.columnNames})
+ ct.conn.log(ctx, LogLevelError, "CopyFrom", map[string]interface{}{"err": err, "tableName": ct.tableName, "columnNames": ct.columnNames, "time": endTime.Sub(startTime)})
}
return rowsAffected, err
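With the copy_from.go change above, the CopyFrom error log gains the same elapsed "time" field the success log already had. A minimal sketch of a CopyFrom call that would hit either path; the table and column names are placeholders:

```go
package example

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4"
)

// copyWidgets is a hypothetical helper; on failure, the LogLevelError entry
// for CopyFrom now carries "time": endTime.Sub(startTime), matching the
// LogLevelInfo entry on success.
func copyWidgets(ctx context.Context, conn *pgx.Conn) error {
	rows := [][]interface{}{
		{1, "alpha"},
		{2, "beta"},
	}
	copied, err := conn.CopyFrom(
		ctx,
		pgx.Identifier{"widgets"},  // placeholder table name
		[]string{"id", "name"},     // placeholder column names
		pgx.CopyFromRows(rows),
	)
	if err != nil {
		return err
	}
	log.Printf("copied %d rows", copied)
	return nil
}
```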
diff --git a/vendor/github.com/jackc/pgx/v4/internal/sanitize/sanitize.go b/vendor/github.com/jackc/pgx/v4/internal/sanitize/sanitize.go
index 2dba3b810..a7a94e93e 100644
--- a/vendor/github.com/jackc/pgx/v4/internal/sanitize/sanitize.go
+++ b/vendor/github.com/jackc/pgx/v4/internal/sanitize/sanitize.go
@@ -246,7 +246,7 @@ func oneLineCommentState(l *sqlLexer) stateFn {
case '\\':
_, width = utf8.DecodeRuneInString(l.src[l.pos:])
l.pos += width
- case '\n':
+ case '\n', '\r':
return rawState
case utf8.RuneError:
if l.pos-l.start > 0 {
diff --git a/vendor/github.com/jackc/pgx/v4/large_objects.go b/vendor/github.com/jackc/pgx/v4/large_objects.go
index 5255a3b48..c238ab9c2 100644
--- a/vendor/github.com/jackc/pgx/v4/large_objects.go
+++ b/vendor/github.com/jackc/pgx/v4/large_objects.go
@@ -56,10 +56,10 @@ func (o *LargeObjects) Unlink(ctx context.Context, oid uint32) error {
// A LargeObject is a large object stored on the server. It is only valid within the transaction that it was initialized
// in. It uses the context it was initialized with for all operations. It implements these interfaces:
//
-// io.Writer
-// io.Reader
-// io.Seeker
-// io.Closer
+// io.Writer
+// io.Reader
+// io.Seeker
+// io.Closer
type LargeObject struct {
ctx context.Context
tx Tx
@@ -108,13 +108,13 @@ func (o *LargeObject) Tell() (n int64, err error) {
return n, err
}
-// Trunctes the large object to size.
+// Truncate the large object to size.
func (o *LargeObject) Truncate(size int64) (err error) {
_, err = o.tx.Exec(o.ctx, "select lo_truncate64($1, $2)", o.fd, size)
return err
}
-// Close closees the large object descriptor.
+// Close the large object descriptor.
func (o *LargeObject) Close() error {
_, err := o.tx.Exec(o.ctx, "select lo_close($1)", o.fd)
return err
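The large_objects.go hunks reflow the interface list in the LargeObject doc comment and fix the Truncate/Close comments. A minimal sketch using only the methods and interfaces documented there (io.Writer, io.Seeker, Truncate, Close); how the *pgx.LargeObject is opened (via LargeObjects inside a transaction) is assumed and not shown:

```go
package example

import (
	"io"

	"github.com/jackc/pgx/v4"
)

// rewriteObject is a hypothetical helper: it overwrites the object's contents
// and truncates it to the new length, then closes the descriptor.
func rewriteObject(lo *pgx.LargeObject, payload []byte) error {
	if _, err := lo.Write(payload); err != nil { // io.Writer
		return err
	}
	if _, err := lo.Seek(0, io.SeekStart); err != nil { // io.Seeker
		return err
	}
	if err := lo.Truncate(int64(len(payload))); err != nil {
		return err
	}
	return lo.Close() // io.Closer
}
```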
diff --git a/vendor/github.com/jackc/pgx/v4/logger.go b/vendor/github.com/jackc/pgx/v4/logger.go
index 89fd5af51..41f8b7e87 100644
--- a/vendor/github.com/jackc/pgx/v4/logger.go
+++ b/vendor/github.com/jackc/pgx/v4/logger.go
@@ -47,9 +47,18 @@ type Logger interface {
Log(ctx context.Context, level LogLevel, msg string, data map[string]interface{})
}
+// LoggerFunc is a wrapper around a function to satisfy the pgx.Logger interface
+type LoggerFunc func(ctx context.Context, level LogLevel, msg string, data map[string]interface{})
+
+// Log delegates the logging request to the wrapped function
+func (f LoggerFunc) Log(ctx context.Context, level LogLevel, msg string, data map[string]interface{}) {
+ f(ctx, level, msg, data)
+}
+
// LogLevelFromString converts log level string to constant
//
// Valid levels:
+//
// trace
// debug
// info
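The logger.go hunk above introduces LoggerFunc, so a plain function can satisfy pgx.Logger without a wrapper type. A minimal sketch wiring it into a connection config; the DSN is a placeholder, and the Logger/LogLevel fields are existing pgx v4 ConnConfig configuration:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v4"
)

func main() {
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}

	// LoggerFunc (added above) adapts this closure to the pgx.Logger interface.
	cfg.Logger = pgx.LoggerFunc(func(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
		log.Printf("pgx %s: %s %v", level, msg, data)
	})
	cfg.LogLevel = pgx.LogLevelInfo

	conn, err := pgx.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}
```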
diff --git a/vendor/github.com/jackc/pgx/v4/rows.go b/vendor/github.com/jackc/pgx/v4/rows.go
index 271c6e527..4749ead99 100644
--- a/vendor/github.com/jackc/pgx/v4/rows.go
+++ b/vendor/github.com/jackc/pgx/v4/rows.go
@@ -143,14 +143,15 @@ func (rows *connRows) Close() {
}
if rows.logger != nil {
+ endTime := time.Now()
+
if rows.err == nil {
if rows.logger.shouldLog(LogLevelInfo) {
- endTime := time.Now()
rows.logger.log(rows.ctx, LogLevelInfo, "Query", map[string]interface{}{"sql": rows.sql, "args": logQueryArgs(rows.args), "time": endTime.Sub(rows.startTime), "rowCount": rows.rowCount})
}
} else {
if rows.logger.shouldLog(LogLevelError) {
- rows.logger.log(rows.ctx, LogLevelError, "Query", map[string]interface{}{"err": rows.err, "sql": rows.sql, "args": logQueryArgs(rows.args)})
+ rows.logger.log(rows.ctx, LogLevelError, "Query", map[string]interface{}{"err": rows.err, "sql": rows.sql, "time": endTime.Sub(rows.startTime), "args": logQueryArgs(rows.args)})
}
if rows.err != nil && rows.conn.stmtcache != nil {
rows.conn.stmtcache.StatementErrored(rows.sql, rows.err)
diff --git a/vendor/github.com/jackc/pgx/v4/stdlib/sql.go b/vendor/github.com/jackc/pgx/v4/stdlib/sql.go
index fa81e73d5..da377ecee 100644
--- a/vendor/github.com/jackc/pgx/v4/stdlib/sql.go
+++ b/vendor/github.com/jackc/pgx/v4/stdlib/sql.go
@@ -163,7 +163,7 @@ func RandomizeHostOrderFunc(ctx context.Context, connConfig *pgx.ConnConfig) err
return nil
}
-func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
+func GetConnector(config pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector {
c := connector{
ConnConfig: config,
BeforeConnect: func(context.Context, *pgx.ConnConfig) error { return nil }, // noop before connect by default
@@ -175,7 +175,11 @@ func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
for _, opt := range opts {
opt(&c)
}
+ return c
+}
+func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
+ c := GetConnector(config, opts...)
return sql.OpenDB(c)
}
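The stdlib/sql.go change exports the connector construction as GetConnector and rebuilds OpenDB on top of it, so callers can hand the driver.Connector to database/sql themselves. A minimal sketch, assuming a placeholder DSN in the environment:

```go
package main

import (
	"database/sql"
	"log"
	"os"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
)

func main() {
	cfg, err := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}

	// GetConnector (newly exported above) returns a driver.Connector that can
	// be passed straight to database/sql; OpenDB is now a thin wrapper around
	// the same call.
	connector := stdlib.GetConnector(*cfg)
	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```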
diff --git a/vendor/github.com/jackc/pgx/v4/tx.go b/vendor/github.com/jackc/pgx/v4/tx.go
index 1971ed673..2914ada7d 100644
--- a/vendor/github.com/jackc/pgx/v4/tx.go
+++ b/vendor/github.com/jackc/pgx/v4/tx.go
@@ -192,7 +192,7 @@ func (tx *dbTx) Begin(ctx context.Context) (Tx, error) {
return nil, err
}
- return &dbSavepoint{tx: tx, savepointNum: tx.savepointNum}, nil
+ return &dbSimulatedNestedTx{tx: tx, savepointNum: tx.savepointNum}, nil
}
func (tx *dbTx) BeginFunc(ctx context.Context, f func(Tx) error) (err error) {
@@ -329,15 +329,15 @@ func (tx *dbTx) Conn() *Conn {
return tx.conn
}
-// dbSavepoint represents a nested transaction implemented by a savepoint.
-type dbSavepoint struct {
+// dbSimulatedNestedTx represents a simulated nested transaction implemented by a savepoint.
+type dbSimulatedNestedTx struct {
tx Tx
savepointNum int64
closed bool
}
// Begin starts a pseudo nested transaction implemented with a savepoint.
-func (sp *dbSavepoint) Begin(ctx context.Context) (Tx, error) {
+func (sp *dbSimulatedNestedTx) Begin(ctx context.Context) (Tx, error) {
if sp.closed {
return nil, ErrTxClosed
}
@@ -345,7 +345,7 @@ func (sp *dbSavepoint) Begin(ctx context.Context) (Tx, error) {
return sp.tx.Begin(ctx)
}
-func (sp *dbSavepoint) BeginFunc(ctx context.Context, f func(Tx) error) (err error) {
+func (sp *dbSimulatedNestedTx) BeginFunc(ctx context.Context, f func(Tx) error) (err error) {
if sp.closed {
return ErrTxClosed
}
@@ -354,7 +354,7 @@ func (sp *dbSavepoint) BeginFunc(ctx context.Context, f func(Tx) error) (err err
}
// Commit releases the savepoint essentially committing the pseudo nested transaction.
-func (sp *dbSavepoint) Commit(ctx context.Context) error {
+func (sp *dbSimulatedNestedTx) Commit(ctx context.Context) error {
if sp.closed {
return ErrTxClosed
}
@@ -367,7 +367,7 @@ func (sp *dbSavepoint) Commit(ctx context.Context) error {
// Rollback rolls back to the savepoint essentially rolling back the pseudo nested transaction. Rollback will return
// ErrTxClosed if the dbSavepoint is already closed, but is otherwise safe to call multiple times. Hence, a defer sp.Rollback()
// is safe even if sp.Commit() will be called first in a non-error condition.
-func (sp *dbSavepoint) Rollback(ctx context.Context) error {
+func (sp *dbSimulatedNestedTx) Rollback(ctx context.Context) error {
if sp.closed {
return ErrTxClosed
}
@@ -378,7 +378,7 @@ func (sp *dbSavepoint) Rollback(ctx context.Context) error {
}
// Exec delegates to the underlying Tx
-func (sp *dbSavepoint) Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) {
+func (sp *dbSimulatedNestedTx) Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) {
if sp.closed {
return nil, ErrTxClosed
}
@@ -387,7 +387,7 @@ func (sp *dbSavepoint) Exec(ctx context.Context, sql string, arguments ...interf
}
// Prepare delegates to the underlying Tx
-func (sp *dbSavepoint) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
+func (sp *dbSimulatedNestedTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
if sp.closed {
return nil, ErrTxClosed
}
@@ -396,7 +396,7 @@ func (sp *dbSavepoint) Prepare(ctx context.Context, name, sql string) (*pgconn.S
}
// Query delegates to the underlying Tx
-func (sp *dbSavepoint) Query(ctx context.Context, sql string, args ...interface{}) (Rows, error) {
+func (sp *dbSimulatedNestedTx) Query(ctx context.Context, sql string, args ...interface{}) (Rows, error) {
if sp.closed {
// Because checking for errors can be deferred to the *Rows, build one with the error
err := ErrTxClosed
@@ -407,13 +407,13 @@ func (sp *dbSavepoint) Query(ctx context.Context, sql string, args ...interface{
}
// QueryRow delegates to the underlying Tx
-func (sp *dbSavepoint) QueryRow(ctx context.Context, sql string, args ...interface{}) Row {
+func (sp *dbSimulatedNestedTx) QueryRow(ctx context.Context, sql string, args ...interface{}) Row {
rows, _ := sp.Query(ctx, sql, args...)
return (*connRow)(rows.(*connRows))
}
// QueryFunc delegates to the underlying Tx.
-func (sp *dbSavepoint) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(QueryFuncRow) error) (pgconn.CommandTag, error) {
+func (sp *dbSimulatedNestedTx) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(QueryFuncRow) error) (pgconn.CommandTag, error) {
if sp.closed {
return nil, ErrTxClosed
}
@@ -422,7 +422,7 @@ func (sp *dbSavepoint) QueryFunc(ctx context.Context, sql string, args []interfa
}
// CopyFrom delegates to the underlying *Conn
-func (sp *dbSavepoint) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
+func (sp *dbSimulatedNestedTx) CopyFrom(ctx context.Context, tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int64, error) {
if sp.closed {
return 0, ErrTxClosed
}
@@ -431,7 +431,7 @@ func (sp *dbSavepoint) CopyFrom(ctx context.Context, tableName Identifier, colum
}
// SendBatch delegates to the underlying *Conn
-func (sp *dbSavepoint) SendBatch(ctx context.Context, b *Batch) BatchResults {
+func (sp *dbSimulatedNestedTx) SendBatch(ctx context.Context, b *Batch) BatchResults {
if sp.closed {
return &batchResults{err: ErrTxClosed}
}
@@ -439,10 +439,10 @@ func (sp *dbSavepoint) SendBatch(ctx context.Context, b *Batch) BatchResults {
return sp.tx.SendBatch(ctx, b)
}
-func (sp *dbSavepoint) LargeObjects() LargeObjects {
+func (sp *dbSimulatedNestedTx) LargeObjects() LargeObjects {
return LargeObjects{tx: sp}
}
-func (sp *dbSavepoint) Conn() *Conn {
+func (sp *dbSimulatedNestedTx) Conn() *Conn {
return sp.tx.Conn()
}
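The tx.go hunks rename dbSavepoint to dbSimulatedNestedTx without changing behaviour: calling Begin on an existing Tx still returns a pseudo nested transaction backed by a savepoint. A minimal sketch of that public-API behaviour; the table name is a placeholder:

```go
package example

import (
	"context"

	"github.com/jackc/pgx/v4"
)

// nestedInsert is a hypothetical helper showing the simulated nested
// transaction: the inner Begin creates a SAVEPOINT, inner Commit releases it,
// and inner Rollback rolls back to it.
func nestedInsert(ctx context.Context, conn *pgx.Conn) error {
	outer, err := conn.Begin(ctx)
	if err != nil {
		return err
	}
	defer outer.Rollback(ctx) // safe to call even after Commit

	inner, err := outer.Begin(ctx) // SAVEPOINT under the hood
	if err != nil {
		return err
	}
	if _, err := inner.Exec(ctx, "insert into widgets(name) values($1)", "x"); err != nil {
		_ = inner.Rollback(ctx) // ROLLBACK TO SAVEPOINT
		return err
	}
	if err := inner.Commit(ctx); err != nil { // RELEASE SAVEPOINT
		return err
	}
	return outer.Commit(ctx)
}
```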