Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/jackc/pgx/v5/CHANGELOG.md                            10
-rw-r--r--  vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md                         18
-rw-r--r--  vendor/github.com/jackc/pgx/v5/batch.go                                42
-rw-r--r--  vendor/github.com/jackc/pgx/v5/conn.go                                 76
-rw-r--r--  vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go         14
-rw-r--r--  vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go          9
-rw-r--r--  vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go   12
-rw-r--r--  vendor/github.com/jackc/pgx/v5/large_objects.go                        81
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/ltree.go                        122
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go                         2
-rw-r--r--  vendor/github.com/jackc/pgx/v5/pgtype/uuid.go                          14
-rw-r--r--  vendor/github.com/jackc/pgx/v5/rows.go                                 11
-rw-r--r--  vendor/github.com/jackc/pgx/v5/values.go                                6
-rw-r--r--  vendor/modules.txt                                                      2
14 files changed, 307 insertions(+), 112 deletions(-)
diff --git a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
index 6469c183b..4fcbc2473 100644
--- a/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
+++ b/vendor/github.com/jackc/pgx/v5/CHANGELOG.md
@@ -1,3 +1,13 @@
+# 5.5.3 (February 3, 2024)
+
+* Fix: prepared statement already exists
+* Improve CopyFrom auto-conversion of text-ish values
+* Add ltree type support (Florent Viel)
+* Make some properties of Batch and QueuedQuery public (Pavlo Golub)
+* Add AppendRows function (Edoardo Spadolini)
+* Optimize convert UUID [16]byte to string (Kirill Malikov)
+* Fix: LargeObject Read and Write of more than ~1GB at a time (Mitar)
+
 # 5.5.2 (January 13, 2024)
 
 * Allow NamedArgs to start with underscore
diff --git a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
index 3eb0da5b9..6ed3205ce 100644
--- a/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
+++ b/vendor/github.com/jackc/pgx/v5/CONTRIBUTING.md
@@ -79,20 +79,11 @@ echo "listen_addresses = '127.0.0.1'" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql
 echo "port = $PGPORT" >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
 cat testsetup/postgresql_ssl.conf >> .testdb/$POSTGRESQL_DATA_DIR/postgresql.conf
 cp testsetup/pg_hba.conf .testdb/$POSTGRESQL_DATA_DIR/pg_hba.conf
-cp testsetup/ca.cnf .testdb
-cp testsetup/localhost.cnf .testdb
-cp testsetup/pgx_sslcert.cnf .testdb
 
 cd .testdb
 
-# Generate a CA public / private key pair.
-openssl genrsa -out ca.key 4096
-openssl req -x509 -config ca.cnf -new -nodes -key ca.key -sha256 -days 365 -subj '/O=pgx-test-root' -out ca.pem
-
-# Generate the certificate for localhost (the server).
-openssl genrsa -out localhost.key 2048
-openssl req -new -config localhost.cnf -key localhost.key -out localhost.csr
-openssl x509 -req -in localhost.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out localhost.crt -days 364 -sha256 -extfile localhost.cnf -extensions v3_req
+# Generate CA, server, and encrypted client certificates.
+go run ../testsetup/generate_certs.go
 
 # Copy certificates to server directory and set permissions.
 cp ca.pem $POSTGRESQL_DATA_DIR/root.crt
@@ -100,11 +91,6 @@ cp localhost.key $POSTGRESQL_DATA_DIR/server.key
 chmod 600 $POSTGRESQL_DATA_DIR/server.key
 cp localhost.crt $POSTGRESQL_DATA_DIR/server.crt
 
-# Generate the certificate for client authentication.
-openssl genrsa -des3 -out pgx_sslcert.key -passout pass:certpw 2048
-openssl req -new -config pgx_sslcert.cnf -key pgx_sslcert.key -passin pass:certpw -out pgx_sslcert.csr
-openssl x509 -req -in pgx_sslcert.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out pgx_sslcert.crt -days 363 -sha256 -extfile pgx_sslcert.cnf -extensions v3_req
-
 cd ..
 ```
diff --git a/vendor/github.com/jackc/pgx/v5/batch.go b/vendor/github.com/jackc/pgx/v5/batch.go
index 9b943621e..b9b46d1d7 100644
--- a/vendor/github.com/jackc/pgx/v5/batch.go
+++ b/vendor/github.com/jackc/pgx/v5/batch.go
@@ -10,8 +10,8 @@ import (
 
 // QueuedQuery is a query that has been queued for execution via a Batch.
 type QueuedQuery struct {
-	query     string
-	arguments []any
+	SQL       string
+	Arguments []any
 	fn        batchItemFunc
 	sd        *pgconn.StatementDescription
 }
@@ -57,7 +57,7 @@ func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
 // Batch queries are a way of bundling multiple queries together to avoid
 // unnecessary network round trips. A Batch must only be sent once.
 type Batch struct {
-	queuedQueries []*QueuedQuery
+	QueuedQueries []*QueuedQuery
 }
 
 // Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
@@ -65,16 +65,16 @@ type Batch struct {
 // connection's DefaultQueryExecMode.
 func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
 	qq := &QueuedQuery{
-		query:     query,
-		arguments: arguments,
+		SQL:       query,
+		Arguments: arguments,
 	}
-	b.queuedQueries = append(b.queuedQueries, qq)
+	b.QueuedQueries = append(b.QueuedQueries, qq)
 	return qq
 }
 
 // Len returns number of queries that have been queued so far.
 func (b *Batch) Len() int {
-	return len(b.queuedQueries)
+	return len(b.QueuedQueries)
 }
 
 type BatchResults interface {
@@ -227,9 +227,9 @@ func (br *batchResults) Close() error {
 	}
 
 	// Read and run fn for all remaining items
-	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
-		if br.b.queuedQueries[br.qqIdx].fn != nil {
-			err := br.b.queuedQueries[br.qqIdx].fn(br)
+	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+		if br.b.QueuedQueries[br.qqIdx].fn != nil {
+			err := br.b.QueuedQueries[br.qqIdx].fn(br)
 			if err != nil {
 				br.err = err
 			}
@@ -253,10 +253,10 @@ func (br *batchResults) earlyError() error {
 }
 
 func (br *batchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
-	if br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
-		bi := br.b.queuedQueries[br.qqIdx]
-		query = bi.query
-		args = bi.arguments
+	if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+		bi := br.b.QueuedQueries[br.qqIdx]
+		query = bi.SQL
+		args = bi.Arguments
 		ok = true
 		br.qqIdx++
 	}
@@ -396,9 +396,9 @@ func (br *pipelineBatchResults) Close() error {
 	}
 
 	// Read and run fn for all remaining items
-	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
-		if br.b.queuedQueries[br.qqIdx].fn != nil {
-			err := br.b.queuedQueries[br.qqIdx].fn(br)
+	for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+		if br.b.QueuedQueries[br.qqIdx].fn != nil {
+			err := br.b.QueuedQueries[br.qqIdx].fn(br)
 			if err != nil {
 				br.err = err
 			}
@@ -422,10 +422,10 @@ func (br *pipelineBatchResults) earlyError() error {
 }
 
 func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
-	if br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
-		bi := br.b.queuedQueries[br.qqIdx]
-		query = bi.query
-		args = bi.arguments
+	if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
+		bi := br.b.QueuedQueries[br.qqIdx]
+		query = bi.SQL
+		args = bi.Arguments
 		ok = true
 		br.qqIdx++
 	}
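The batch.go change above renames the private query/arguments fields to the exported SQL and Arguments and makes Batch.QueuedQueries public, so applications can inspect a batch before it is sent. A minimal sketch of the new surface; the DSN and widgets table are placeholders:

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://localhost/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	b := &pgx.Batch{}
	b.Queue("insert into widgets(n) values($1)", 1)
	b.Queue("insert into widgets(n) values($1)", 2)

	// New in v5.5.3: queued queries are public, so they can be logged
	// or audited before the batch is sent.
	for i, qq := range b.QueuedQueries {
		log.Printf("batch[%d]: %s %v", i, qq.SQL, qq.Arguments)
	}

	if err := conn.SendBatch(ctx, b).Close(); err != nil {
		log.Fatal(err)
	}
}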
diff --git a/vendor/github.com/jackc/pgx/v5/conn.go b/vendor/github.com/jackc/pgx/v5/conn.go
index 64ae48cab..a7a5ef73d 100644
--- a/vendor/github.com/jackc/pgx/v5/conn.go
+++ b/vendor/github.com/jackc/pgx/v5/conn.go
@@ -903,10 +903,10 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
 		return &batchResults{ctx: ctx, conn: c, err: err}
 	}
 
-	for _, bi := range b.queuedQueries {
+	for _, bi := range b.QueuedQueries {
 		var queryRewriter QueryRewriter
-		sql := bi.query
-		arguments := bi.arguments
+		sql := bi.SQL
+		arguments := bi.Arguments
 
 	optionLoop:
 		for len(arguments) > 0 {
@@ -928,8 +928,8 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
 			}
 		}
 
-		bi.query = sql
-		bi.arguments = arguments
+		bi.SQL = sql
+		bi.Arguments = arguments
 	}
 
 	// TODO: changing mode per batch? Update Batch.Queue function comment when implemented
@@ -939,8 +939,8 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
 	}
 
 	// All other modes use extended protocol and thus can use prepared statements.
-	for _, bi := range b.queuedQueries {
-		if sd, ok := c.preparedStatements[bi.query]; ok {
+	for _, bi := range b.QueuedQueries {
+		if sd, ok := c.preparedStatements[bi.SQL]; ok {
 			bi.sd = sd
 		}
 	}
@@ -961,11 +961,11 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
 
 func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batch) *batchResults {
 	var sb strings.Builder
-	for i, bi := range b.queuedQueries {
+	for i, bi := range b.QueuedQueries {
 		if i > 0 {
 			sb.WriteByte(';')
 		}
-		sql, err := c.sanitizeForSimpleQuery(bi.query, bi.arguments...)
+		sql, err := c.sanitizeForSimpleQuery(bi.SQL, bi.Arguments...)
 		if err != nil {
 			return &batchResults{ctx: ctx, conn: c, err: err}
 		}
@@ -984,21 +984,21 @@ func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batc
 func (c *Conn) sendBatchQueryExecModeExec(ctx context.Context, b *Batch) *batchResults {
 	batch := &pgconn.Batch{}
 
-	for _, bi := range b.queuedQueries {
+	for _, bi := range b.QueuedQueries {
 		sd := bi.sd
 		if sd != nil {
-			err := c.eqb.Build(c.typeMap, sd, bi.arguments)
+			err := c.eqb.Build(c.typeMap, sd, bi.Arguments)
 			if err != nil {
 				return &batchResults{ctx: ctx, conn: c, err: err}
 			}
 
 			batch.ExecPrepared(sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
 		} else {
-			err := c.eqb.Build(c.typeMap, nil, bi.arguments)
+			err := c.eqb.Build(c.typeMap, nil, bi.Arguments)
 			if err != nil {
 				return &batchResults{ctx: ctx, conn: c, err: err}
 			}
-			batch.ExecParams(bi.query, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+			batch.ExecParams(bi.SQL, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
 		}
 	}
 
@@ -1023,18 +1023,18 @@ func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batc
 	distinctNewQueries := []*pgconn.StatementDescription{}
 	distinctNewQueriesIdxMap := make(map[string]int)
 
-	for _, bi := range b.queuedQueries {
+	for _, bi := range b.QueuedQueries {
 		if bi.sd == nil {
-			sd := c.statementCache.Get(bi.query)
+			sd := c.statementCache.Get(bi.SQL)
 			if sd != nil {
 				bi.sd = sd
 			} else {
-				if idx, present := distinctNewQueriesIdxMap[bi.query]; present {
+				if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
 					bi.sd = distinctNewQueries[idx]
 				} else {
 					sd = &pgconn.StatementDescription{
-						Name: stmtcache.StatementName(bi.query),
-						SQL:  bi.query,
+						Name: stmtcache.StatementName(bi.SQL),
+						SQL:  bi.SQL,
 					}
 					distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
 					distinctNewQueries = append(distinctNewQueries, sd)
@@ -1055,17 +1055,17 @@ func (c *Conn) sendBatchQueryExecModeCacheDescribe(ctx context.Context, b *Batch
 	distinctNewQueries := []*pgconn.StatementDescription{}
 	distinctNewQueriesIdxMap := make(map[string]int)
 
-	for _, bi := range b.queuedQueries {
+	for _, bi := range b.QueuedQueries {
 		if bi.sd == nil {
-			sd := c.descriptionCache.Get(bi.query)
+			sd := c.descriptionCache.Get(bi.SQL)
 			if sd != nil {
 				bi.sd = sd
 			} else {
-				if idx, present := distinctNewQueriesIdxMap[bi.query]; present {
+				if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
 					bi.sd = distinctNewQueries[idx]
 				} else {
 					sd = &pgconn.StatementDescription{
-						SQL: bi.query,
+						SQL: bi.SQL,
 					}
 					distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
 					distinctNewQueries = append(distinctNewQueries, sd)
@@ -1082,13 +1082,13 @@ func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch)
 	distinctNewQueries := []*pgconn.StatementDescription{}
 	distinctNewQueriesIdxMap := make(map[string]int)
 
-	for _, bi := range b.queuedQueries {
+	for _, bi := range b.QueuedQueries {
 		if bi.sd == nil {
-			if idx, present := distinctNewQueriesIdxMap[bi.query]; present {
+			if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
 				bi.sd = distinctNewQueries[idx]
 			} else {
 				sd := &pgconn.StatementDescription{
-					SQL: bi.query,
+					SQL: bi.SQL,
 				}
 				distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
 				distinctNewQueries = append(distinctNewQueries, sd)
@@ -1154,11 +1154,11 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
 	}
 
 	// Queue the queries.
-	for _, bi := range b.queuedQueries {
-		err := c.eqb.Build(c.typeMap, bi.sd, bi.arguments)
+	for _, bi := range b.QueuedQueries {
+		err := c.eqb.Build(c.typeMap, bi.sd, bi.Arguments)
 		if err != nil {
 			// we wrap the error so we the user can understand which query failed inside the batch
-			err = fmt.Errorf("error building query %s: %w", bi.query, err)
+			err = fmt.Errorf("error building query %s: %w", bi.SQL, err)
 			return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
 		}
 
@@ -1203,7 +1203,15 @@ func (c *Conn) sanitizeForSimpleQuery(sql string, args ...any) (string, error) {
 	return sanitize.SanitizeSQL(sql, valueArgs...)
 }
 
-// LoadType inspects the database for typeName and produces a pgtype.Type suitable for registration.
+// LoadType inspects the database for typeName and produces a pgtype.Type suitable for registration. typeName must be
+// the name of a type where the underlying type(s) is already understood by pgx. It is for derived types. In particular,
+// typeName must be one of the following:
+//   - An array type name of a type that is already registered. e.g. "_foo" when "foo" is registered.
+//   - A composite type name where all field types are already registered.
+//   - A domain type name where the base type is already registered.
+//   - An enum type name.
+//   - A range type name where the element type is already registered.
+//   - A multirange type name where the element type is already registered.
 func (c *Conn) LoadType(ctx context.Context, typeName string) (*pgtype.Type, error) {
 	var oid uint32
 
@@ -1351,12 +1359,12 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
 	}
 
 	if c.descriptionCache != nil {
-		c.descriptionCache.HandleInvalidated()
+		c.descriptionCache.RemoveInvalidated()
 	}
 
 	var invalidatedStatements []*pgconn.StatementDescription
 	if c.statementCache != nil {
-		invalidatedStatements = c.statementCache.HandleInvalidated()
+		invalidatedStatements = c.statementCache.GetInvalidated()
 	}
 
 	if len(invalidatedStatements) == 0 {
@@ -1368,7 +1376,6 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
 
 	for _, sd := range invalidatedStatements {
 		pipeline.SendDeallocate(sd.Name)
-		delete(c.preparedStatements, sd.Name)
 	}
 
 	err := pipeline.Sync()
@@ -1381,5 +1388,10 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
 		return fmt.Errorf("failed to deallocate cached statement(s): %w", err)
 	}
 
+	c.statementCache.RemoveInvalidated()
+	for _, sd := range invalidatedStatements {
+		delete(c.preparedStatements, sd.Name)
+	}
+
 	return nil
 }
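The expanded LoadType comment is easier to follow with a concrete registration. A sketch, assuming the usual context and pgx imports and a hypothetical enum created with CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'):

// registerMood loads the hypothetical "mood" enum and its array type
// ("_mood") and registers both with the connection's type map. Order
// matters: the element type must be registered before its array type.
func registerMood(ctx context.Context, conn *pgx.Conn) error {
	for _, typeName := range []string{"mood", "_mood"} {
		t, err := conn.LoadType(ctx, typeName)
		if err != nil {
			return err
		}
		conn.TypeMap().RegisterType(t)
	}
	return nil
}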
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
index 859345fcb..dec83f47b 100644
--- a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go
@@ -81,12 +81,16 @@ func (c *LRUCache) InvalidateAll() {
 	c.l = list.New()
 }
 
-// HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.
-// Typically, the caller will then deallocate them.
-func (c *LRUCache) HandleInvalidated() []*pgconn.StatementDescription {
-	invalidStmts := c.invalidStmts
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *LRUCache) GetInvalidated() []*pgconn.StatementDescription {
+	return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+// never seen by the call to GetInvalidated.
+func (c *LRUCache) RemoveInvalidated() {
 	c.invalidStmts = nil
-	return invalidStmts
 }
 
 // Len returns the number of cached prepared statement descriptions.
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
index b2940e230..d57bdd29e 100644
--- a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go
@@ -29,8 +29,13 @@ type Cache interface {
 	// InvalidateAll invalidates all statement descriptions.
 	InvalidateAll()
 
-	// HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.
-	HandleInvalidated() []*pgconn.StatementDescription
+	// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+	GetInvalidated() []*pgconn.StatementDescription
+
+	// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+	// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+	// never seen by the call to GetInvalidated.
+	RemoveInvalidated()
 
 	// Len returns the number of cached prepared statement descriptions.
 	Len() int
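Splitting HandleInvalidated into GetInvalidated and RemoveInvalidated is what fixes the "prepared statement already exists" error: statements invalidated while the deallocation round trip is in flight are no longer silently dropped from the bookkeeping. The intended call sequence, sketched after the pattern deallocateInvalidatedCachedStatements in conn.go now uses (cache and pipeline are assumed to be in scope):

// Phase 1: read the invalidated statements without forgetting them.
invalidated := cache.GetInvalidated()
for _, sd := range invalidated {
	pipeline.SendDeallocate(sd.Name)
}
if err := pipeline.Sync(); err != nil {
	return err
}
// Phase 2: only after the server has deallocated them, drop the local
// records. Statements invalidated in the meantime stay queued for the
// next pass.
cache.RemoveInvalidated()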
diff --git a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
index f5f59396e..696413291 100644
--- a/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
+++ b/vendor/github.com/jackc/pgx/v5/internal/stmtcache/unlimited_cache.go
@@ -54,10 +54,16 @@ func (c *UnlimitedCache) InvalidateAll() {
 	c.m = make(map[string]*pgconn.StatementDescription)
 }
 
-func (c *UnlimitedCache) HandleInvalidated() []*pgconn.StatementDescription {
-	invalidStmts := c.invalidStmts
+// GetInvalidated returns a slice of all statement descriptions invalidated since the last call to RemoveInvalidated.
+func (c *UnlimitedCache) GetInvalidated() []*pgconn.StatementDescription {
+	return c.invalidStmts
+}
+
+// RemoveInvalidated removes all invalidated statement descriptions. No other calls to Cache must be made between a
+// call to GetInvalidated and RemoveInvalidated or RemoveInvalidated may remove statement descriptions that were
+// never seen by the call to GetInvalidated.
+func (c *UnlimitedCache) RemoveInvalidated() {
 	c.invalidStmts = nil
-	return invalidStmts
 }
 
 // Len returns the number of cached prepared statement descriptions.
diff --git a/vendor/github.com/jackc/pgx/v5/large_objects.go b/vendor/github.com/jackc/pgx/v5/large_objects.go
index 676667450..a3028b638 100644
--- a/vendor/github.com/jackc/pgx/v5/large_objects.go
+++ b/vendor/github.com/jackc/pgx/v5/large_objects.go
@@ -6,6 +6,11 @@ import (
 	"io"
 )
 
+// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
+// PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data
+// in the message, maxLargeObjectMessageLength should be no larger than 1 GB - 1 KB.
+var maxLargeObjectMessageLength = 1024*1024*1024 - 1024
+
 // LargeObjects is a structure used to access the large objects API. It is only valid within the transaction where it
 // was created.
 //
@@ -67,41 +72,65 @@ type LargeObject struct {
 }
 
 // Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
-//
-// Write is implemented with a single call to lowrite. The PostgreSQL wire protocol has a limit of 1 GB - 1 per message.
-// See definition of PQ_LARGE_MESSAGE_LIMIT in the PostgreSQL source code. To allow for the other data in the message,
-// len(p) should be no larger than 1 GB - 1 KB.
 func (o *LargeObject) Write(p []byte) (int, error) {
-	var n int
-	err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p).Scan(&n)
-	if err != nil {
-		return n, err
-	}
-
-	if n < 0 {
-		return 0, errors.New("failed to write to large object")
+	nTotal := 0
+	for {
+		expected := len(p) - nTotal
+		if expected == 0 {
+			break
+		} else if expected > maxLargeObjectMessageLength {
+			expected = maxLargeObjectMessageLength
+		}
+
+		var n int
+		err := o.tx.QueryRow(o.ctx, "select lowrite($1, $2)", o.fd, p[nTotal:nTotal+expected]).Scan(&n)
+		if err != nil {
+			return nTotal, err
+		}
+
+		if n < 0 {
+			return nTotal, errors.New("failed to write to large object")
+		}
+
+		nTotal += n
+
+		if n < expected {
+			return nTotal, errors.New("short write to large object")
+		} else if n > expected {
+			return nTotal, errors.New("invalid write to large object")
+		}
 	}
 
-	return n, nil
+	return nTotal, nil
 }
 
 // Read reads up to len(p) bytes into p returning the number of bytes read.
-//
-// Read is implemented with a single call to loread. PostgreSQL internally allocates a single buffer for the response.
-// The largest buffer PostgreSQL will allocate is 1 GB - 1. See definition of MaxAllocSize in the PostgreSQL source
-// code. To allow for the other data in the message, len(p) should be no larger than 1 GB - 1 KB.
 func (o *LargeObject) Read(p []byte) (int, error) {
-	var res []byte
-	err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, len(p)).Scan(&res)
-	copy(p, res)
-	if err != nil {
-		return len(res), err
+	nTotal := 0
+	for {
+		expected := len(p) - nTotal
+		if expected == 0 {
+			break
+		} else if expected > maxLargeObjectMessageLength {
+			expected = maxLargeObjectMessageLength
+		}
+
+		var res []byte
+		err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
+		copy(p[nTotal:], res)
+		nTotal += len(res)
+		if err != nil {
+			return nTotal, err
+		}
+
+		if len(res) < expected {
+			return nTotal, io.EOF
+		} else if len(res) > expected {
+			return nTotal, errors.New("invalid read of large object")
+		}
 	}
 
-	if len(res) < len(p) {
-		err = io.EOF
-	}
-	return len(res), err
+	return nTotal, nil
 }
 
 // Seek moves the current location pointer to the new location specified by offset.
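With the chunked loops above, callers no longer need to keep individual reads and writes under the wire-protocol limit; a single Write may issue several lowrite calls internally. A usage sketch, where data is a placeholder and may exceed 1 GB:

// writeLargeObject stores data as a new large object and returns its OID.
// Large objects are only valid inside the transaction that opened them.
func writeLargeObject(ctx context.Context, conn *pgx.Conn, data []byte) (uint32, error) {
	tx, err := conn.Begin(ctx)
	if err != nil {
		return 0, err
	}
	defer tx.Rollback(ctx) // no-op after a successful Commit

	lo := tx.LargeObjects()
	oid, err := lo.Create(ctx, 0) // 0 lets the server assign the OID
	if err != nil {
		return 0, err
	}

	obj, err := lo.Open(ctx, oid, pgx.LargeObjectModeWrite)
	if err != nil {
		return 0, err
	}
	// As of v5.5.3, Write splits data into protocol-sized chunks itself.
	if _, err := obj.Write(data); err != nil {
		return 0, err
	}
	if err := obj.Close(); err != nil {
		return 0, err
	}

	return oid, tx.Commit(ctx)
}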
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
new file mode 100644
index 000000000..6af317794
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/ltree.go
@@ -0,0 +1,122 @@
+package pgtype
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+type LtreeCodec struct{}
+
+func (l LtreeCodec) FormatSupported(format int16) bool {
+	return format == TextFormatCode || format == BinaryFormatCode
+}
+
+// PreferredFormat returns the preferred format.
+func (l LtreeCodec) PreferredFormat() int16 {
+	return TextFormatCode
+}
+
+// PlanEncode returns an EncodePlan for encoding value into PostgreSQL format for oid and format. If no plan can be
+// found then nil is returned.
+func (l LtreeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
+	switch format {
+	case TextFormatCode:
+		return (TextCodec)(l).PlanEncode(m, oid, format, value)
+	case BinaryFormatCode:
+		switch value.(type) {
+		case string:
+			return encodeLtreeCodecBinaryString{}
+		case []byte:
+			return encodeLtreeCodecBinaryByteSlice{}
+		case TextValuer:
+			return encodeLtreeCodecBinaryTextValuer{}
+		}
+	}
+
+	return nil
+}
+
+type encodeLtreeCodecBinaryString struct{}
+
+func (encodeLtreeCodecBinaryString) Encode(value any, buf []byte) (newBuf []byte, err error) {
+	ltree := value.(string)
+	buf = append(buf, 1)
+	return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryByteSlice struct{}
+
+func (encodeLtreeCodecBinaryByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
+	ltree := value.([]byte)
+	buf = append(buf, 1)
+	return append(buf, ltree...), nil
+}
+
+type encodeLtreeCodecBinaryTextValuer struct{}
+
+func (encodeLtreeCodecBinaryTextValuer) Encode(value any, buf []byte) (newBuf []byte, err error) {
+	t, err := value.(TextValuer).TextValue()
+	if err != nil {
+		return nil, err
+	}
+	if !t.Valid {
+		return nil, nil
+	}
+
+	buf = append(buf, 1)
+	return append(buf, t.String...), nil
+}
+
+// PlanScan returns a ScanPlan for scanning a PostgreSQL value into a destination with the same type as target. If
+// no plan can be found then nil is returned.
+func (l LtreeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
+	switch format {
+	case TextFormatCode:
+		return (TextCodec)(l).PlanScan(m, oid, format, target)
+	case BinaryFormatCode:
+		switch target.(type) {
+		case *string:
+			return scanPlanBinaryLtreeToString{}
+		case TextScanner:
+			return scanPlanBinaryLtreeToTextScanner{}
+		}
+	}
+
+	return nil
+}
+
+type scanPlanBinaryLtreeToString struct{}
+
+func (scanPlanBinaryLtreeToString) Scan(src []byte, target any) error {
+	version := src[0]
+	if version != 1 {
+		return fmt.Errorf("unsupported ltree version %d", version)
+	}
+
+	p := (target).(*string)
+	*p = string(src[1:])
+
+	return nil
+}
+
+type scanPlanBinaryLtreeToTextScanner struct{}
+
+func (scanPlanBinaryLtreeToTextScanner) Scan(src []byte, target any) error {
+	version := src[0]
+	if version != 1 {
+		return fmt.Errorf("unsupported ltree version %d", version)
+	}
+
+	scanner := (target).(TextScanner)
+	return scanner.ScanText(Text{String: string(src[1:]), Valid: true})
+}
+
+// DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
+func (l LtreeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
+	return (TextCodec)(l).DecodeDatabaseSQLValue(m, oid, format, src)
+}
+
+// DecodeValue returns src decoded into its default format.
+func (l LtreeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
+	return (TextCodec)(l).DecodeValue(m, oid, format, src)
+}
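Because ltree comes from an extension, its OID differs per database, so LtreeCodec is not registered by default. A sketch of wiring it up by looking the OID up at startup, assuming the ltree extension is installed:

// registerLtree looks up the database-specific OID for ltree and
// registers the new codec with the connection's type map.
func registerLtree(ctx context.Context, conn *pgx.Conn) error {
	var oid uint32
	err := conn.QueryRow(ctx, "select 'ltree'::regtype::oid").Scan(&oid)
	if err != nil {
		return err
	}
	conn.TypeMap().RegisterType(&pgtype.Type{
		Name:  "ltree",
		OID:   oid,
		Codec: pgtype.LtreeCodec{},
	})
	return nil
}

After registration, ltree values scan into plain strings (or any TextScanner) in both text and binary formats.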
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
index 4c2532d23..08833f876 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go
@@ -81,6 +81,8 @@ const (
 	IntervalOID            = 1186
 	IntervalArrayOID       = 1187
 	NumericArrayOID        = 1231
+	TimetzOID              = 1266
+	TimetzArrayOID         = 1270
 	BitOID                 = 1560
 	BitArrayOID            = 1561
 	VarbitOID              = 1562
diff --git a/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
index b59d6e766..d57c0f2fa 100644
--- a/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
+++ b/vendor/github.com/jackc/pgx/v5/pgtype/uuid.go
@@ -52,7 +52,19 @@ func parseUUID(src string) (dst [16]byte, err error) {
 
 // encodeUUID converts a uuid byte array to UUID standard string form.
 func encodeUUID(src [16]byte) string {
-	return fmt.Sprintf("%x-%x-%x-%x-%x", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])
+	var buf [36]byte
+
+	hex.Encode(buf[0:8], src[:4])
+	buf[8] = '-'
+	hex.Encode(buf[9:13], src[4:6])
+	buf[13] = '-'
+	hex.Encode(buf[14:18], src[6:8])
+	buf[18] = '-'
+	hex.Encode(buf[19:23], src[8:10])
+	buf[23] = '-'
+	hex.Encode(buf[24:], src[10:])
+
+	return string(buf[:])
 }
 
 // Scan implements the database/sql Scanner interface.
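The new encodeUUID hex-encodes directly into a fixed 36-byte buffer (the 8-4-4-4-12 layout plus four hyphens) instead of going through fmt.Sprintf's reflection and intermediate allocations. encodeUUID itself is unexported, but the formatting is observable through pgtype.UUID's driver.Valuer:

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	u := pgtype.UUID{
		Bytes: [16]byte{
			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
		},
		Valid: true,
	}
	v, err := u.Value() // driver.Valuer; formats via encodeUUID
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 00010203-0405-0607-0809-0a0b0c0d0e0f
}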
diff --git a/vendor/github.com/jackc/pgx/v5/rows.go b/vendor/github.com/jackc/pgx/v5/rows.go
index 1ad917655..17e36cba2 100644
--- a/vendor/github.com/jackc/pgx/v5/rows.go
+++ b/vendor/github.com/jackc/pgx/v5/rows.go
@@ -417,12 +417,10 @@ type CollectableRow interface {
 // RowToFunc is a function that scans or otherwise converts row to a T.
 type RowToFunc[T any] func(row CollectableRow) (T, error)
 
-// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
-func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
+// AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T.
+func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) {
 	defer rows.Close()
 
-	slice := []T{}
-
 	for rows.Next() {
 		value, err := fn(rows)
 		if err != nil {
@@ -438,6 +436,11 @@ func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
 	return slice, nil
 }
 
+// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
+func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
+	return AppendRows([]T(nil), rows, fn)
+}
+
 // CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true.
 // CollectOneRow is to CollectRows as QueryRow is to Query.
 func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
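AppendRows generalizes CollectRows: the caller supplies the destination slice, including named slice types via the ~[]T constraint, which allows preallocation and accumulation across several queries. A sketch using the built-in pgx.RowTo mapper and a hypothetical widgets table:

// loadNames accumulates results from several queries into one slice.
func loadNames(ctx context.Context, conn *pgx.Conn) ([]string, error) {
	names := make([]string, 0, 64) // preallocated; AppendRows appends to it
	for _, pattern := range []string{"a%", "b%"} {
		// A Query error is surfaced by AppendRows via rows.Err.
		rows, _ := conn.Query(ctx, "select name from widgets where name like $1", pattern)
		var err error
		names, err = pgx.AppendRows(names, rows, pgx.RowTo[string])
		if err != nil {
			return nil, err
		}
	}
	return names, nil
}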
diff --git a/vendor/github.com/jackc/pgx/v5/values.go b/vendor/github.com/jackc/pgx/v5/values.go
index 19c642fa9..cab717d0a 100644
--- a/vendor/github.com/jackc/pgx/v5/values.go
+++ b/vendor/github.com/jackc/pgx/v5/values.go
@@ -55,7 +55,11 @@ func encodeCopyValue(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, er
 func tryScanStringCopyValueThenEncode(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
 	s, ok := arg.(string)
 	if !ok {
-		return nil, errors.New("not a string")
+		textBuf, err := m.Encode(oid, TextFormatCode, arg, nil)
+		if err != nil {
+			return nil, errors.New("not a string and cannot be encoded as text")
+		}
+		s = string(textBuf)
 	}
 
 	var v any
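The values.go hunk backs the changelog's "Improve CopyFrom auto-conversion of text-ish values": when a copy value is not a plain string, pgx now tries to encode it with the text codec before failing. A hedged sketch of a case this enables, using a named string type (which fails the arg.(string) assertion but text-encodes via its underlying type); the users table and its column type are hypothetical:

type username string // not a plain string: fails arg.(string), but text-encodes fine

// copyUsers copies rows whose name column values are a named string type.
func copyUsers(ctx context.Context, conn *pgx.Conn) (int64, error) {
	rows := [][]any{
		{int64(1), username("Ada")},
		{int64(2), username("Grace")},
	}
	return conn.CopyFrom(
		ctx,
		pgx.Identifier{"users"},
		[]string{"id", "name"},
		pgx.CopyFromRows(rows),
	)
}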
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a257b8249..00ee7a1a5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -324,7 +324,7 @@ github.com/jackc/pgpassfile
 # github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a
 ## explicit; go 1.14
 github.com/jackc/pgservicefile
-# github.com/jackc/pgx/v5 v5.5.2
+# github.com/jackc/pgx/v5 v5.5.3
 ## explicit; go 1.19
 github.com/jackc/pgx/v5
 github.com/jackc/pgx/v5/internal/anynil