Diffstat (limited to 'vendor/github.com/uptrace/bun/migrate')
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/auto.go              450
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/diff.go              416
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/migration.go         302
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/migrations.go        167
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/migrator.go          458
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/operations.go        340
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/sqlschema/column.go   75
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/sqlschema/database.go 127
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go 242
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go  49
-rw-r--r--  vendor/github.com/uptrace/bun/migrate/sqlschema/table.go     60
11 files changed, 0 insertions, 2686 deletions
diff --git a/vendor/github.com/uptrace/bun/migrate/auto.go b/vendor/github.com/uptrace/bun/migrate/auto.go
deleted file mode 100644
index 16804cd99..000000000
--- a/vendor/github.com/uptrace/bun/migrate/auto.go
+++ /dev/null
@@ -1,450 +0,0 @@
-package migrate
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "path/filepath"
-
- "github.com/uptrace/bun"
- "github.com/uptrace/bun/internal"
- "github.com/uptrace/bun/migrate/sqlschema"
- "github.com/uptrace/bun/schema"
-)
-
-type AutoMigratorOption func(m *AutoMigrator)
-
-// WithModel adds a bun.Model to the scope of migrations.
-func WithModel(models ...interface{}) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.includeModels = append(m.includeModels, models...)
- }
-}
-
-// WithExcludeTable tells the AutoMigrator to ignore a table in the database.
-// This prevents AutoMigrator from dropping tables which may exist in the schema
-// but which are not used by the application.
-//
-// Do not exclude tables included via WithModel, as BunModelInspector ignores this setting.
-func WithExcludeTable(tables ...string) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.excludeTables = append(m.excludeTables, tables...)
- }
-}
-
-// WithSchemaName changes the default database schema to migrate objects in.
-func WithSchemaName(schemaName string) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.schemaName = schemaName
- }
-}
-
-// WithTableNameAuto overrides default migrations table name.
-func WithTableNameAuto(table string) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.table = table
- m.migratorOpts = append(m.migratorOpts, WithTableName(table))
- }
-}
-
-// WithLocksTableNameAuto overrides default migration locks table name.
-func WithLocksTableNameAuto(table string) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.locksTable = table
- m.migratorOpts = append(m.migratorOpts, WithLocksTableName(table))
- }
-}
-
-// WithMarkAppliedOnSuccessAuto sets the migrator to only mark migrations as applied/unapplied
-// when their up/down is successful.
-func WithMarkAppliedOnSuccessAuto(enabled bool) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.migratorOpts = append(m.migratorOpts, WithMarkAppliedOnSuccess(enabled))
- }
-}
-
-// WithMigrationsDirectoryAuto overrides the default directory for migration files.
-func WithMigrationsDirectoryAuto(directory string) AutoMigratorOption {
- return func(m *AutoMigrator) {
- m.migrationsOpts = append(m.migrationsOpts, WithMigrationsDirectory(directory))
- }
-}
-
-// AutoMigrator performs automated schema migrations.
-//
-// It is designed to be a drop-in replacement for some Migrator functionality and supports all existing
-// configuration options.
-// Similarly to Migrator, it has methods to create SQL migrations, write them to a file, and apply them.
-// Unlike Migrator, it detects the differences between the state defined by bun models and the current
-// database schema automatically.
-//
-// Usage:
- 1. Generate migrations and apply them at once with AutoMigrator.Migrate().
-// 2. Create up- and down-SQL migration files and apply migrations using Migrator.Migrate().
-//
-// While both methods produce complete, reversible migrations (with entries in the database
-// and SQL migration files), prefer creating migrations and applying them separately for
-// any non-trivial cases to ensure AutoMigrator detects expected changes correctly.
-//
-// Limitations:
-// - AutoMigrator only supports a subset of the possible ALTER TABLE modifications.
-// - Some changes are not automatically reversible. For example, you would need to manually
-// add a CREATE TABLE query to the .down migration file to revert a DROP TABLE migration.
-// - Does not validate most dialect-specific constraints. For example, when changing column
- data type, make sure the data can be auto-cast to the new type.
-// - Due to how the schema-state diff is calculated, it is not possible to rename a table and
-// modify any of its columns' _data type_ in a single run. This will cause the AutoMigrator
-// to drop and re-create the table under a different name; it is better to apply this change in 2 steps.
-// Renaming a table and renaming its columns at the same time is possible.
-// - Renaming table/column to an existing name, i.e. like this [A->B] [B->C], is not possible due to how
-// AutoMigrator distinguishes "rename" and "unchanged" columns.
-//
-// Dialect must implement both sqlschema.Inspector and sqlschema.Migrator to be used with AutoMigrator.
-type AutoMigrator struct {
- db *bun.DB
-
- // dbInspector creates the current state for the target database.
- dbInspector sqlschema.Inspector
-
- // modelInspector creates the desired state based on the model definitions.
- modelInspector sqlschema.Inspector
-
- // dbMigrator executes ALTER TABLE queries.
- dbMigrator sqlschema.Migrator
-
- table string // Migrations table (excluded from database inspection)
- locksTable string // Migration locks table (excluded from database inspection)
-
- // schemaName is the database schema considered for migration.
- schemaName string
-
- // includeModels define the migration scope.
- includeModels []interface{}
-
- // excludeTables are excluded from database inspection.
- excludeTables []string
-
- // diffOpts are passed to detector constructor.
- diffOpts []diffOption
-
- // migratorOpts are passed to Migrator constructor.
- migratorOpts []MigratorOption
-
- // migrationsOpts are passed to Migrations constructor.
- migrationsOpts []MigrationsOption
-}
-
-func NewAutoMigrator(db *bun.DB, opts ...AutoMigratorOption) (*AutoMigrator, error) {
- am := &AutoMigrator{
- db: db,
- table: defaultTable,
- locksTable: defaultLocksTable,
- schemaName: db.Dialect().DefaultSchema(),
- }
-
- for _, opt := range opts {
- opt(am)
- }
- am.excludeTables = append(am.excludeTables, am.table, am.locksTable)
-
- dbInspector, err := sqlschema.NewInspector(db, sqlschema.WithSchemaName(am.schemaName), sqlschema.WithExcludeTables(am.excludeTables...))
- if err != nil {
- return nil, err
- }
- am.dbInspector = dbInspector
- am.diffOpts = append(am.diffOpts, withCompareTypeFunc(db.Dialect().(sqlschema.InspectorDialect).CompareType))
-
- dbMigrator, err := sqlschema.NewMigrator(db, am.schemaName)
- if err != nil {
- return nil, err
- }
- am.dbMigrator = dbMigrator
-
- tables := schema.NewTables(db.Dialect())
- tables.Register(am.includeModels...)
- am.modelInspector = sqlschema.NewBunModelInspector(tables, sqlschema.WithSchemaName(am.schemaName))
-
- return am, nil
-}
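Editorial note: a minimal usage sketch (not part of the vendored file) of the AutoMigrator API defined above. It assumes an already-configured *bun.DB whose dialect implements sqlschema.Inspector and sqlschema.Migrator; the User model and the excluded table are hypothetical.

package example

import (
    "context"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

// User is a hypothetical model included in the migration scope.
type User struct {
    ID   int64  `bun:",pk,autoincrement"`
    Name string `bun:",notnull"`
}

// autoMigrate detects drift between the User model and the live database schema,
// then generates and applies the migration in a single step.
func autoMigrate(ctx context.Context, db *bun.DB) error {
    am, err := migrate.NewAutoMigrator(db,
        migrate.WithModel((*User)(nil)),
        migrate.WithExcludeTable("legacy_audit"), // hypothetical table to leave untouched
    )
    if err != nil {
        return err
    }
    group, err := am.Migrate(ctx)
    if err != nil {
        return err
    }
    _ = group // group.Migrations lists what was applied
    return nil
}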
-
-func (am *AutoMigrator) plan(ctx context.Context) (*changeset, error) {
- var err error
-
- got, err := am.dbInspector.Inspect(ctx)
- if err != nil {
- return nil, err
- }
-
- want, err := am.modelInspector.Inspect(ctx)
- if err != nil {
- return nil, err
- }
-
- changes := diff(got, want, am.diffOpts...)
- if err := changes.ResolveDependencies(); err != nil {
- return nil, fmt.Errorf("plan migrations: %w", err)
- }
- return changes, nil
-}
-
-// Migrate writes required changes to a new migration file and runs the migration.
-// This will create an entry in the migrations table, making it possible to revert
-// the changes with Migrator.Rollback(). MigrationOptions are passed on to Migrator.Migrate().
-func (am *AutoMigrator) Migrate(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
- migrations, _, err := am.createSQLMigrations(ctx, false)
- if err != nil {
- if err == errNothingToMigrate {
- return new(MigrationGroup), nil
- }
- return nil, fmt.Errorf("auto migrate: %w", err)
- }
-
- migrator := NewMigrator(am.db, migrations, am.migratorOpts...)
- if err := migrator.Init(ctx); err != nil {
- return nil, fmt.Errorf("auto migrate: %w", err)
- }
-
- group, err := migrator.Migrate(ctx, opts...)
- if err != nil {
- return nil, fmt.Errorf("auto migrate: %w", err)
- }
- return group, nil
-}
-
-// CreateSQLMigrations writes required changes to new migration files.
-// Use migrate.Migrator to apply the generated migrations.
-func (am *AutoMigrator) CreateSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
- _, files, err := am.createSQLMigrations(ctx, false)
- if err == errNothingToMigrate {
- return files, nil
- }
- return files, err
-}
-
-// CreateTxSQLMigrations writes required changes to new migration files, making sure they will be executed
-// in a transaction when applied. Use migrate.Migrator to apply the generated migrations.
-func (am *AutoMigrator) CreateTxSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
- _, files, err := am.createSQLMigrations(ctx, true)
- if err == errNothingToMigrate {
- return files, nil
- }
- return files, err
-}
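Editorial note: a sketch of the two-step workflow mentioned above — generate reviewable SQL files with the AutoMigrator here, apply them later with a plain Migrator. It reuses the hypothetical User model from the previous sketch; the ./migrations directory is likewise an assumption.

package example

import (
    "context"
    "fmt"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

// generateSQL writes the detected changes to timestamped .up.sql/.down.sql files
// so they can be reviewed (and edited) before being applied.
func generateSQL(ctx context.Context, db *bun.DB) error {
    am, err := migrate.NewAutoMigrator(db,
        migrate.WithModel((*User)(nil)), // hypothetical model from the earlier sketch
        migrate.WithMigrationsDirectoryAuto("./migrations"),
    )
    if err != nil {
        return err
    }

    files, err := am.CreateSQLMigrations(ctx)
    if err != nil {
        return err
    }
    for _, f := range files {
        fmt.Println("wrote", f.Path)
    }
    // The generated files can then be discovered by a Migrations collection
    // and applied with migrate.NewMigrator(db, migrations).Migrate(ctx).
    return nil
}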
-
-// errNothingToMigrate is a sentinel error which means the database is already in the desired state.
-// Should not be returned to the user -- return a nil error instead.
-var errNothingToMigrate = errors.New("nothing to migrate")
-
-func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional bool) (*Migrations, []*MigrationFile, error) {
- changes, err := am.plan(ctx)
- if err != nil {
- return nil, nil, fmt.Errorf("create sql migrations: %w", err)
- }
-
- if changes.Len() == 0 {
- return nil, nil, errNothingToMigrate
- }
-
- name, _ := genMigrationName(am.schemaName + "_auto")
- migrations := NewMigrations(am.migrationsOpts...)
- migrations.Add(Migration{
- Name: name,
- Up: changes.Up(am.dbMigrator),
- Down: changes.Down(am.dbMigrator),
- Comment: "Changes detected by bun.AutoMigrator",
- })
-
- // Append .tx.up.sql or .up.sql to the migration name, depending on whether it should be transactional.
- fname := func(direction string) string {
- return name + map[bool]string{true: ".tx.", false: "."}[transactional] + direction + ".sql"
- }
-
- up, err := am.createSQL(ctx, migrations, fname("up"), changes, transactional)
- if err != nil {
- return nil, nil, fmt.Errorf("create sql migration up: %w", err)
- }
-
- down, err := am.createSQL(ctx, migrations, fname("down"), changes.GetReverse(), transactional)
- if err != nil {
- return nil, nil, fmt.Errorf("create sql migration down: %w", err)
- }
- return migrations, []*MigrationFile{up, down}, nil
-}
-
-func (am *AutoMigrator) createSQL(_ context.Context, migrations *Migrations, fname string, changes *changeset, transactional bool) (*MigrationFile, error) {
- var buf bytes.Buffer
-
- if transactional {
- buf.WriteString("SET statement_timeout = 0;")
- }
-
- if err := changes.WriteTo(&buf, am.dbMigrator); err != nil {
- return nil, err
- }
- content := buf.Bytes()
-
- fpath := filepath.Join(migrations.getDirectory(), fname)
- if err := os.WriteFile(fpath, content, 0o644); err != nil {
- return nil, err
- }
-
- mf := &MigrationFile{
- Name: fname,
- Path: fpath,
- Content: string(content),
- }
- return mf, nil
-}
-
-func (c *changeset) Len() int {
- return len(c.operations)
-}
-
-// Func creates a MigrationFunc that applies all operations in the changeset.
-func (c *changeset) Func(m sqlschema.Migrator) MigrationFunc {
- return func(ctx context.Context, db *bun.DB) error {
- return c.apply(ctx, db, m)
- }
-}
-
-// GetReverse returns a new changeset with each operation in it "reversed" and in reverse order.
-func (c *changeset) GetReverse() *changeset {
- var reverse changeset
- for i := len(c.operations) - 1; i >= 0; i-- {
- reverse.Add(c.operations[i].GetReverse())
- }
- return &reverse
-}
-
-// Up is syntactic sugar.
-func (c *changeset) Up(m sqlschema.Migrator) MigrationFunc {
- return c.Func(m)
-}
-
-// Down is syntactic sugar.
-func (c *changeset) Down(m sqlschema.Migrator) MigrationFunc {
- return c.GetReverse().Func(m)
-}
-
-// apply generates SQL for each operation and executes it.
-func (c *changeset) apply(ctx context.Context, db *bun.DB, m sqlschema.Migrator) error {
- if len(c.operations) == 0 {
- return nil
- }
-
- for _, op := range c.operations {
- if _, isComment := op.(*comment); isComment {
- continue
- }
-
- b := internal.MakeQueryBytes()
- b, err := m.AppendSQL(b, op)
- if err != nil {
- return fmt.Errorf("apply changes: %w", err)
- }
-
- query := internal.String(b)
- if _, err = db.ExecContext(ctx, query); err != nil {
- return fmt.Errorf("apply changes: %w", err)
- }
- }
- return nil
-}
-
-func (c *changeset) WriteTo(w io.Writer, m sqlschema.Migrator) error {
- var err error
-
- b := internal.MakeQueryBytes()
- for _, op := range c.operations {
- if c, isComment := op.(*comment); isComment {
- b = append(b, "/*\n"...)
- b = append(b, *c...)
- b = append(b, "\n*/"...)
- continue
- }
-
- b, err = m.AppendSQL(b, op)
- if err != nil {
- return fmt.Errorf("write changeset: %w", err)
- }
- b = append(b, ";\n"...)
- }
- if _, err := w.Write(b); err != nil {
- return fmt.Errorf("write changeset: %w", err)
- }
- return nil
-}
-
-func (c *changeset) ResolveDependencies() error {
- if len(c.operations) <= 1 {
- return nil
- }
-
- const (
- unvisited = iota
- current
- visited
- )
-
- status := make(map[Operation]int, len(c.operations))
- for _, op := range c.operations {
- status[op] = unvisited
- }
-
- var resolved []Operation
- var nextOp Operation
- var visit func(op Operation) error
-
- next := func() bool {
- for op, s := range status {
- if s == unvisited {
- nextOp = op
- return true
- }
- }
- return false
- }
-
- // visit iterates over c.operations until it finds all operations that depend on the current one
- // or runs into a circular dependency, in which case it returns an error.
- visit = func(op Operation) error {
- switch status[op] {
- case visited:
- return nil
- case current:
- // TODO: add details (circle) to the error message
- return errors.New("detected circular dependency")
- }
-
- status[op] = current
-
- for _, another := range c.operations {
- if dop, hasDeps := another.(interface {
- DependsOn(Operation) bool
- }); another == op || !hasDeps || !dop.DependsOn(op) {
- continue
- }
- if err := visit(another); err != nil {
- return err
- }
- }
-
- status[op] = visited
-
- // Any dependent nodes would've already been added to the list by now, so we prepend.
- resolved = append([]Operation{op}, resolved...)
- return nil
- }
-
- for next() {
- if err := visit(nextOp); err != nil {
- return err
- }
- }
-
- c.operations = resolved
- return nil
-}
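Editorial note: ResolveDependencies above is a depth-first topological sort driven by the optional DependsOn method. Below is a standalone sketch of the same technique on a toy type; the names are illustrative and not part of this package.

package example

import "errors"

// op is a toy stand-in for an operation with explicit dependencies.
type op struct {
    name string
    deps []*op // operations that must be applied before this one
}

// toposort orders ops so that every op comes after the ops it depends on,
// and reports an error when it runs into a cycle.
func toposort(ops []*op) ([]*op, error) {
    const (
        unvisited = iota
        current
        visited
    )
    state := make(map[*op]int, len(ops))
    var sorted []*op

    var visit func(o *op) error
    visit = func(o *op) error {
        switch state[o] {
        case visited:
            return nil
        case current:
            return errors.New("detected circular dependency")
        }
        state[o] = current
        for _, d := range o.deps {
            if err := visit(d); err != nil {
                return err
            }
        }
        state[o] = visited
        sorted = append(sorted, o) // all dependencies are already in the list
        return nil
    }

    for _, o := range ops {
        if err := visit(o); err != nil {
            return nil, err
        }
    }
    return sorted, nil
}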
diff --git a/vendor/github.com/uptrace/bun/migrate/diff.go b/vendor/github.com/uptrace/bun/migrate/diff.go
deleted file mode 100644
index e05d54b7d..000000000
--- a/vendor/github.com/uptrace/bun/migrate/diff.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package migrate
-
-import (
- "github.com/uptrace/bun/migrate/sqlschema"
-)
-
-// changeset is a set of changes to the database schema definition.
-type changeset struct {
- operations []Operation
-}
-
-// Add new operations to the changeset.
-func (c *changeset) Add(op ...Operation) {
- c.operations = append(c.operations, op...)
-}
-
-// diff calculates the diff between the current database schema and the target state.
-// The changeset is not sorted -- the caller should resolve dependencies before applying the changes.
-func diff(got, want sqlschema.Database, opts ...diffOption) *changeset {
- d := newDetector(got, want, opts...)
- return d.detectChanges()
-}
-
-func (d *detector) detectChanges() *changeset {
- currentTables := d.current.GetTables()
- targetTables := d.target.GetTables()
-
-RenameCreate:
- for _, wantPair := range targetTables.Pairs() {
- wantName, wantTable := wantPair.Key, wantPair.Value
- // A table with this name exists in the database. We assume that schema objects won't
- // be renamed to an already existing name, nor do we support such cases.
- // Simply check if the table definition has changed.
- if haveTable, ok := currentTables.Load(wantName); ok {
- d.detectColumnChanges(haveTable, wantTable, true)
- d.detectConstraintChanges(haveTable, wantTable)
- continue
- }
-
- // Find all renamed tables. We assume that renamed tables have the same signature.
- for _, havePair := range currentTables.Pairs() {
- haveName, haveTable := havePair.Key, havePair.Value
- if _, exists := targetTables.Load(haveName); !exists && d.canRename(haveTable, wantTable) {
- d.changes.Add(&RenameTableOp{
- TableName: haveTable.GetName(),
- NewName: wantName,
- })
- d.refMap.RenameTable(haveTable.GetName(), wantName)
-
- // Find renamed columns, if any, and check if constraints (PK, UNIQUE) have been updated.
- // We need not check wantTable any further.
- d.detectColumnChanges(haveTable, wantTable, false)
- d.detectConstraintChanges(haveTable, wantTable)
- currentTables.Delete(haveName)
- continue RenameCreate
- }
- }
-
- // If wantTable does not exist in the database and was not renamed
- // then we need to create this table in the database.
- additional := wantTable.(*sqlschema.BunTable)
- d.changes.Add(&CreateTableOp{
- TableName: wantTable.GetName(),
- Model: additional.Model,
- })
- }
-
- // Drop any remaining "current" tables which do not have a model.
- for _, tPair := range currentTables.Pairs() {
- name, table := tPair.Key, tPair.Value
- if _, keep := targetTables.Load(name); !keep {
- d.changes.Add(&DropTableOp{
- TableName: table.GetName(),
- })
- }
- }
-
- targetFKs := d.target.GetForeignKeys()
- currentFKs := d.refMap.Deref()
-
- for fk := range targetFKs {
- if _, ok := currentFKs[fk]; !ok {
- d.changes.Add(&AddForeignKeyOp{
- ForeignKey: fk,
- ConstraintName: "", // leave empty to let each dialect apply their convention
- })
- }
- }
-
- for fk, name := range currentFKs {
- if _, ok := targetFKs[fk]; !ok {
- d.changes.Add(&DropForeignKeyOp{
- ConstraintName: name,
- ForeignKey: fk,
- })
- }
- }
-
- return &d.changes
-}
-
-// detectColumnChanges finds renamed columns and, if checkType == true, columns with changed type.
-func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkType bool) {
- currentColumns := current.GetColumns()
- targetColumns := target.GetColumns()
-
-ChangeRename:
- for _, tPair := range targetColumns.Pairs() {
- tName, tCol := tPair.Key, tPair.Value
-
- // This column exists in the database, so it hasn't been renamed, dropped, or added.
- // Still, we should not delete(columns, thisColumn), because later we will need to
- // check that we do not try to rename a column to a name that already exists.
- if cCol, ok := currentColumns.Load(tName); ok {
- if checkType && !d.equalColumns(cCol, tCol) {
- d.changes.Add(&ChangeColumnTypeOp{
- TableName: target.GetName(),
- Column: tName,
- From: cCol,
- To: d.makeTargetColDef(cCol, tCol),
- })
- }
- continue
- }
-
- // Column tName does not exist in the database -- it's been either renamed or added.
- // Find renamed columns first.
- for _, cPair := range currentColumns.Pairs() {
- cName, cCol := cPair.Key, cPair.Value
- // Cannot rename if a column with this name already exists or the types differ.
- if _, exists := targetColumns.Load(cName); exists || !d.equalColumns(tCol, cCol) {
- continue
- }
- d.changes.Add(&RenameColumnOp{
- TableName: target.GetName(),
- OldName: cName,
- NewName: tName,
- })
- d.refMap.RenameColumn(target.GetName(), cName, tName)
- currentColumns.Delete(cName) // no need to check this column again
-
- // Update primary key definition to avoid superficially recreating the constraint.
- current.GetPrimaryKey().Columns.Replace(cName, tName)
-
- continue ChangeRename
- }
-
- d.changes.Add(&AddColumnOp{
- TableName: target.GetName(),
- ColumnName: tName,
- Column: tCol,
- })
- }
-
- // Drop columns which do not exist in the target schema and were not renamed.
- for _, cPair := range currentColumns.Pairs() {
- cName, cCol := cPair.Key, cPair.Value
- if _, keep := targetColumns.Load(cName); !keep {
- d.changes.Add(&DropColumnOp{
- TableName: target.GetName(),
- ColumnName: cName,
- Column: cCol,
- })
- }
- }
-}
-
-func (d *detector) detectConstraintChanges(current, target sqlschema.Table) {
-Add:
- for _, want := range target.GetUniqueConstraints() {
- for _, got := range current.GetUniqueConstraints() {
- if got.Equals(want) {
- continue Add
- }
- }
- d.changes.Add(&AddUniqueConstraintOp{
- TableName: target.GetName(),
- Unique: want,
- })
- }
-
-Drop:
- for _, got := range current.GetUniqueConstraints() {
- for _, want := range target.GetUniqueConstraints() {
- if got.Equals(want) {
- continue Drop
- }
- }
-
- d.changes.Add(&DropUniqueConstraintOp{
- TableName: target.GetName(),
- Unique: got,
- })
- }
-
- targetPK := target.GetPrimaryKey()
- currentPK := current.GetPrimaryKey()
-
- // Detect primary key changes
- if targetPK == nil && currentPK == nil {
- return
- }
- switch {
- case targetPK == nil && currentPK != nil:
- d.changes.Add(&DropPrimaryKeyOp{
- TableName: target.GetName(),
- PrimaryKey: *currentPK,
- })
- case currentPK == nil && targetPK != nil:
- d.changes.Add(&AddPrimaryKeyOp{
- TableName: target.GetName(),
- PrimaryKey: *targetPK,
- })
- case targetPK.Columns != currentPK.Columns:
- d.changes.Add(&ChangePrimaryKeyOp{
- TableName: target.GetName(),
- Old: *currentPK,
- New: *targetPK,
- })
- }
-}
-
-func newDetector(got, want sqlschema.Database, opts ...diffOption) *detector {
- cfg := &detectorConfig{
- cmpType: func(c1, c2 sqlschema.Column) bool {
- return c1.GetSQLType() == c2.GetSQLType() && c1.GetVarcharLen() == c2.GetVarcharLen()
- },
- }
- for _, opt := range opts {
- opt(cfg)
- }
-
- return &detector{
- current: got,
- target: want,
- refMap: newRefMap(got.GetForeignKeys()),
- cmpType: cfg.cmpType,
- }
-}
-
-type diffOption func(*detectorConfig)
-
-func withCompareTypeFunc(f CompareTypeFunc) diffOption {
- return func(cfg *detectorConfig) {
- cfg.cmpType = f
- }
-}
-
-// detectorConfig controls how differences in the model states are resolved.
-type detectorConfig struct {
- cmpType CompareTypeFunc
-}
-
-// detector may modify the passed database schemas, so it isn't safe to re-use them.
-type detector struct {
- // current state represents the existing database schema.
- current sqlschema.Database
-
- // target state represents the database schema defined in bun models.
- target sqlschema.Database
-
- changes changeset
- refMap refMap
-
- // cmpType determines column type equivalence.
- // Default is direct comparison with '==' operator, which is inaccurate
- // due to the existence of dialect-specific type aliases. The caller
- // should pass a concrete InspectorDialect.CompareType for robust comparison.
- cmpType CompareTypeFunc
-}
-
-// canRename checks if t1 can be renamed to t2.
-func (d detector) canRename(t1, t2 sqlschema.Table) bool {
- return t1.GetSchema() == t2.GetSchema() && equalSignatures(t1, t2, d.equalColumns)
-}
-
-func (d detector) equalColumns(col1, col2 sqlschema.Column) bool {
- return d.cmpType(col1, col2) &&
- col1.GetDefaultValue() == col2.GetDefaultValue() &&
- col1.GetIsNullable() == col2.GetIsNullable() &&
- col1.GetIsAutoIncrement() == col2.GetIsAutoIncrement() &&
- col1.GetIsIdentity() == col2.GetIsIdentity()
-}
-
-func (d detector) makeTargetColDef(current, target sqlschema.Column) sqlschema.Column {
- // Avoid unnecessary type-change migrations if the types are equivalent.
- if d.cmpType(current, target) {
- target = &sqlschema.BaseColumn{
- Name: target.GetName(),
- DefaultValue: target.GetDefaultValue(),
- IsNullable: target.GetIsNullable(),
- IsAutoIncrement: target.GetIsAutoIncrement(),
- IsIdentity: target.GetIsIdentity(),
-
- SQLType: current.GetSQLType(),
- VarcharLen: current.GetVarcharLen(),
- }
- }
- return target
-}
-
-type CompareTypeFunc func(sqlschema.Column, sqlschema.Column) bool
-
-// equalSignatures determines if two tables have the same "signature".
-func equalSignatures(t1, t2 sqlschema.Table, eq CompareTypeFunc) bool {
- sig1 := newSignature(t1, eq)
- sig2 := newSignature(t2, eq)
- return sig1.Equals(sig2)
-}
-
-// signature is a set of column definitions, which allows "relation/name-agnostic" comparison between them;
-// meaning that two columns are considered equal if their types are the same.
-type signature struct {
-
- // underlying stores the number of occurrences for each unique column type.
- // It helps to account for the fact that a table might have multiple columns that have the same type.
- underlying map[sqlschema.BaseColumn]int
-
- eq CompareTypeFunc
-}
-
-func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature {
- s := signature{
- underlying: make(map[sqlschema.BaseColumn]int),
- eq: eq,
- }
- s.scan(t)
- return s
-}
-
-// scan iterates over the table's fields and counts occurrences of each unique column definition.
-func (s *signature) scan(t sqlschema.Table) {
- for _, icol := range t.GetColumns().Values() {
- scanCol := icol.(*sqlschema.BaseColumn)
- // This is slightly more expensive than if the columns could be compared directly
- // and we always did s.underlying[col]++, but we get type-equivalence in return.
- col, count := s.getCount(*scanCol)
- if count == 0 {
- s.underlying[*scanCol] = 1
- } else {
- s.underlying[col]++
- }
- }
-}
-
-// getCount uses CompareTypeFunc to find a column with the same (equivalent) SQL type
-// and returns its count. Count 0 means there are no columns of this type.
-func (s *signature) getCount(keyCol sqlschema.BaseColumn) (key sqlschema.BaseColumn, count int) {
- for col, cnt := range s.underlying {
- if s.eq(&col, &keyCol) {
- return col, cnt
- }
- }
- return keyCol, 0
-}
-
-// Equals returns true if 2 signatures share an identical set of columns.
-func (s *signature) Equals(other signature) bool {
- if len(s.underlying) != len(other.underlying) {
- return false
- }
- for col, count := range s.underlying {
- if _, countOther := other.getCount(col); countOther != count {
- return false
- }
- }
- return true
-}
-
-// refMap is a utility for tracking superficial changes in foreign keys,
-// which do not require any modification in the database.
-// Modern SQL dialects automatically update foreign key constraints whenever
-// a column or a table is renamed. The detector can use refMap to ignore any
-// differences in foreign keys which were caused by a renamed column or table.
-type refMap map[*sqlschema.ForeignKey]string
-
-func newRefMap(fks map[sqlschema.ForeignKey]string) refMap {
- rm := make(map[*sqlschema.ForeignKey]string)
- for fk, name := range fks {
- rm[&fk] = name
- }
- return rm
-}
-
-// RenameTable updates the table name in all foreign key definitions which depend on it.
-func (rm refMap) RenameTable(tableName string, newName string) {
- for fk := range rm {
- switch tableName {
- case fk.From.TableName:
- fk.From.TableName = newName
- case fk.To.TableName:
- fk.To.TableName = newName
- }
- }
-}
-
-// RenameColumn updates the column name in all foreign key definitions which depend on it.
-func (rm refMap) RenameColumn(tableName string, column, newName string) {
- for fk := range rm {
- if tableName == fk.From.TableName {
- fk.From.Column.Replace(column, newName)
- }
- if tableName == fk.To.TableName {
- fk.To.Column.Replace(column, newName)
- }
- }
-}
-
-// Deref returns a map with copies of the dereferenced ForeignKey values.
-func (rm refMap) Deref() map[sqlschema.ForeignKey]string {
- out := make(map[sqlschema.ForeignKey]string)
- for fk, name := range rm {
- out[*fk] = name
- }
- return out
-}
diff --git a/vendor/github.com/uptrace/bun/migrate/migration.go b/vendor/github.com/uptrace/bun/migrate/migration.go
deleted file mode 100644
index 3f4076d2b..000000000
--- a/vendor/github.com/uptrace/bun/migrate/migration.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package migrate
-
-import (
- "bufio"
- "bytes"
- "context"
- "fmt"
- "io"
- "io/fs"
- "sort"
- "strings"
- "time"
-
- "github.com/uptrace/bun"
-)
-
-type Migration struct {
- bun.BaseModel
-
- ID int64 `bun:",pk,autoincrement"`
- Name string
- Comment string `bun:"-"`
- GroupID int64
- MigratedAt time.Time `bun:",notnull,nullzero,default:current_timestamp"`
-
- Up MigrationFunc `bun:"-"`
- Down MigrationFunc `bun:"-"`
-}
-
-func (m Migration) String() string {
- return fmt.Sprintf("%s_%s", m.Name, m.Comment)
-}
-
-func (m Migration) IsApplied() bool {
- return m.ID > 0
-}
-
-type MigrationFunc func(ctx context.Context, db *bun.DB) error
-
-func NewSQLMigrationFunc(fsys fs.FS, name string) MigrationFunc {
- return func(ctx context.Context, db *bun.DB) error {
- f, err := fsys.Open(name)
- if err != nil {
- return err
- }
-
- isTx := strings.HasSuffix(name, ".tx.up.sql") || strings.HasSuffix(name, ".tx.down.sql")
- return Exec(ctx, db, f, isTx)
- }
-}
-
-// Exec reads and executes the SQL migration from f.
-func Exec(ctx context.Context, db *bun.DB, f io.Reader, isTx bool) error {
- scanner := bufio.NewScanner(f)
- var queries []string
-
- var query []byte
- for scanner.Scan() {
- b := scanner.Bytes()
-
- const prefix = "--bun:"
- if bytes.HasPrefix(b, []byte(prefix)) {
- b = b[len(prefix):]
- if bytes.Equal(b, []byte("split")) {
- queries = append(queries, string(query))
- query = query[:0]
- continue
- }
- return fmt.Errorf("bun: unknown directive: %q", b)
- }
-
- query = append(query, b...)
- query = append(query, '\n')
- }
-
- if len(query) > 0 {
- queries = append(queries, string(query))
- }
- if err := scanner.Err(); err != nil {
- return err
- }
-
- var idb bun.IConn
-
- if isTx {
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return err
- }
- idb = tx
- } else {
- conn, err := db.Conn(ctx)
- if err != nil {
- return err
- }
- idb = conn
- }
-
- var retErr error
- var execErr error
-
- defer func() {
- if tx, ok := idb.(bun.Tx); ok {
- if execErr != nil {
- retErr = tx.Rollback()
- } else {
- retErr = tx.Commit()
- }
- return
- }
-
- if conn, ok := idb.(bun.Conn); ok {
- retErr = conn.Close()
- return
- }
-
- panic("not reached")
- }()
-
- for _, q := range queries {
- _, execErr = idb.ExecContext(ctx, q)
- if execErr != nil {
- return execErr
- }
- }
-
- return retErr
-}
-
-const goTemplate = `package %s
-
-import (
- "context"
- "fmt"
-
- "github.com/uptrace/bun"
-)
-
-func init() {
- Migrations.MustRegister(func(ctx context.Context, db *bun.DB) error {
- fmt.Print(" [up migration] ")
- return nil
- }, func(ctx context.Context, db *bun.DB) error {
- fmt.Print(" [down migration] ")
- return nil
- })
-}
-`
-
-const sqlTemplate = `SET statement_timeout = 0;
-
---bun:split
-
-SELECT 1
-
---bun:split
-
-SELECT 2
-`
-
-const transactionalSQLTemplate = `SET statement_timeout = 0;
-
-SELECT 1;
-`
-
-//------------------------------------------------------------------------------
-
-type MigrationSlice []Migration
-
-func (ms MigrationSlice) String() string {
- if len(ms) == 0 {
- return "empty"
- }
-
- if len(ms) > 5 {
- return fmt.Sprintf("%d migrations (%s ... %s)", len(ms), ms[0].Name, ms[len(ms)-1].Name)
- }
-
- var sb strings.Builder
-
- for i := range ms {
- if i > 0 {
- sb.WriteString(", ")
- }
- sb.WriteString(ms[i].String())
- }
-
- return sb.String()
-}
-
-// Applied returns applied migrations in descending order
-// (the order is important and is used in Rollback).
-func (ms MigrationSlice) Applied() MigrationSlice {
- var applied MigrationSlice
- for i := range ms {
- if ms[i].IsApplied() {
- applied = append(applied, ms[i])
- }
- }
- sortDesc(applied)
- return applied
-}
-
-// Unapplied returns unapplied migrations in ascending order
-// (the order is important and is used in Migrate).
-func (ms MigrationSlice) Unapplied() MigrationSlice {
- var unapplied MigrationSlice
- for i := range ms {
- if !ms[i].IsApplied() {
- unapplied = append(unapplied, ms[i])
- }
- }
- sortAsc(unapplied)
- return unapplied
-}
-
-// LastGroupID returns the last applied migration group id.
-// The id is 0 when there are no migration groups.
-func (ms MigrationSlice) LastGroupID() int64 {
- var lastGroupID int64
- for i := range ms {
- groupID := ms[i].GroupID
- if groupID > lastGroupID {
- lastGroupID = groupID
- }
- }
- return lastGroupID
-}
-
-// LastGroup returns the last applied migration group.
-func (ms MigrationSlice) LastGroup() *MigrationGroup {
- group := &MigrationGroup{
- ID: ms.LastGroupID(),
- }
- if group.ID == 0 {
- return group
- }
- for i := range ms {
- if ms[i].GroupID == group.ID {
- group.Migrations = append(group.Migrations, ms[i])
- }
- }
- return group
-}
-
-type MigrationGroup struct {
- ID int64
- Migrations MigrationSlice
-}
-
-func (g MigrationGroup) IsZero() bool {
- return g.ID == 0 && len(g.Migrations) == 0
-}
-
-func (g MigrationGroup) String() string {
- if g.IsZero() {
- return "nil"
- }
- return fmt.Sprintf("group #%d (%s)", g.ID, g.Migrations)
-}
-
-type MigrationFile struct {
- Name string
- Path string
- Content string
-}
-
-//------------------------------------------------------------------------------
-
-type migrationConfig struct {
- nop bool
-}
-
-func newMigrationConfig(opts []MigrationOption) *migrationConfig {
- cfg := new(migrationConfig)
- for _, opt := range opts {
- opt(cfg)
- }
- return cfg
-}
-
-type MigrationOption func(cfg *migrationConfig)
-
-func WithNopMigration() MigrationOption {
- return func(cfg *migrationConfig) {
- cfg.nop = true
- }
-}
-
-//------------------------------------------------------------------------------
-
-func sortAsc(ms MigrationSlice) {
- sort.Slice(ms, func(i, j int) bool {
- return ms[i].Name < ms[j].Name
- })
-}
-
-func sortDesc(ms MigrationSlice) {
- sort.Slice(ms, func(i, j int) bool {
- return ms[i].Name > ms[j].Name
- })
-}
diff --git a/vendor/github.com/uptrace/bun/migrate/migrations.go b/vendor/github.com/uptrace/bun/migrate/migrations.go
deleted file mode 100644
index 1a7ea5668..000000000
--- a/vendor/github.com/uptrace/bun/migrate/migrations.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package migrate
-
-import (
- "errors"
- "fmt"
- "io/fs"
- "os"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
-)
-
-type MigrationsOption func(m *Migrations)
-
-func WithMigrationsDirectory(directory string) MigrationsOption {
- return func(m *Migrations) {
- m.explicitDirectory = directory
- }
-}
-
-type Migrations struct {
- ms MigrationSlice
-
- explicitDirectory string
- implicitDirectory string
-}
-
-func NewMigrations(opts ...MigrationsOption) *Migrations {
- m := new(Migrations)
- for _, opt := range opts {
- opt(m)
- }
- m.implicitDirectory = filepath.Dir(migrationFile())
- return m
-}
-
-func (m *Migrations) Sorted() MigrationSlice {
- migrations := make(MigrationSlice, len(m.ms))
- copy(migrations, m.ms)
- sortAsc(migrations)
- return migrations
-}
-
-func (m *Migrations) MustRegister(up, down MigrationFunc) {
- if err := m.Register(up, down); err != nil {
- panic(err)
- }
-}
-
-func (m *Migrations) Register(up, down MigrationFunc) error {
- fpath := migrationFile()
- name, comment, err := extractMigrationName(fpath)
- if err != nil {
- return err
- }
-
- m.Add(Migration{
- Name: name,
- Comment: comment,
- Up: up,
- Down: down,
- })
-
- return nil
-}
-
-func (m *Migrations) Add(migration Migration) {
- if migration.Name == "" {
- panic("migration name is required")
- }
- m.ms = append(m.ms, migration)
-}
-
-func (m *Migrations) DiscoverCaller() error {
- dir := filepath.Dir(migrationFile())
- return m.Discover(os.DirFS(dir))
-}
-
-func (m *Migrations) Discover(fsys fs.FS) error {
- return fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
- if err != nil {
- return err
- }
- if d.IsDir() {
- return nil
- }
-
- if !strings.HasSuffix(path, ".up.sql") && !strings.HasSuffix(path, ".down.sql") {
- return nil
- }
-
- name, comment, err := extractMigrationName(path)
- if err != nil {
- return err
- }
-
- migration := m.getOrCreateMigration(name)
- migration.Comment = comment
- migrationFunc := NewSQLMigrationFunc(fsys, path)
-
- if strings.HasSuffix(path, ".up.sql") {
- migration.Up = migrationFunc
- return nil
- }
- if strings.HasSuffix(path, ".down.sql") {
- migration.Down = migrationFunc
- return nil
- }
-
- return errors.New("migrate: not reached")
- })
-}
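Editorial note: a sketch of the consumer package implied by the goTemplate shown earlier and by Register/Discover — a package-level Migrations collection that picks up embedded SQL files. The package layout and file names are conventions, not requirements of the API.

package migrations

import (
    "embed"

    "github.com/uptrace/bun/migrate"
)

//go:embed *.sql
var sqlMigrations embed.FS

// Migrations is the collection the application's Migrator is built from.
var Migrations = migrate.NewMigrations()

func init() {
    // Pick up any <version>_<name>.up.sql / .down.sql files embedded above.
    if err := Migrations.Discover(sqlMigrations); err != nil {
        panic(err)
    }
}

// Go migrations registered with Migrations.MustRegister (see goTemplate) must live in
// files named like 20240102150405_add_users.go, because Register derives the migration
// name and comment from the caller's file name.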
-
-func (m *Migrations) getOrCreateMigration(name string) *Migration {
- for i := range m.ms {
- m := &m.ms[i]
- if m.Name == name {
- return m
- }
- }
-
- m.ms = append(m.ms, Migration{Name: name})
- return &m.ms[len(m.ms)-1]
-}
-
-func (m *Migrations) getDirectory() string {
- if m.explicitDirectory != "" {
- return m.explicitDirectory
- }
- if m.implicitDirectory != "" {
- return m.implicitDirectory
- }
- return filepath.Dir(migrationFile())
-}
-
-func migrationFile() string {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(1, pcs[:])
- frames := runtime.CallersFrames(pcs[:n])
-
- for {
- f, ok := frames.Next()
- if !ok {
- break
- }
- if !strings.Contains(f.Function, "/bun/migrate.") {
- return f.File
- }
- }
-
- return ""
-}
-
-var fnameRE = regexp.MustCompile(`^(\d{1,14})_([0-9a-z_\-]+)\.`)
-
-func extractMigrationName(fpath string) (string, string, error) {
- fname := filepath.Base(fpath)
-
- matches := fnameRE.FindStringSubmatch(fname)
- if matches == nil {
- return "", "", fmt.Errorf("migrate: unsupported migration name format: %q", fname)
- }
-
- return matches[1], matches[2], nil
-}
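Editorial note: a standalone illustration of the filename convention enforced by fnameRE above — a numeric version of up to 14 digits, an underscore, and a lowercase name, followed by the extension. The example filename is hypothetical.

package example

import (
    "fmt"
    "regexp"
)

// Same pattern as fnameRE in this package.
var fname = regexp.MustCompile(`^(\d{1,14})_([0-9a-z_\-]+)\.`)

func parseExample() {
    m := fname.FindStringSubmatch("20240102150405_add_users.up.sql")
    fmt.Println(m[1]) // "20240102150405" -> becomes Migration.Name
    fmt.Println(m[2]) // "add_users"      -> becomes Migration.Comment
}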
diff --git a/vendor/github.com/uptrace/bun/migrate/migrator.go b/vendor/github.com/uptrace/bun/migrate/migrator.go
deleted file mode 100644
index d5a72aec0..000000000
--- a/vendor/github.com/uptrace/bun/migrate/migrator.go
+++ /dev/null
@@ -1,458 +0,0 @@
-package migrate
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "regexp"
- "time"
-
- "github.com/uptrace/bun"
-)
-
-const (
- defaultTable = "bun_migrations"
- defaultLocksTable = "bun_migration_locks"
-)
-
-type MigratorOption func(m *Migrator)
-
-// WithTableName overrides default migrations table name.
-func WithTableName(table string) MigratorOption {
- return func(m *Migrator) {
- m.table = table
- }
-}
-
-// WithLocksTableName overrides default migration locks table name.
-func WithLocksTableName(table string) MigratorOption {
- return func(m *Migrator) {
- m.locksTable = table
- }
-}
-
-// WithMarkAppliedOnSuccess sets the migrator to only mark migrations as applied/unapplied
-// when their up/down is successful.
-func WithMarkAppliedOnSuccess(enabled bool) MigratorOption {
- return func(m *Migrator) {
- m.markAppliedOnSuccess = enabled
- }
-}
-
-type Migrator struct {
- db *bun.DB
- migrations *Migrations
-
- ms MigrationSlice
-
- table string
- locksTable string
- markAppliedOnSuccess bool
-}
-
-func NewMigrator(db *bun.DB, migrations *Migrations, opts ...MigratorOption) *Migrator {
- m := &Migrator{
- db: db,
- migrations: migrations,
-
- ms: migrations.ms,
-
- table: defaultTable,
- locksTable: defaultLocksTable,
- }
- for _, opt := range opts {
- opt(m)
- }
- return m
-}
-
-func (m *Migrator) DB() *bun.DB {
- return m.db
-}
-
-// MigrationsWithStatus returns migrations with status in ascending order.
-func (m *Migrator) MigrationsWithStatus(ctx context.Context) (MigrationSlice, error) {
- sorted, _, err := m.migrationsWithStatus(ctx)
- return sorted, err
-}
-
-func (m *Migrator) migrationsWithStatus(ctx context.Context) (MigrationSlice, int64, error) {
- sorted := m.migrations.Sorted()
-
- applied, err := m.AppliedMigrations(ctx)
- if err != nil {
- return nil, 0, err
- }
-
- appliedMap := migrationMap(applied)
- for i := range sorted {
- m1 := &sorted[i]
- if m2, ok := appliedMap[m1.Name]; ok {
- m1.ID = m2.ID
- m1.GroupID = m2.GroupID
- m1.MigratedAt = m2.MigratedAt
- }
- }
-
- return sorted, applied.LastGroupID(), nil
-}
-
-func (m *Migrator) Init(ctx context.Context) error {
- if _, err := m.db.NewCreateTable().
- Model((*Migration)(nil)).
- ModelTableExpr(m.table).
- IfNotExists().
- Exec(ctx); err != nil {
- return err
- }
- if _, err := m.db.NewCreateTable().
- Model((*migrationLock)(nil)).
- ModelTableExpr(m.locksTable).
- IfNotExists().
- Exec(ctx); err != nil {
- return err
- }
- return nil
-}
-
-func (m *Migrator) Reset(ctx context.Context) error {
- if _, err := m.db.NewDropTable().
- Model((*Migration)(nil)).
- ModelTableExpr(m.table).
- IfExists().
- Exec(ctx); err != nil {
- return err
- }
- if _, err := m.db.NewDropTable().
- Model((*migrationLock)(nil)).
- ModelTableExpr(m.locksTable).
- IfExists().
- Exec(ctx); err != nil {
- return err
- }
- return m.Init(ctx)
-}
-
-// Migrate runs unapplied migrations. If a migration fails, migrate immediately exits.
-func (m *Migrator) Migrate(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
- cfg := newMigrationConfig(opts)
-
- if err := m.validate(); err != nil {
- return nil, err
- }
-
- migrations, lastGroupID, err := m.migrationsWithStatus(ctx)
- if err != nil {
- return nil, err
- }
- migrations = migrations.Unapplied()
-
- group := new(MigrationGroup)
- if len(migrations) == 0 {
- return group, nil
- }
- group.ID = lastGroupID + 1
-
- for i := range migrations {
- migration := &migrations[i]
- migration.GroupID = group.ID
-
- if !m.markAppliedOnSuccess {
- if err := m.MarkApplied(ctx, migration); err != nil {
- return group, err
- }
- }
-
- group.Migrations = migrations[:i+1]
-
- if !cfg.nop && migration.Up != nil {
- if err := migration.Up(ctx, m.db); err != nil {
- return group, err
- }
- }
-
- if m.markAppliedOnSuccess {
- if err := m.MarkApplied(ctx, migration); err != nil {
- return group, err
- }
- }
- }
-
- return group, nil
-}
-
-func (m *Migrator) Rollback(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
- cfg := newMigrationConfig(opts)
-
- if err := m.validate(); err != nil {
- return nil, err
- }
-
- migrations, err := m.MigrationsWithStatus(ctx)
- if err != nil {
- return nil, err
- }
-
- lastGroup := migrations.LastGroup()
-
- for i := len(lastGroup.Migrations) - 1; i >= 0; i-- {
- migration := &lastGroup.Migrations[i]
-
- if !m.markAppliedOnSuccess {
- if err := m.MarkUnapplied(ctx, migration); err != nil {
- return lastGroup, err
- }
- }
-
- if !cfg.nop && migration.Down != nil {
- if err := migration.Down(ctx, m.db); err != nil {
- return lastGroup, err
- }
- }
-
- if m.markAppliedOnSuccess {
- if err := m.MarkUnapplied(ctx, migration); err != nil {
- return lastGroup, err
- }
- }
- }
-
- return lastGroup, nil
-}
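Editorial note: an end-to-end sketch of driving the Migrator defined above — Init, Lock, Migrate, Unlock — assuming an already-configured *bun.DB and a populated *migrate.Migrations collection.

package example

import (
    "context"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

// runMigrations applies all unapplied migrations as a single group.
func runMigrations(ctx context.Context, db *bun.DB, migrations *migrate.Migrations) error {
    migrator := migrate.NewMigrator(db, migrations, migrate.WithMarkAppliedOnSuccess(true))

    // Create the bun_migrations and bun_migration_locks tables if they do not exist yet.
    if err := migrator.Init(ctx); err != nil {
        return err
    }

    // Guard against concurrent runs; Unlock removes the row from the locks table.
    if err := migrator.Lock(ctx); err != nil {
        return err
    }
    defer migrator.Unlock(ctx) //nolint:errcheck

    group, err := migrator.Migrate(ctx)
    if err != nil {
        return err
    }
    _ = group // group.Migrations lists what was applied; Rollback(ctx) would revert this group
    return nil
}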
-
-type goMigrationConfig struct {
- packageName string
- goTemplate string
-}
-
-type GoMigrationOption func(cfg *goMigrationConfig)
-
-func WithPackageName(name string) GoMigrationOption {
- return func(cfg *goMigrationConfig) {
- cfg.packageName = name
- }
-}
-
-func WithGoTemplate(template string) GoMigrationOption {
- return func(cfg *goMigrationConfig) {
- cfg.goTemplate = template
- }
-}
-
-// CreateGoMigration creates a Go migration file.
-func (m *Migrator) CreateGoMigration(
- ctx context.Context, name string, opts ...GoMigrationOption,
-) (*MigrationFile, error) {
- cfg := &goMigrationConfig{
- packageName: "migrations",
- goTemplate: goTemplate,
- }
- for _, opt := range opts {
- opt(cfg)
- }
-
- name, err := genMigrationName(name)
- if err != nil {
- return nil, err
- }
-
- fname := name + ".go"
- fpath := filepath.Join(m.migrations.getDirectory(), fname)
- content := fmt.Sprintf(cfg.goTemplate, cfg.packageName)
-
- if err := os.WriteFile(fpath, []byte(content), 0o644); err != nil {
- return nil, err
- }
-
- mf := &MigrationFile{
- Name: fname,
- Path: fpath,
- Content: content,
- }
- return mf, nil
-}
-
-// CreateTxSQLMigrations creates transactional up and down SQL migration files.
-func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
- name, err := genMigrationName(name)
- if err != nil {
- return nil, err
- }
-
- up, err := m.createSQL(ctx, name+".tx.up.sql", true)
- if err != nil {
- return nil, err
- }
-
- down, err := m.createSQL(ctx, name+".tx.down.sql", true)
- if err != nil {
- return nil, err
- }
-
- return []*MigrationFile{up, down}, nil
-}
-
-// CreateSQLMigrations creates up and down SQL migration files.
-func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
- name, err := genMigrationName(name)
- if err != nil {
- return nil, err
- }
-
- up, err := m.createSQL(ctx, name+".up.sql", false)
- if err != nil {
- return nil, err
- }
-
- down, err := m.createSQL(ctx, name+".down.sql", false)
- if err != nil {
- return nil, err
- }
-
- return []*MigrationFile{up, down}, nil
-}
-
-func (m *Migrator) createSQL(_ context.Context, fname string, transactional bool) (*MigrationFile, error) {
- fpath := filepath.Join(m.migrations.getDirectory(), fname)
-
- template := sqlTemplate
- if transactional {
- template = transactionalSQLTemplate
- }
-
- if err := os.WriteFile(fpath, []byte(template), 0o644); err != nil {
- return nil, err
- }
-
- mf := &MigrationFile{
- Name: fname,
- Path: fpath,
- Content: template,
- }
- return mf, nil
-}
-
-var nameRE = regexp.MustCompile(`^[0-9a-z_\-]+$`)
-
-func genMigrationName(name string) (string, error) {
- const timeFormat = "20060102150405"
-
- if name == "" {
- return "", errors.New("migrate: migration name can't be empty")
- }
- if !nameRE.MatchString(name) {
- return "", fmt.Errorf("migrate: invalid migration name: %q", name)
- }
-
- version := time.Now().UTC().Format(timeFormat)
- return fmt.Sprintf("%s_%s", version, name), nil
-}
-
-// MarkApplied marks the migration as applied (completed).
-func (m *Migrator) MarkApplied(ctx context.Context, migration *Migration) error {
- _, err := m.db.NewInsert().Model(migration).
- ModelTableExpr(m.table).
- Exec(ctx)
- return err
-}
-
-// MarkUnapplied marks the migration as unapplied (new).
-func (m *Migrator) MarkUnapplied(ctx context.Context, migration *Migration) error {
- _, err := m.db.NewDelete().
- Model(migration).
- ModelTableExpr(m.table).
- Where("id = ?", migration.ID).
- Exec(ctx)
- return err
-}
-
-func (m *Migrator) TruncateTable(ctx context.Context) error {
- _, err := m.db.NewTruncateTable().
- Model((*Migration)(nil)).
- ModelTableExpr(m.table).
- Exec(ctx)
- return err
-}
-
-// MissingMigrations returns applied migrations that can no longer be found.
-func (m *Migrator) MissingMigrations(ctx context.Context) (MigrationSlice, error) {
- applied, err := m.AppliedMigrations(ctx)
- if err != nil {
- return nil, err
- }
-
- existing := migrationMap(m.migrations.ms)
- for i := len(applied) - 1; i >= 0; i-- {
- m := &applied[i]
- if _, ok := existing[m.Name]; ok {
- applied = append(applied[:i], applied[i+1:]...)
- }
- }
-
- return applied, nil
-}
-
-// AppliedMigrations selects applied (completed) migrations in descending order.
-func (m *Migrator) AppliedMigrations(ctx context.Context) (MigrationSlice, error) {
- var ms MigrationSlice
- if err := m.db.NewSelect().
- ColumnExpr("*").
- Model(&ms).
- ModelTableExpr(m.table).
- Scan(ctx); err != nil {
- return nil, err
- }
- return ms, nil
-}
-
-func (m *Migrator) formattedTableName(db *bun.DB) string {
- return db.Formatter().FormatQuery(m.table)
-}
-
-func (m *Migrator) validate() error {
- if len(m.ms) == 0 {
- return errors.New("migrate: there are no migrations")
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type migrationLock struct {
- ID int64 `bun:",pk,autoincrement"`
- TableName string `bun:",unique"`
-}
-
-func (m *Migrator) Lock(ctx context.Context) error {
- lock := &migrationLock{
- TableName: m.formattedTableName(m.db),
- }
- if _, err := m.db.NewInsert().
- Model(lock).
- ModelTableExpr(m.locksTable).
- Exec(ctx); err != nil {
- return fmt.Errorf("migrate: migrations table is already locked (%w)", err)
- }
- return nil
-}
-
-func (m *Migrator) Unlock(ctx context.Context) error {
- tableName := m.formattedTableName(m.db)
- _, err := m.db.NewDelete().
- Model((*migrationLock)(nil)).
- ModelTableExpr(m.locksTable).
- Where("? = ?", bun.Ident("table_name"), tableName).
- Exec(ctx)
- return err
-}
-
-func migrationMap(ms MigrationSlice) map[string]*Migration {
- mp := make(map[string]*Migration)
- for i := range ms {
- m := &ms[i]
- mp[m.Name] = m
- }
- return mp
-}
diff --git a/vendor/github.com/uptrace/bun/migrate/operations.go b/vendor/github.com/uptrace/bun/migrate/operations.go
deleted file mode 100644
index 7b749c5a0..000000000
--- a/vendor/github.com/uptrace/bun/migrate/operations.go
+++ /dev/null
@@ -1,340 +0,0 @@
-package migrate
-
-import (
- "fmt"
-
- "github.com/uptrace/bun/migrate/sqlschema"
-)
-
-// Operation encapsulates the request to change a database definition
-// and knows which operation can revert it.
-//
-// It is useful to define "monolith" Operations whenever possible,
-// even though a dialect may require several distinct steps to apply them.
-// For example, changing a primary key involves first dropping the old constraint
-// before generating the new one. Yet, this is only an implementation detail and
-// passing a higher-level ChangePrimaryKeyOp will give the dialect more information
-// about the applied change.
-//
-// Some operations might be irreversible due to technical limitations. Returning
-// a *comment from GetReverse() will add an explanatory note to the generated migration file.
-//
-// To declare a dependency on another Operation, operations should implement the
-// { DependsOn(Operation) bool } interface, which the changeset will use to resolve dependencies.
-type Operation interface {
- GetReverse() Operation
-}
-
-// CreateTableOp creates a new table in the schema.
-//
-// It does not report dependency on any other migration and may be executed first.
-// Make sure the dialect does not include FOREIGN KEY constraints in the CREATE TABLE
-// statement, as those may potentially reference not-yet-existing columns/tables.
-type CreateTableOp struct {
- TableName string
- Model interface{}
-}
-
-var _ Operation = (*CreateTableOp)(nil)
-
-func (op *CreateTableOp) GetReverse() Operation {
- return &DropTableOp{TableName: op.TableName}
-}
-
-// DropTableOp drops a database table. This operation is not reversible.
-type DropTableOp struct {
- TableName string
-}
-
-var _ Operation = (*DropTableOp)(nil)
-
-func (op *DropTableOp) DependsOn(another Operation) bool {
- drop, ok := another.(*DropForeignKeyOp)
- return ok && drop.ForeignKey.DependsOnTable(op.TableName)
-}
-
-// GetReverse for a DropTable returns a no-op migration. Logically, CreateTable is the reverse,
-// but DropTable does not have the table's definition to create one.
-func (op *DropTableOp) GetReverse() Operation {
- c := comment(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName))
- return &c
-}
-
-// RenameTableOp renames the table. Changing the "schema" part of the table's FQN (moving tables between schemas) is not allowed.
-type RenameTableOp struct {
- TableName string
- NewName string
-}
-
-var _ Operation = (*RenameTableOp)(nil)
-
-func (op *RenameTableOp) GetReverse() Operation {
- return &RenameTableOp{
- TableName: op.NewName,
- NewName: op.TableName,
- }
-}
-
-// RenameColumnOp renames a column in the table. If the changeset includes a rename operation
-// for the column's table, it should be executed first.
-type RenameColumnOp struct {
- TableName string
- OldName string
- NewName string
-}
-
-var _ Operation = (*RenameColumnOp)(nil)
-
-func (op *RenameColumnOp) GetReverse() Operation {
- return &RenameColumnOp{
- TableName: op.TableName,
- OldName: op.NewName,
- NewName: op.OldName,
- }
-}
-
-func (op *RenameColumnOp) DependsOn(another Operation) bool {
- rename, ok := another.(*RenameTableOp)
- return ok && op.TableName == rename.NewName
-}
-
-// AddColumnOp adds a new column to the table.
-type AddColumnOp struct {
- TableName string
- ColumnName string
- Column sqlschema.Column
-}
-
-var _ Operation = (*AddColumnOp)(nil)
-
-func (op *AddColumnOp) GetReverse() Operation {
- return &DropColumnOp{
- TableName: op.TableName,
- ColumnName: op.ColumnName,
- Column: op.Column,
- }
-}
-
-// DropColumnOp drops a column from the table.
-//
-// While some dialects allow DROP CASCADE to drop dependent constraints,
-// explicit handling of constraints is preferred for transparency and debugging.
-// DropColumnOp depends on DropForeignKeyOp, DropPrimaryKeyOp, and ChangePrimaryKeyOp
-// if any of the constraints is defined on this table.
-type DropColumnOp struct {
- TableName string
- ColumnName string
- Column sqlschema.Column
-}
-
-var _ Operation = (*DropColumnOp)(nil)
-
-func (op *DropColumnOp) GetReverse() Operation {
- return &AddColumnOp{
- TableName: op.TableName,
- ColumnName: op.ColumnName,
- Column: op.Column,
- }
-}
-
-func (op *DropColumnOp) DependsOn(another Operation) bool {
- switch drop := another.(type) {
- case *DropForeignKeyOp:
- return drop.ForeignKey.DependsOnColumn(op.TableName, op.ColumnName)
- case *DropPrimaryKeyOp:
- return op.TableName == drop.TableName && drop.PrimaryKey.Columns.Contains(op.ColumnName)
- case *ChangePrimaryKeyOp:
- return op.TableName == drop.TableName && drop.Old.Columns.Contains(op.ColumnName)
- }
- return false
-}
-
-// AddForeignKeyOp adds a new FOREIGN KEY constraint.
-type AddForeignKeyOp struct {
- ForeignKey sqlschema.ForeignKey
- ConstraintName string
-}
-
-var _ Operation = (*AddForeignKeyOp)(nil)
-
-func (op *AddForeignKeyOp) TableName() string {
- return op.ForeignKey.From.TableName
-}
-
-func (op *AddForeignKeyOp) DependsOn(another Operation) bool {
- switch another := another.(type) {
- case *RenameTableOp:
- return op.ForeignKey.DependsOnTable(another.TableName) || op.ForeignKey.DependsOnTable(another.NewName)
- case *CreateTableOp:
- return op.ForeignKey.DependsOnTable(another.TableName)
- }
- return false
-}
-
-func (op *AddForeignKeyOp) GetReverse() Operation {
- return &DropForeignKeyOp{
- ForeignKey: op.ForeignKey,
- ConstraintName: op.ConstraintName,
- }
-}
-
-// DropForeignKeyOp drops a FOREIGN KEY constraint.
-type DropForeignKeyOp struct {
- ForeignKey sqlschema.ForeignKey
- ConstraintName string
-}
-
-var _ Operation = (*DropForeignKeyOp)(nil)
-
-func (op *DropForeignKeyOp) TableName() string {
- return op.ForeignKey.From.TableName
-}
-
-func (op *DropForeignKeyOp) GetReverse() Operation {
- return &AddForeignKeyOp{
- ForeignKey: op.ForeignKey,
- ConstraintName: op.ConstraintName,
- }
-}
-
-// AddUniqueConstraintOp adds a new UNIQUE constraint to the table.
-type AddUniqueConstraintOp struct {
- TableName string
- Unique sqlschema.Unique
-}
-
-var _ Operation = (*AddUniqueConstraintOp)(nil)
-
-func (op *AddUniqueConstraintOp) GetReverse() Operation {
- return &DropUniqueConstraintOp{
- TableName: op.TableName,
- Unique: op.Unique,
- }
-}
-
-func (op *AddUniqueConstraintOp) DependsOn(another Operation) bool {
- switch another := another.(type) {
- case *AddColumnOp:
- return op.TableName == another.TableName && op.Unique.Columns.Contains(another.ColumnName)
- case *RenameTableOp:
- return op.TableName == another.NewName
- case *DropUniqueConstraintOp:
- // We want to drop the constraint with the same name before adding this one.
- return op.TableName == another.TableName && op.Unique.Name == another.Unique.Name
- default:
- return false
- }
-
-}
-
-// DropUniqueConstraintOp drops a UNIQUE constraint.
-type DropUniqueConstraintOp struct {
- TableName string
- Unique sqlschema.Unique
-}
-
-var _ Operation = (*DropUniqueConstraintOp)(nil)
-
-func (op *DropUniqueConstraintOp) DependsOn(another Operation) bool {
- if rename, ok := another.(*RenameTableOp); ok {
- return op.TableName == rename.NewName
- }
- return false
-}
-
-func (op *DropUniqueConstraintOp) GetReverse() Operation {
- return &AddUniqueConstraintOp{
- TableName: op.TableName,
- Unique: op.Unique,
- }
-}
-
-// ChangeColumnTypeOp sets a new data type for the column.
-// The two types should be such that the data can be auto-cast from one to another.
-// E.g. reducing VARCHAR length is not possible in most dialects.
-// AutoMigrator does not enforce or validate these rules.
-type ChangeColumnTypeOp struct {
- TableName string
- Column string
- From sqlschema.Column
- To sqlschema.Column
-}
-
-var _ Operation = (*ChangeColumnTypeOp)(nil)
-
-func (op *ChangeColumnTypeOp) GetReverse() Operation {
- return &ChangeColumnTypeOp{
- TableName: op.TableName,
- Column: op.Column,
- From: op.To,
- To: op.From,
- }
-}
-
-// DropPrimaryKeyOp drops the table's PRIMARY KEY.
-type DropPrimaryKeyOp struct {
- TableName string
- PrimaryKey sqlschema.PrimaryKey
-}
-
-var _ Operation = (*DropPrimaryKeyOp)(nil)
-
-func (op *DropPrimaryKeyOp) GetReverse() Operation {
- return &AddPrimaryKeyOp{
- TableName: op.TableName,
- PrimaryKey: op.PrimaryKey,
- }
-}
-
-// AddPrimaryKeyOp adds a new PRIMARY KEY to the table.
-type AddPrimaryKeyOp struct {
- TableName string
- PrimaryKey sqlschema.PrimaryKey
-}
-
-var _ Operation = (*AddPrimaryKeyOp)(nil)
-
-func (op *AddPrimaryKeyOp) GetReverse() Operation {
- return &DropPrimaryKeyOp{
- TableName: op.TableName,
- PrimaryKey: op.PrimaryKey,
- }
-}
-
-func (op *AddPrimaryKeyOp) DependsOn(another Operation) bool {
- switch another := another.(type) {
- case *AddColumnOp:
- return op.TableName == another.TableName && op.PrimaryKey.Columns.Contains(another.ColumnName)
- }
- return false
-}
-
-// ChangePrimaryKeyOp changes the PRIMARY KEY of the table.
-type ChangePrimaryKeyOp struct {
- TableName string
- Old sqlschema.PrimaryKey
- New sqlschema.PrimaryKey
-}
-
-var _ Operation = (*ChangePrimaryKeyOp)(nil)
-
-func (op *ChangePrimaryKeyOp) GetReverse() Operation {
- return &ChangePrimaryKeyOp{
- TableName: op.TableName,
- Old: op.New,
- New: op.Old,
- }
-}
-
-// comment denotes an Operation that cannot be executed.
-//
-// Operations that cannot be reversed due to current technical limitations
-// may return a &comment with a helpful message from their GetReverse() method.
-//
-// The changelog should skip it when applying operations, or output it as a log message,
-// and write it as an SQL comment when creating migration files.
-type comment string
-
-var _ Operation = (*comment)(nil)
-
-func (c *comment) GetReverse() Operation { return c }
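The constraint operations removed above come in reversible pairs: GetReverse returns the mirror operation, so a changelog can be undone by replaying the reverses in reverse order. A minimal usage sketch against this API (table and constraint names are illustrative, not taken from the source):

package main

import (
	"fmt"

	"github.com/uptrace/bun/migrate"
	"github.com/uptrace/bun/migrate/sqlschema"
)

func main() {
	// Adding a UNIQUE constraint reverses into dropping that same constraint.
	add := &migrate.AddUniqueConstraintOp{
		TableName: "users",
		Unique:    sqlschema.Unique{Name: "uq_users_email", Columns: sqlschema.NewColumns("email")},
	}
	drop := add.GetReverse()
	fmt.Printf("%T reverses to %T\n", add, drop) // *migrate.AddUniqueConstraintOp reverses to *migrate.DropUniqueConstraintOp
}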
diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/column.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/column.go
deleted file mode 100644
index 60f7ea8a6..000000000
--- a/vendor/github.com/uptrace/bun/migrate/sqlschema/column.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package sqlschema
-
-import (
- "fmt"
-
- "github.com/uptrace/bun/schema"
-)
-
-type Column interface {
- GetName() string
- GetSQLType() string
- GetVarcharLen() int
- GetDefaultValue() string
- GetIsNullable() bool
- GetIsAutoIncrement() bool
- GetIsIdentity() bool
- AppendQuery(schema.Formatter, []byte) ([]byte, error)
-}
-
-var _ Column = (*BaseColumn)(nil)
-
-// BaseColumn is a base column definition that stores various attributes of a column.
-//
-// Dialects and only dialects can use it to implement the Column interface.
-// Other packages must use the Column interface.
-type BaseColumn struct {
- Name string
- SQLType string
- VarcharLen int
- DefaultValue string
- IsNullable bool
- IsAutoIncrement bool
- IsIdentity bool
- // TODO: add Precision and Cardinality for timestamps/bit-strings/floats and arrays respectively.
-}
-
-func (cd BaseColumn) GetName() string {
- return cd.Name
-}
-
-func (cd BaseColumn) GetSQLType() string {
- return cd.SQLType
-}
-
-func (cd BaseColumn) GetVarcharLen() int {
- return cd.VarcharLen
-}
-
-func (cd BaseColumn) GetDefaultValue() string {
- return cd.DefaultValue
-}
-
-func (cd BaseColumn) GetIsNullable() bool {
- return cd.IsNullable
-}
-
-func (cd BaseColumn) GetIsAutoIncrement() bool {
- return cd.IsAutoIncrement
-}
-
-func (cd BaseColumn) GetIsIdentity() bool {
- return cd.IsIdentity
-}
-
-// AppendQuery appends the full SQL data type.
-func (c *BaseColumn) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
- b = append(b, c.SQLType...)
- if c.VarcharLen == 0 {
- return b, nil
- }
- b = append(b, "("...)
- b = append(b, fmt.Sprint(c.VarcharLen)...)
- b = append(b, ")"...)
- return b, nil
-}
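BaseColumn.AppendQuery only renders the SQL type plus an optional VARCHAR length, and it does not use the formatter argument, so a zero-value formatter is enough for illustration. A minimal sketch (assumed usage, not from the source):

package main

import (
	"fmt"

	"github.com/uptrace/bun/migrate/sqlschema"
	"github.com/uptrace/bun/schema"
)

func main() {
	col := &sqlschema.BaseColumn{SQLType: "varchar", VarcharLen: 255}
	b, err := col.AppendQuery(schema.Formatter{}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // varchar(255)
}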
diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go
deleted file mode 100644
index eb7476c54..000000000
--- a/vendor/github.com/uptrace/bun/migrate/sqlschema/database.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package sqlschema
-
-import (
- "slices"
- "strings"
-
- "github.com/uptrace/bun/internal/ordered"
- "github.com/uptrace/bun/schema"
-)
-
-type Database interface {
- GetTables() *ordered.Map[string, Table]
- GetForeignKeys() map[ForeignKey]string
-}
-
-var _ Database = (*BaseDatabase)(nil)
-
-// BaseDatabase is a base database definition.
-//
-// Dialects and only dialects can use it to implement the Database interface.
-// Other packages must use the Database interface.
-type BaseDatabase struct {
- Tables *ordered.Map[string, Table]
- ForeignKeys map[ForeignKey]string
-}
-
-func (ds BaseDatabase) GetTables() *ordered.Map[string, Table] {
- return ds.Tables
-}
-
-func (ds BaseDatabase) GetForeignKeys() map[ForeignKey]string {
- return ds.ForeignKeys
-}
-
-type ForeignKey struct {
- From ColumnReference
- To ColumnReference
-}
-
-func NewColumnReference(tableName string, columns ...string) ColumnReference {
- return ColumnReference{
- TableName: tableName,
- Column: NewColumns(columns...),
- }
-}
-
-func (fk ForeignKey) DependsOnTable(tableName string) bool {
- return fk.From.TableName == tableName || fk.To.TableName == tableName
-}
-
-func (fk ForeignKey) DependsOnColumn(tableName string, column string) bool {
- return fk.DependsOnTable(tableName) &&
- (fk.From.Column.Contains(column) || fk.To.Column.Contains(column))
-}
-
-// Columns is a hashable representation of []string used to define schema constraints that depend on multiple columns.
-// Although having duplicate column references in such constraints is illegal, Columns neither validates nor enforces this rule on the caller.
-type Columns string
-
-// NewColumns creates a composite column from a slice of column names.
-func NewColumns(columns ...string) Columns {
- slices.Sort(columns)
- return Columns(strings.Join(columns, ","))
-}
-
-func (c *Columns) String() string {
- return string(*c)
-}
-
-func (c *Columns) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
- return schema.Safe(*c).AppendQuery(fmter, b)
-}
-
-// Split returns a slice of column names that make up the composite.
-func (c *Columns) Split() []string {
- return strings.Split(c.String(), ",")
-}
-
-// ContainsColumns checks that the columns in "other" are a subset of the current columns.
-func (c *Columns) ContainsColumns(other Columns) bool {
- columns := c.Split()
-Outer:
- for _, check := range other.Split() {
- for _, column := range columns {
- if check == column {
- continue Outer
- }
- }
- return false
- }
- return true
-}
-
-// Contains checks that the composite contains the given column.
-func (c *Columns) Contains(other string) bool {
- return c.ContainsColumns(Columns(other))
-}
-
-// Replace renames a column if it is part of the composite.
-// Only the matching column is renamed; the rest of the composite is left unchanged.
-func (c *Columns) Replace(oldColumn, newColumn string) bool {
- columns := c.Split()
- for i, column := range columns {
- if column == oldColumn {
- columns[i] = newColumn
- *c = NewColumns(columns...)
- return true
- }
- }
- return false
-}
-
-// Unique represents a unique constraint defined on 1 or more columns.
-type Unique struct {
- Name string
- Columns Columns
-}
-
-// Equals checks that two unique constraints are the same, assuming both are defined for the same table.
-func (u Unique) Equals(other Unique) bool {
- return u.Columns == other.Columns
-}
-
-type ColumnReference struct {
- TableName string
- Column Columns
-}
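Columns normalizes a set of column names into a single sorted, comma-joined string, which is what makes it usable as a map key (for example in BaseDatabase.ForeignKeys). A small sketch of the exported helpers (table and column names are illustrative):

package main

import (
	"fmt"

	"github.com/uptrace/bun/migrate/sqlschema"
)

func main() {
	cols := sqlschema.NewColumns("last_name", "first_name") // sorted and joined: "first_name,last_name"
	fmt.Println(cols.String())
	fmt.Println(cols.Contains("first_name")) // true

	cols.Replace("last_name", "surname") // renames the matching column and re-sorts
	fmt.Println(cols.Split())            // [first_name surname]

	fk := sqlschema.ForeignKey{
		From: sqlschema.NewColumnReference("books", "author_id"),
		To:   sqlschema.NewColumnReference("authors", "id"),
	}
	fmt.Println(fk.DependsOnColumn("books", "author_id")) // true
}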
diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go
deleted file mode 100644
index 19d1dc469..000000000
--- a/vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package sqlschema
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/uptrace/bun"
- "github.com/uptrace/bun/internal/ordered"
- "github.com/uptrace/bun/schema"
-)
-
-type InspectorDialect interface {
- schema.Dialect
-
- // NewInspector returns a new instance of Inspector for the dialect.
- // Dialects MAY set their default InspectorConfig values in the constructor
- // but MUST apply InspectorOptions to ensure they can be overridden.
- //
- // Use ApplyInspectorOptions to reduce boilerplate.
- NewInspector(db *bun.DB, options ...InspectorOption) Inspector
-
- // CompareType returns true if col1 and col2 SQL types are equivalent,
- // i.e. they might use dialect-specific type aliases (SERIAL ~ SMALLINT)
- // or specify the same VARCHAR length differently (VARCHAR(255) ~ VARCHAR).
- CompareType(Column, Column) bool
-}
-
-// InspectorConfig controls the scope of migration by limiting the objects Inspector should return.
-// Inspectors SHOULD use the configuration directly instead of copying it, or MAY choose to embed it,
-// to make sure options are always applied correctly.
-type InspectorConfig struct {
- // SchemaName limits inspection to tables in a particular schema.
- SchemaName string
-
- // ExcludeTables from inspection.
- ExcludeTables []string
-}
-
-// Inspector reads schema state.
-type Inspector interface {
- Inspect(ctx context.Context) (Database, error)
-}
-
-func WithSchemaName(schemaName string) InspectorOption {
- return func(cfg *InspectorConfig) {
- cfg.SchemaName = schemaName
- }
-}
-
-// WithExcludeTables works in append-only mode, i.e. tables cannot be re-included.
-func WithExcludeTables(tables ...string) InspectorOption {
- return func(cfg *InspectorConfig) {
- cfg.ExcludeTables = append(cfg.ExcludeTables, tables...)
- }
-}
-
-// NewInspector creates a new database inspector, if the dialect supports it.
-func NewInspector(db *bun.DB, options ...InspectorOption) (Inspector, error) {
- dialect, ok := (db.Dialect()).(InspectorDialect)
- if !ok {
- return nil, fmt.Errorf("%s does not implement sqlschema.Inspector", db.Dialect().Name())
- }
- return &inspector{
- Inspector: dialect.NewInspector(db, options...),
- }, nil
-}
-
-func NewBunModelInspector(tables *schema.Tables, options ...InspectorOption) *BunModelInspector {
- bmi := &BunModelInspector{
- tables: tables,
- }
- ApplyInspectorOptions(&bmi.InspectorConfig, options...)
- return bmi
-}
-
-type InspectorOption func(*InspectorConfig)
-
-func ApplyInspectorOptions(cfg *InspectorConfig, options ...InspectorOption) {
- for _, opt := range options {
- opt(cfg)
- }
-}
-
-// inspector is an opaque wrapper around a dialect's database inspector.
-type inspector struct {
- Inspector
-}
-
-// BunModelInspector creates the current project state from the passed bun models.
-// Do not recycle BunModelInspector for different sets of models, as older models will not be de-registered before the next run.
-type BunModelInspector struct {
- InspectorConfig
- tables *schema.Tables
-}
-
-var _ Inspector = (*BunModelInspector)(nil)
-
-func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
- state := BunModelSchema{
- BaseDatabase: BaseDatabase{
- ForeignKeys: make(map[ForeignKey]string),
- },
- Tables: ordered.NewMap[string, Table](),
- }
- for _, t := range bmi.tables.All() {
- if t.Schema != bmi.SchemaName {
- continue
- }
-
- columns := ordered.NewMap[string, Column]()
- for _, f := range t.Fields {
-
- sqlType, length, err := parseLen(f.CreateTableSQLType)
- if err != nil {
- return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err)
- }
- columns.Store(f.Name, &BaseColumn{
- Name: f.Name,
- SQLType: strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq()
- VarcharLen: length,
- DefaultValue: exprOrLiteral(f.SQLDefault),
- IsNullable: !f.NotNull,
- IsAutoIncrement: f.AutoIncrement,
- IsIdentity: f.Identity,
- })
- }
-
- var unique []Unique
- for name, group := range t.Unique {
- // Create a separate unique constraint for each single-column unique field
- // and let each dialect apply its default naming convention.
- if name == "" {
- for _, f := range group {
- unique = append(unique, Unique{Columns: NewColumns(f.Name)})
- }
- continue
- }
-
- // Set the name if it is a "unique group", in which case the user has provided the name.
- var columns []string
- for _, f := range group {
- columns = append(columns, f.Name)
- }
- unique = append(unique, Unique{Name: name, Columns: NewColumns(columns...)})
- }
-
- var pk *PrimaryKey
- if len(t.PKs) > 0 {
- var columns []string
- for _, f := range t.PKs {
- columns = append(columns, f.Name)
- }
- pk = &PrimaryKey{Columns: NewColumns(columns...)}
- }
-
- // In cases where a table is defined in a non-default schema in the `bun:table` tag,
- // schema.Table only extracts the name of the schema, but passes the entire tag value to t.Name
- // for backwards-compatibility. For example, a bun model like this:
- // type Model struct { bun.BaseModel `bun:"table:favourite.books"` }
- // produces
- // schema.Table{ Schema: "favourite", Name: "favourite.books" }
- tableName := strings.TrimPrefix(t.Name, t.Schema+".")
- state.Tables.Store(tableName, &BunTable{
- BaseTable: BaseTable{
- Schema: t.Schema,
- Name: tableName,
- Columns: columns,
- UniqueConstraints: unique,
- PrimaryKey: pk,
- },
- Model: t.ZeroIface,
- })
-
- for _, rel := range t.Relations {
- // These relations are nominal and do not need a foreign key to be declared in the current table.
- // They will either be expressed as N:1 relations in an m2m mapping table, or be referenced by the other table if it's a 1:N relation.
- if rel.Type == schema.ManyToManyRelation ||
- rel.Type == schema.HasManyRelation {
- continue
- }
-
- var fromCols, toCols []string
- for _, f := range rel.BasePKs {
- fromCols = append(fromCols, f.Name)
- }
- for _, f := range rel.JoinPKs {
- toCols = append(toCols, f.Name)
- }
-
- target := rel.JoinTable
- state.ForeignKeys[ForeignKey{
- From: NewColumnReference(t.Name, fromCols...),
- To: NewColumnReference(target.Name, toCols...),
- }] = ""
- }
- }
- return state, nil
-}
-
-func parseLen(typ string) (string, int, error) {
- paren := strings.Index(typ, "(")
- if paren == -1 {
- return typ, 0, nil
- }
- length, err := strconv.Atoi(typ[paren+1 : len(typ)-1])
- if err != nil {
- return typ, 0, err
- }
- return typ[:paren], length, nil
-}
-
-// exprOrLiteral converts the string to lowercase if it is not a quoted string literal ('lit'),
-// and trims the surrounding single quotes otherwise.
-// Use it to ensure that user-defined default values in the models are always comparable
-// to those returned by the database inspector, regardless of the case convention in individual drivers.
-func exprOrLiteral(s string) string {
- if strings.HasPrefix(s, "'") && strings.HasSuffix(s, "'") {
- return strings.Trim(s, "'")
- }
- return strings.ToLower(s)
-}
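A hypothetical in-package test sketch for the two unexported helpers above (not part of the removed sources): parseLen splits a parameterized type into its base type and length, and exprOrLiteral normalizes default values so model-side and database-side defaults compare consistently.

package sqlschema

import "testing"

func TestParseLenAndExprOrLiteral(t *testing.T) {
	typ, n, err := parseLen("varchar(255)")
	if err != nil || typ != "varchar" || n != 255 {
		t.Fatalf("parseLen: got %q, %d, %v", typ, n, err)
	}
	if got := exprOrLiteral("'Y'"); got != "Y" { // quoted literal: quotes trimmed, case preserved
		t.Fatalf("literal: %q", got)
	}
	if got := exprOrLiteral("CURRENT_TIMESTAMP"); got != "current_timestamp" { // expression: lowercased
		t.Fatalf("expression: %q", got)
	}
}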
-
-// BunModelSchema is the schema state derived from bun table models.
-type BunModelSchema struct {
- BaseDatabase
-
- Tables *ordered.Map[string, Table]
-}
-
-func (ms BunModelSchema) GetTables() *ordered.Map[string, Table] {
- return ms.Tables
-}
-
-// BunTable provides additional table metadata that is only accessible from scanning bun models.
-type BunTable struct {
- BaseTable
-
- // Model stores the zero interface to the underlying Go struct.
- Model interface{}
-}
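A minimal sketch of creating a database inspector against this API (assumes a dialect that implements InspectorDialect, e.g. pgdialect; the DSN and excluded table names are illustrative):

package main

import (
	"context"
	"database/sql"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/pgdialect"
	"github.com/uptrace/bun/driver/pgdriver"
	"github.com/uptrace/bun/migrate/sqlschema"
)

func main() {
	dsn := "postgres://postgres:@localhost:5432/test?sslmode=disable" // illustrative DSN
	sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
	db := bun.NewDB(sqldb, pgdialect.New())

	// Limit inspection to the "public" schema and skip the migration bookkeeping tables (names assumed).
	inspector, err := sqlschema.NewInspector(db,
		sqlschema.WithSchemaName("public"),
		sqlschema.WithExcludeTables("bun_migrations", "bun_migration_locks"),
	)
	if err != nil {
		log.Fatal(err)
	}

	state, err := inspector.Inspect(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("inspection done; foreign keys found:", len(state.GetForeignKeys()))
}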
diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go
deleted file mode 100644
index 00500061b..000000000
--- a/vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package sqlschema
-
-import (
- "fmt"
-
- "github.com/uptrace/bun"
- "github.com/uptrace/bun/schema"
-)
-
-type MigratorDialect interface {
- schema.Dialect
- NewMigrator(db *bun.DB, schemaName string) Migrator
-}
-
-type Migrator interface {
- AppendSQL(b []byte, operation interface{}) ([]byte, error)
-}
-
-// migrator is a dialect-agnostic wrapper for sqlschema.MigratorDialect.
-type migrator struct {
- Migrator
-}
-
-func NewMigrator(db *bun.DB, schemaName string) (Migrator, error) {
- md, ok := db.Dialect().(MigratorDialect)
- if !ok {
- return nil, fmt.Errorf("%q dialect does not implement sqlschema.Migrator", db.Dialect().Name())
- }
- return &migrator{
- Migrator: md.NewMigrator(db, schemaName),
- }, nil
-}
-
-// BaseMigrator can be embedded by a dialect's Migrator implementation to reuse some of the existing bun queries.
-type BaseMigrator struct {
- db *bun.DB
-}
-
-func NewBaseMigrator(db *bun.DB) *BaseMigrator {
- return &BaseMigrator{db: db}
-}
-
-func (m *BaseMigrator) AppendCreateTable(b []byte, model interface{}) ([]byte, error) {
- return m.db.NewCreateTable().Model(model).AppendQuery(m.db.Formatter(), b)
-}
-
-func (m *BaseMigrator) AppendDropTable(b []byte, schemaName, tableName string) ([]byte, error) {
- return m.db.NewDropTable().TableExpr("?.?", bun.Ident(schemaName), bun.Ident(tableName)).AppendQuery(m.db.Formatter(), b)
-}
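A minimal sketch of BaseMigrator reusing bun's query builder to render a CREATE TABLE statement (the model, driver, and dialect choices are illustrative; no database connection is needed just to build the SQL):

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/sqlitedialect"
	"github.com/uptrace/bun/migrate/sqlschema"
	_ "modernc.org/sqlite" // illustrative driver choice; registers the "sqlite" driver name
)

// Book is an illustrative model, not part of the removed sources.
type Book struct {
	ID    int64  `bun:",pk,autoincrement"`
	Title string `bun:",notnull"`
}

func main() {
	sqldb, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db := bun.NewDB(sqldb, sqlitedialect.New())

	m := sqlschema.NewBaseMigrator(db)
	ddl, err := m.AppendCreateTable(nil, (*Book)(nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(ddl)) // CREATE TABLE "books" (...)
}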
diff --git a/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go b/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go
deleted file mode 100644
index ec9b77f69..000000000
--- a/vendor/github.com/uptrace/bun/migrate/sqlschema/table.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package sqlschema
-
-import (
- "github.com/uptrace/bun/internal/ordered"
-)
-
-type Table interface {
- GetSchema() string
- GetName() string
- GetColumns() *ordered.Map[string, Column]
- GetPrimaryKey() *PrimaryKey
- GetUniqueConstraints() []Unique
-}
-
-var _ Table = (*BaseTable)(nil)
-
-// BaseTable is a base table definition.
-//
-// Dialects and only dialects can use it to implement the Table interface.
-// Other packages must use the Table interface.
-type BaseTable struct {
- Schema string
- Name string
-
- // Columns maps each column name to its column definition.
- Columns *ordered.Map[string, Column]
-
- // PrimaryKey holds the primary key definition.
- // A nil value means that no primary key is defined for the table.
- PrimaryKey *PrimaryKey
-
- // UniqueConstraints defined on the table.
- UniqueConstraints []Unique
-}
-
-// PrimaryKey represents a primary key constraint defined on 1 or more columns.
-type PrimaryKey struct {
- Name string
- Columns Columns
-}
-
-func (td *BaseTable) GetSchema() string {
- return td.Schema
-}
-
-func (td *BaseTable) GetName() string {
- return td.Name
-}
-
-func (td *BaseTable) GetColumns() *ordered.Map[string, Column] {
- return td.Columns
-}
-
-func (td *BaseTable) GetPrimaryKey() *PrimaryKey {
- return td.PrimaryKey
-}
-
-func (td *BaseTable) GetUniqueConstraints() []Unique {
- return td.UniqueConstraints
-}
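A minimal sketch of a table definition built on BaseTable and consumed through the Table interface (names are illustrative; the Columns map is left nil here because it is an internal ordered map populated by the inspectors):

package main

import (
	"fmt"

	"github.com/uptrace/bun/migrate/sqlschema"
)

func main() {
	var t sqlschema.Table = &sqlschema.BaseTable{
		Schema: "public",
		Name:   "books",
		PrimaryKey: &sqlschema.PrimaryKey{
			Name:    "pk_books",
			Columns: sqlschema.NewColumns("id"),
		},
		UniqueConstraints: []sqlschema.Unique{
			{Name: "uq_books_isbn", Columns: sqlschema.NewColumns("isbn")},
		},
	}
	fmt.Println(t.GetSchema(), t.GetName(), t.GetPrimaryKey().Columns, len(t.GetUniqueConstraints()))
}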