author      2022-11-24 08:35:46 +0000
committer   2022-11-24 09:35:46 +0100
commit      fcb9c0bb8bed51ffb856b8e47f4e047ddd75eb67 (patch)
tree        933c9e1ed2b37e3ca2ee371c0b53f2c1ac561cc5 /testrig
parent      [feature/performance] Fail fast when doing remote transport calls inside inco... (diff)
download    gotosocial-fcb9c0bb8bed51ffb856b8e47f4e047ddd75eb67.tar.xz
[chore] cleanup storage implementation, no need for multiple interface types (#1131)
Signed-off-by: kim <grufwub@gmail.com>
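In short: before this commit the testrig built storage behind the gtsstorage.Driver interface, with separate gtsstorage.Local and gtsstorage.S3 implementations; afterwards gtsstorage.Driver is a single concrete type wrapping a key-value store plus the underlying storage backend, and every testrig helper takes a *gtsstorage.Driver. Below is a minimal sketch of the new in-memory wiring, mirroring NewInMemoryStorage from the diff further down; the import paths and package name are assumptions, only the calls themselves appear in the diff.

```go
package sketch

import (
	"codeberg.org/gruf/go-store/v2/kv"      // assumed import path for the kv package used in the diff
	"codeberg.org/gruf/go-store/v2/storage" // assumed import path for the storage backend package

	gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage" // assumed path for the Driver type
)

// newMemoryDriver mirrors testrig.NewInMemoryStorage after this commit:
// one in-memory backend, wrapped once, shared by the key-value layer and
// by raw storage access through the single Driver struct.
func newMemoryDriver() *gtsstorage.Driver {
	backend := storage.OpenMemory(200, false) // same arguments as used in the diff
	return &gtsstorage.Driver{
		KVStore: kv.New(backend),
		Storage: backend,
	}
}
```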
Diffstat (limited to 'testrig')
-rw-r--r--   testrig/federator.go     |  2
-rw-r--r--   testrig/mediahandler.go  |  2
-rw-r--r--   testrig/processor.go     |  2
-rw-r--r--   testrig/storage.go       | 65
4 files changed, 37 insertions, 34 deletions
diff --git a/testrig/federator.go b/testrig/federator.go
index 2c709bc81..519f9ad62 100644
--- a/testrig/federator.go
+++ b/testrig/federator.go
@@ -29,6 +29,6 @@ import (
 )
 
 // NewTestFederator returns a federator with the given database and (mock!!) transport controller.
-func NewTestFederator(db db.DB, tc transport.Controller, storage storage.Driver, mediaManager media.Manager, fedWorker *concurrency.WorkerPool[messages.FromFederator]) federation.Federator {
+func NewTestFederator(db db.DB, tc transport.Controller, storage *storage.Driver, mediaManager media.Manager, fedWorker *concurrency.WorkerPool[messages.FromFederator]) federation.Federator {
 	return federation.NewFederator(db, NewTestFederatingDB(db, fedWorker), tc, NewTestTypeConverter(db), mediaManager)
 }
diff --git a/testrig/mediahandler.go b/testrig/mediahandler.go
index acb5ed16b..12bfb8679 100644
--- a/testrig/mediahandler.go
+++ b/testrig/mediahandler.go
@@ -25,7 +25,7 @@ import (
 )
 
 // NewTestMediaManager returns a media handler with the default test config, and the given db and storage.
-func NewTestMediaManager(db db.DB, storage storage.Driver) media.Manager {
+func NewTestMediaManager(db db.DB, storage *storage.Driver) media.Manager {
 	m, err := media.NewManager(db, storage)
 	if err != nil {
 		panic(err)
diff --git a/testrig/processor.go b/testrig/processor.go
index 902cb66bf..9dea6f579 100644
--- a/testrig/processor.go
+++ b/testrig/processor.go
@@ -30,6 +30,6 @@ import (
 )
 
 // NewTestProcessor returns a Processor suitable for testing purposes
-func NewTestProcessor(db db.DB, storage storage.Driver, federator federation.Federator, emailSender email.Sender, mediaManager media.Manager, clientWorker *concurrency.WorkerPool[messages.FromClientAPI], fedWorker *concurrency.WorkerPool[messages.FromFederator]) processing.Processor {
+func NewTestProcessor(db db.DB, storage *storage.Driver, federator federation.Federator, emailSender email.Sender, mediaManager media.Manager, clientWorker *concurrency.WorkerPool[messages.FromClientAPI], fedWorker *concurrency.WorkerPool[messages.FromFederator]) processing.Processor {
 	return processing.NewProcessor(NewTestTypeConverter(db), federator, NewTestOauthServer(db), mediaManager, storage, db, emailSender, clientWorker, fedWorker)
 }
diff --git a/testrig/storage.go b/testrig/storage.go
index e29c82532..20226089c 100644
--- a/testrig/storage.go
+++ b/testrig/storage.go
@@ -33,15 +33,15 @@ import (
 )
 
 // NewInMemoryStorage returns a new in memory storage with the default test config
-func NewInMemoryStorage() *gtsstorage.Local {
-	storage, err := kv.OpenStorage(storage.OpenMemory(200, false))
-	if err != nil {
-		panic(err)
+func NewInMemoryStorage() *gtsstorage.Driver {
+	storage := storage.OpenMemory(200, false)
+	return &gtsstorage.Driver{
+		KVStore: kv.New(storage),
+		Storage: storage,
 	}
-	return &gtsstorage.Local{KVStore: storage}
 }
 
-func NewS3Storage() gtsstorage.Driver {
+func NewS3Storage() *gtsstorage.Driver {
 	endpoint := config.GetStorageS3Endpoint()
 	access := config.GetStorageS3AccessKey()
 	secret := config.GetStorageS3SecretKey()
@@ -65,16 +65,16 @@ func NewS3Storage() gtsstorage.Driver {
 		panic(fmt.Errorf("error opening s3 storage: %w", err))
 	}
 
-	return &gtsstorage.S3{
+	return &gtsstorage.Driver{
+		KVStore: kv.New(s3),
+		Storage: s3,
 		Proxy:   proxy,
 		Bucket:  bucket,
-		Storage: s3,
-		KVStore: kv.New(s3),
 	}
 }
 
 // StandardStorageSetup populates the storage with standard test entries from the given directory.
-func StandardStorageSetup(s gtsstorage.Driver, relativePath string) {
+func StandardStorageSetup(storage *gtsstorage.Driver, relativePath string) {
 	storedA := newTestStoredAttachments()
 	a := NewTestAttachments()
 	for k, paths := range storedA {
@@ -90,14 +90,14 @@ func StandardStorageSetup(s gtsstorage.Driver, relativePath string) {
 		if err != nil {
 			panic(err)
 		}
-		if err := s.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
+		if err := storage.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
 			panic(err)
 		}
 		bSmall, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameSmall))
 		if err != nil {
 			panic(err)
 		}
-		if err := s.Put(context.TODO(), pathSmall, bSmall); err != nil {
+		if err := storage.Put(context.TODO(), pathSmall, bSmall); err != nil {
 			panic(err)
 		}
 	}
@@ -117,14 +117,14 @@ func StandardStorageSetup(s gtsstorage.Driver, relativePath string) {
 		if err != nil {
 			panic(err)
 		}
-		if err := s.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
+		if err := storage.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
 			panic(err)
 		}
 		bStatic, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameStatic))
 		if err != nil {
 			panic(err)
 		}
-		if err := s.Put(context.TODO(), pathStatic, bStatic); err != nil {
+		if err := storage.Put(context.TODO(), pathStatic, bStatic); err != nil {
 			panic(err)
 		}
 	}
@@ -133,24 +133,27 @@ func StandardStorageSetup(s gtsstorage.Driver, relativePath string) {
 // StandardStorageTeardown deletes everything in storage so that it's clean for
 // the next test
 // nolint:gocritic // complains about the type switch, but it's the cleanest solution
-func StandardStorageTeardown(s gtsstorage.Driver) {
+func StandardStorageTeardown(storage *gtsstorage.Driver) {
 	defer os.RemoveAll(path.Join(os.TempDir(), "gotosocial"))
 
-	switch st := s.(type) {
-	case *gtsstorage.Local:
-		iter, err := st.KVStore.Iterator(context.Background(), nil)
-		if err != nil {
-			panic(err)
-		}
-		keys := []string{}
-		for iter.Next() {
-			keys = append(keys, iter.Key())
-		}
-		iter.Release()
-		for _, k := range keys {
-			if err := s.Delete(context.TODO(), k); err != nil {
-				panic(err)
-			}
-		}
-	}
+	// Open a storage iterator
+	iter, err := storage.Iterator(context.Background(), nil)
+	if err != nil {
+		panic(err)
+	}
+
+	var keys []string
+
+	for iter.Next() {
+		// Collate all of the storage keys
+		keys = append(keys, iter.Key())
+	}
+
+	// Done with iter
+	iter.Release()
+
+	for _, key := range keys {
+		// Ignore errors, we just want to attempt delete all
+		_ = storage.Delete(context.Background(), key)
+	}
 }
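For context, this is roughly how the updated testrig helpers fit together in a test after this change. Only the function names and signatures come from the diff above; the package names, the test name, and the "../../testrig/media" fixture path are illustrative assumptions.

```go
package sketch_test

import (
	"testing"

	"github.com/superseriousbusiness/gotosocial/testrig" // assumed import path for the testrig package
)

func TestWithInMemoryStorage(t *testing.T) {
	// Build a throwaway in-memory *storage.Driver and fill it with the
	// standard test fixtures; the relative path is an assumption and
	// depends on where this test sits relative to the repo root.
	storage := testrig.NewInMemoryStorage()
	testrig.StandardStorageSetup(storage, "../../testrig/media")

	// Wipe everything again so the next test starts from a clean slate.
	defer testrig.StandardStorageTeardown(storage)

	// ... exercise the code under test, e.g. a media manager created with
	// testrig.NewTestMediaManager(db, storage) once a test db exists ...
}
```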