diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration.go
new file mode 100644
index 0000000000000..3083ef6e2aa13
--- /dev/null
+++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration.go
@@ -0,0 +1,131 @@
+package m180tom181
+
+import (
+	"context"
+	"database/sql"
+	"os"
+
+	timestamp "github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+	"github.com/stackrox/rox/generated/storage"
+	"github.com/stackrox/rox/migrator/migrations"
+	"github.com/stackrox/rox/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema"
+	"github.com/stackrox/rox/migrator/types"
+	"github.com/stackrox/rox/pkg/logging"
+	"github.com/stackrox/rox/pkg/postgres/gorm/largeobject"
+	"github.com/stackrox/rox/pkg/postgres/pgutils"
+	"github.com/stackrox/rox/pkg/sac"
+	"github.com/stackrox/rox/pkg/utils"
+	"gorm.io/gorm"
+)
+
+const (
+	scannerDefBlobName = "/offline/scanner/scanner-defs.zip"
+)
+
+var (
+	scannerDefPath = "/var/lib/stackrox/scannerdefinitions/scanner-defs.zip"
+)
+
+var (
+	migration = types.Migration{
+		StartingSeqNum: 180,
+		VersionAfter:   &storage.Version{SeqNum: 181},
+		Run: func(databases *types.Databases) error {
+			err := moveToBlobs(databases.GormDB)
+			if err != nil {
+				return errors.Wrap(err, "moving persistent files to blobs")
+			}
+			return nil
+		},
+	}
+	log = logging.LoggerForModule()
+)
+
+// moveToBlobs creates the blobs table (if needed) and moves the persisted
+// scanner-definitions file into the blob store inside a single
+// repeatable-read transaction.
+func moveToBlobs(db *gorm.DB) (err error) {
+	ctx := sac.WithAllAccess(context.Background())
+	db = db.WithContext(ctx).Table(schema.BlobsTableName)
+	pgutils.CreateTableFromModel(ctx, db, schema.CreateTableBlobsStmt)
+
+	tx := db.Begin(&sql.TxOptions{Isolation: sql.LevelRepeatableRead})
+	// gorm reports a failure to open the transaction via tx.Error; check it
+	// before issuing any statements on the transaction handle.
+	if tx.Error != nil {
+		return errors.Wrap(tx.Error, "beginning transaction for blob migration")
+	}
+	if err = moveScannerDefinitions(tx); err != nil {
+		result := tx.Rollback()
+		if result.Error != nil {
+			log.Warnf("failed to rollback with error %v", result.Error)
+		}
+		return errors.Wrap(err, "failed to move scanner definition to blob store")
+	}
+
+	return 
tx.Commit().Error +} + +func moveScannerDefinitions(tx *gorm.DB) error { + fd, err := os.Open(scannerDefPath) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return errors.Wrapf(err, "failed to open %s", scannerDefPath) + } + defer utils.IgnoreError(fd.Close) + stat, err := fd.Stat() + if err != nil { + return err + } + if stat.IsDir() { + return nil + } + modTime, err := timestamp.TimestampProto(stat.ModTime()) + if err != nil { + return errors.Wrapf(err, "invalid timestamp %v", stat.ModTime()) + } + + // Prepare blob + blob := &storage.Blob{ + Name: scannerDefBlobName, + Length: stat.Size(), + LastUpdated: timestamp.TimestampNow(), + ModifiedTime: modTime, + } + los := largeobject.LargeObjects{DB: tx} + + // Find the blob if it exists + var targets []schema.Blobs + result := tx.Limit(1).Where(&schema.Blobs{Name: scannerDefBlobName}).Find(&targets) + if result.Error != nil { + return result.Error + } + + if len(targets) == 0 { + blob.Oid, err = los.Create() + if err != nil { + return errors.Wrap(err, "failed to create large object") + } + } else { + // Update + existingBlob, err := schema.ConvertBlobToProto(&targets[0]) + if err != nil { + return errors.Wrapf(err, "existing blob is not valid %+v", targets[0]) + } + blob.Oid = existingBlob.Oid + } + blobModel, err := schema.ConvertBlobFromProto(blob) + if err != nil { + return errors.Wrapf(err, "failed to convert blob to blob model %+v", blob) + } + tx = tx.FirstOrCreate(blobModel) + if tx.Error != nil { + return errors.Wrap(tx.Error, "failed to create blob metadata") + } + return los.Upsert(blob.Oid, fd) +} + +func init() { + migrations.MustRegisterMigration(migration) +} diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration_test.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration_test.go new file mode 100644 index 0000000000000..0f6b8284a63cf --- /dev/null +++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/migration_test.go @@ -0,0 +1,98 @@ +//go:build 
sql_integration + +package m180tom181 + +import ( + "bytes" + "crypto/rand" + "io" + "os" + "testing" + + "github.com/stackrox/rox/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema" + pghelper "github.com/stackrox/rox/migrator/migrations/postgreshelper" + "github.com/stackrox/rox/pkg/postgres/gorm/largeobject" + "github.com/stackrox/rox/pkg/postgres/pgutils" + "github.com/stretchr/testify/suite" +) + +type blobMigrationTestSuite struct { + suite.Suite + + db *pghelper.TestPostgres +} + +func TestMigration(t *testing.T) { + suite.Run(t, new(blobMigrationTestSuite)) +} + +func (s *blobMigrationTestSuite) SetupTest() { + s.db = pghelper.ForT(s.T(), true) +} + +func (s *blobMigrationTestSuite) TearDownTest() { + s.db.Teardown(s.T()) +} + +func (s *blobMigrationTestSuite) TestMigration() { + // Nothing to migrate + s.Require().NoError(moveToBlobs(s.db.GetGormDB())) + + // Prepare persistent file + size := 90000 + randomData := make([]byte, size) + _, err := rand.Read(randomData) + s.Require().NoError(err) + reader := bytes.NewBuffer(randomData) + + file, err := os.CreateTemp("", "move-blob") + s.Require().NoError(err) + defer func() { + s.NoError(file.Close()) + s.NoError(os.Remove(file.Name())) + }() + scannerDefPath = file.Name() + n, err := io.Copy(file, reader) + s.Require().NoError(err) + + s.Require().EqualValues(size, n) + fileInfo, err := file.Stat() + s.Require().NoError(err) + + // Migrate + s.Require().NoError(moveToBlobs(s.db.GetGormDB())) + + // Verify Blob + blobModel := &schema.Blobs{Name: scannerDefBlobName} + s.Require().NoError(s.db.GetGormDB().First(&blobModel).Error) + + blob, err := schema.ConvertBlobToProto(blobModel) + s.Require().NoError(err) + s.Equal(scannerDefBlobName, blob.GetName()) + s.EqualValues(size, blob.GetLength()) + + modTime := pgutils.NilOrTime(blob.GetModifiedTime()) + s.Equal(fileInfo.ModTime().UTC(), modTime.UTC()) + + // Verify Data + buf := bytes.NewBuffer([]byte{}) + + tx := s.db.GetGormDB().Begin() + 
s.Require().NoError(tx.Error)
+	los := &largeobject.LargeObjects{DB: tx}
+	s.Require().NoError(los.Get(blob.Oid, buf))
+	s.Equal(len(randomData), buf.Len())
+	s.Equal(randomData, buf.Bytes())
+	s.NoError(tx.Commit().Error)
+
+	// Test re-entry
+	s.Require().NoError(moveToBlobs(s.db.GetGormDB()))
+	buf.Reset()
+	tx = s.db.GetGormDB().Begin()
+	los = &largeobject.LargeObjects{DB: tx}
+	s.Require().NoError(tx.Error)
+	s.Require().NoError(los.Get(blob.Oid, buf))
+	s.Equal(len(randomData), buf.Len())
+	s.Equal(randomData, buf.Bytes())
+	s.NoError(tx.Commit().Error)
+}
diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/blobs.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/blobs.go
new file mode 100644
index 0000000000000..c6401dcf1b832
--- /dev/null
+++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/blobs.go
@@ -0,0 +1,35 @@
+// Code generated by pg-bindings generator. DO NOT EDIT.
+
+package schema
+
+import (
+	"reflect"
+
+	"github.com/stackrox/rox/generated/storage"
+	"github.com/stackrox/rox/pkg/postgres"
+	"github.com/stackrox/rox/pkg/postgres/walker"
+)
+
+var (
+	// CreateTableBlobsStmt holds the create statement for table `blobs`.
+	CreateTableBlobsStmt = &postgres.CreateStmts{
+		GormModel: (*Blobs)(nil),
+		Children:  []*postgres.CreateStmts{},
+	}
+
+	// BlobsSchema is the go schema for table `blobs`.
+	BlobsSchema = func() *walker.Schema {
+		schema := walker.Walk(reflect.TypeOf((*storage.Blob)(nil)), "blobs")
+		return schema
+	}()
+)
+
+const (
+	BlobsTableName = "blobs"
+)
+
+// Blobs holds the Gorm model for Postgres table `blobs`. 
+type Blobs struct { + Name string `gorm:"column:name;type:varchar;primaryKey"` + Serialized []byte `gorm:"column:serialized;type:bytea"` +} diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs.go new file mode 100644 index 0000000000000..25223842c6131 --- /dev/null +++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs.go @@ -0,0 +1,28 @@ +// Code generated by pg-bindings generator. DO NOT EDIT. +package schema + +import ( + "github.com/stackrox/rox/generated/storage" +) + +// ConvertBlobFromProto converts a `*storage.Blob` to Gorm model +func ConvertBlobFromProto(obj *storage.Blob) (*Blobs, error) { + serialized, err := obj.Marshal() + if err != nil { + return nil, err + } + model := &Blobs{ + Name: obj.GetName(), + Serialized: serialized, + } + return model, nil +} + +// ConvertBlobToProto converts Gorm model `Blobs` to its protobuf type object +func ConvertBlobToProto(m *Blobs) (*storage.Blob, error) { + var msg storage.Blob + if err := msg.Unmarshal(m.Serialized); err != nil { + return nil, err + } + return &msg, nil +} diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs_test.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs_test.go new file mode 100644 index 0000000000000..a2f2300ee3c51 --- /dev/null +++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/convert_blobs_test.go @@ -0,0 +1,20 @@ +// Code generated by pg-bindings generator. DO NOT EDIT. 
+package schema + +import ( + "testing" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/testutils" + "github.com/stretchr/testify/assert" +) + +func TestBlobSerialization(t *testing.T) { + obj := &storage.Blob{} + assert.NoError(t, testutils.FullInit(obj, testutils.UniqueInitializer(), testutils.JSONFieldsFilter)) + m, err := ConvertBlobFromProto(obj) + assert.NoError(t, err) + conv, err := ConvertBlobToProto(m) + assert.NoError(t, err) + assert.Equal(t, obj, conv) +} diff --git a/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/gen.go b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/gen.go new file mode 100644 index 0000000000000..b1c32a96973f2 --- /dev/null +++ b/migrator/migrations/m_180_to_m_181_move_to_blobstore/schema/gen.go @@ -0,0 +1,6 @@ +package schema + +// TODO(ROX-17180): Remove this auto-generation at the beginning of 4.2 or at least +// before we made schema change to Blob store after first release. + +//go:generate pg-schema-migration-helper --type=storage.Blob diff --git a/migrator/runner/all.go b/migrator/runner/all.go index fad02c8cfafbb..33872b711b938 100644 --- a/migrator/runner/all.go +++ b/migrator/runner/all.go @@ -136,4 +136,5 @@ import ( _ "github.com/stackrox/rox/migrator/migrations/m_177_to_m_178_group_permissions" _ "github.com/stackrox/rox/migrator/migrations/m_178_to_m_179_embedded_collections_search_label" _ "github.com/stackrox/rox/migrator/migrations/m_179_to_m_180_openshift_policy_exclusions" + _ "github.com/stackrox/rox/migrator/migrations/m_180_to_m_181_move_to_blobstore" ) diff --git a/pkg/migrations/internal/seq_num.go b/pkg/migrations/internal/seq_num.go index c0c65febd0877..063a38da4d67f 100644 --- a/pkg/migrations/internal/seq_num.go +++ b/pkg/migrations/internal/seq_num.go @@ -4,13 +4,13 @@ var ( // CurrentDBVersionSeqNum is the current DB version number. // This must be incremented every time we write a migration. 
// It is a shared constant between central and the migrator binary. - CurrentDBVersionSeqNum = 180 + CurrentDBVersionSeqNum = 181 // MinimumSupportedDBVersionSeqNum is the minimum DB version number // that is supported by this database. This is used in case of rollbacks in // the event that a major change introduced an incompatible schema update we // can inform that a rollback below this is not supported by the database - MinimumSupportedDBVersionSeqNum = 180 + MinimumSupportedDBVersionSeqNum = 181 // LastRocksDBVersionSeqNum is the sequence number for the last RocksDB version. LastRocksDBVersionSeqNum = 112 diff --git a/pkg/postgres/gorm/largeobject/large_objects.go b/pkg/postgres/gorm/largeobject/large_objects.go new file mode 100644 index 0000000000000..de6deb16db1de --- /dev/null +++ b/pkg/postgres/gorm/largeobject/large_objects.go @@ -0,0 +1,159 @@ +package largeobject + +import ( + "errors" + "io" + + "gorm.io/gorm" +) + +// LargeObjects is used to access the large objects API with gorm ORM. +// +// This is originally created with similar API with existing github.com/jackc/pgx +// For more details see: http://www.postgresql.org/docs/current/static/largeobjects.html +type LargeObjects struct { + *gorm.DB +} + +// Mode is the open mode for large object +type Mode int32 + +const ( + // ModeWrite is bitmap for write operation on large object + ModeWrite Mode = 0x20000 + // ModeRead is bitmap for read operation on large object + ModeRead Mode = 0x40000 +) + +// Create creates a new large object with an unused OID assigned +func (o *LargeObjects) Create() (oid uint32, err error) { + result := o.Raw("SELECT lo_create($1)", 0).Scan(&oid) + return oid, result.Error +} + +// Open opens an existing large object with the given mode. ctx will also be used for all operations on the opened large +// object. 
+func (o *LargeObjects) Open(oid uint32, mode Mode) (*LargeObject, error) {
+	var fd int32
+	result := o.Raw("select lo_open($1, $2)", oid, mode).Scan(&fd)
+	if result.Error != nil {
+		return nil, result.Error
+	}
+	return &LargeObject{fd: fd, tx: o.DB}, nil
+}
+
+// Unlink removes a large object from the database.
+func (o *LargeObjects) Unlink(oid uint32) error {
+	var count int32
+	result := o.Raw("select lo_unlink($1)", oid).Scan(&count)
+	if result.Error != nil {
+		return result.Error
+	}
+	if count != 1 {
+		return errors.New("failed to remove large object")
+	}
+	return nil
+}
+
+// Upsert inserts a large object with oid. If the large object exists,
+// it is replaced.
+func (o *LargeObjects) Upsert(oid uint32, r io.Reader) error {
+	obj, err := o.Open(oid, ModeWrite)
+	if err != nil {
+		return err
+	}
+	if _, err = obj.Truncate(0); err != nil {
+		return errors.Join(err, obj.Close())
+	}
+	_, err = io.Copy(obj, r)
+
+	return errors.Join(err, obj.Close())
+}
+
+// Get gets the content of the large object and writes it to the writer.
+func (o *LargeObjects) Get(oid uint32, w io.Writer) error {
+	obj, err := o.Open(oid, ModeRead)
+	if err != nil {
+		return err
+	}
+
+	// Stream the object contents to w, then close the descriptor;
+	// wrapClose joins the copy error (if any) with the close error.
+	// Both the error and success paths reduce to the same call.
+	_, err = io.Copy(w, obj)
+	return obj.wrapClose(err)
+}
+
+// A LargeObject implements the large object interface to Postgres database. It implements these interfaces:
+//
+//	io.Writer
+//	io.Reader
+//	io.Seeker
+//	io.Closer
+type LargeObject struct {
+	tx *gorm.DB
+	fd int32
+}
+
+// Write writes p to the large object and returns the number of bytes written and an error if not all of p was written.
+func (o *LargeObject) Write(p []byte) (int, error) {
+	var n int
+	err := o.tx.Raw("select lowrite($1, $2)", o.fd, p).Row().Scan(&n)
+	if err != nil {
+		return n, err
+	}
+
+	if n < 0 {
+		return 0, errors.New("failed to write to large object")
+	}
+
+	return n, nil
+}
+
+// Read reads up to len(p) bytes into p returning the number of bytes read. 
+func (o *LargeObject) Read(p []byte) (n int, err error) { + var res []byte + err = o.tx.Raw("select loread($1, $2)", o.fd, len(p)).Row().Scan(&res) + copy(p, res) + if err != nil { + return len(res), err + } + + if len(res) < len(p) { + err = io.EOF + } + return len(res), err +} + +// Seek moves the current location pointer to the new location specified by offset. +func (o *LargeObject) Seek(offset int64, whence int) (int64, error) { + var n int64 + result := o.tx.Raw("select lo_lseek64($1, $2, $3)", o.fd, offset, whence).Scan(&n) + return n, result.Error +} + +// Tell returns the current read or write location of the large object descriptor. +func (o *LargeObject) Tell() (int64, error) { + var n int64 + result := o.tx.Raw("select lo_tell64($1)", o.fd).Scan(&n) + return n, result.Error +} + +// Truncate the large object to size and return the resulting size. +func (o *LargeObject) Truncate(size int64) (n int, err error) { + result := o.tx.Raw("select lo_truncate64($1, $2)", o.fd, size).Scan(&n) + return n, result.Error +} + +// Close the large object descriptor. +func (o *LargeObject) Close() error { + var n int + result := o.tx.Raw("select lo_close($1)", o.fd).Scan(&n) + return result.Error +} + +// wrapClose closes the large object and returns error if failed. 
Otherwise, it +// returns err +func (o *LargeObject) wrapClose(err error) error { + return errors.Join(err, o.Close()) +} diff --git a/pkg/postgres/gorm/largeobject/large_objects_test.go b/pkg/postgres/gorm/largeobject/large_objects_test.go new file mode 100644 index 0000000000000..ae7b9e9f5dbd1 --- /dev/null +++ b/pkg/postgres/gorm/largeobject/large_objects_test.go @@ -0,0 +1,210 @@ +//go:build sql_integration + +package largeobject + +import ( + "bytes" + "context" + "crypto/rand" + "database/sql" + "io" + "testing" + + "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stretchr/testify/suite" + "gorm.io/gorm" +) + +type GormUtilsTestSuite struct { + suite.Suite + + db *pgtest.TestPostgres + ctx context.Context + gormDB *gorm.DB +} + +func TestLargeObjects(t *testing.T) { + suite.Run(t, new(GormUtilsTestSuite)) +} + +func (s *GormUtilsTestSuite) SetupTest() { + s.db = pgtest.ForT(s.T()) + s.ctx = context.Background() + s.gormDB = s.db.GetGormDB(s.T()).WithContext(s.ctx) +} + +func (s *GormUtilsTestSuite) TearDownTest() { + s.db.Teardown(s.T()) +} + +func (s *GormUtilsTestSuite) TestUpsertGet() { + // Write a long file + randomData := make([]byte, 90000) + _, err := rand.Read(randomData) + s.NoError(err) + + reader := bytes.NewBuffer(randomData) + tx := s.gormDB.Begin(&sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + los := LargeObjects{tx} + oid, err := los.Create() + s.Require().NoError(err) + err = los.Upsert(oid, reader) + s.Require().NoError(err) + s.Require().NoError(tx.Commit().Error) + + // Read it back and verify + tx = s.gormDB.Begin(&sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + los = LargeObjects{tx} + writer := bytes.NewBuffer([]byte{}) + s.Require().NoError(los.Get(oid, writer)) + s.Require().NoError(tx.Commit().Error) + + // Overwrite it + s.Require().Equal(randomData, writer.Bytes()) + reader = bytes.NewBuffer([]byte("hi")) + tx = s.gormDB.Begin(&sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + los = LargeObjects{tx} + err = 
los.Upsert(oid, reader) + s.Require().NoError(err) + s.Require().NoError(tx.Commit().Error) + + // Read it back and verify + tx = s.gormDB.Begin(&sql.TxOptions{Isolation: sql.LevelRepeatableRead}) + los = LargeObjects{tx} + writer = bytes.NewBuffer([]byte{}) + writer.Reset() + s.Require().NoError(los.Get(oid, writer)) + s.Require().Equal([]byte("hi"), writer.Bytes()) + s.Require().NoError(tx.Commit().Error) +} + +func (s *GormUtilsTestSuite) TestLargeObjectSingleTransaction() { + tx := s.gormDB.Begin() + s.Require().NoError(tx.Error) + + los := &LargeObjects{tx} + + id, err := los.Create() + s.Require().NoError(err) + + obj, err := los.Open(id, ModeWrite|ModeRead) + s.Require().NoError(err) + + n, err := obj.Write([]byte("testing")) + s.Require().NoError(err) + s.Require().Equal(7, n, "Expected n to be 7, got %d", n) + + pos, err := obj.Seek(1, 0) + s.Require().NoError(err) + s.Require().Equal(int64(1), pos) + + res := make([]byte, 6) + n, err = obj.Read(res) + s.Require().NoError(err) + s.Require().Equal("esting", string(res)) + s.Require().Equal(6, n) + + n, err = obj.Read(res) + s.Require().Equal(err, io.EOF) + s.Require().Zero(n) + + pos, err = obj.Tell() + s.Require().NoError(err) + s.Require().EqualValues(7, pos) + + _, err = obj.Truncate(1) + s.Require().NoError(err) + + pos, err = obj.Seek(-1, 2) + s.Require().NoError(err) + s.Require().Zero(pos) + + res = make([]byte, 2) + n, err = obj.Read(res) + s.Require().Equal(io.EOF, err) + s.Require().Equal(1, n) + s.Require().EqualValues('t', res[0]) + + err = obj.Close() + s.Require().NoError(err) + + err = los.Unlink(id) + s.Require().NoError(err) + + _, err = los.Open(id, ModeRead) + s.Require().Contains(err.Error(), "does not exist (SQLSTATE 42704)") +} + +func (s *GormUtilsTestSuite) TestLargeObjectMultipleTransactions() { + tx := s.gormDB.Begin() + s.Require().NoError(tx.Error) + los := &LargeObjects{tx} + + id, err := los.Create() + s.Require().NoError(err) + obj, err := los.Open(id, ModeWrite|ModeRead) + 
s.Require().NoError(err)
+
+	n, err := obj.Write([]byte("testing"))
+	s.Require().NoError(err)
+	s.Require().Equal(7, n, "Expected n to be 7, got %d", n)
+
+	// Commit the first transaction
+	s.Require().NoError(tx.Commit().Error)
+
+	// IMPORTANT: Use the same connection for another query
+	query := `select n from generate_series(1,10) n`
+	rows, err := s.gormDB.Raw(query).Rows()
+	s.Require().NoError(err)
+	s.Require().NoError(rows.Err())
+	s.NoError(rows.Close())
+
+	// Start a new transaction
+	tx2 := s.gormDB.Begin()
+	s.Require().NoError(tx2.Error)
+	los2 := &LargeObjects{tx2}
+
+	// Reopen the large object in the new transaction
+	obj2, err := los2.Open(id, ModeWrite|ModeRead)
+	s.Require().NoError(err)
+
+	pos, err := obj2.Seek(1, 0)
+	s.Require().NoError(err)
+	s.Require().EqualValues(1, pos)
+
+	res := make([]byte, 6)
+	n, err = obj2.Read(res)
+	s.Require().NoError(err)
+	s.Require().Equal("esting", string(res))
+	s.Require().Equal(6, n)
+
+	n, err = obj2.Read(res)
+	s.Require().Equal(err, io.EOF)
+	s.Require().Zero(n)
+
+	pos, err = obj2.Tell()
+	s.Require().NoError(err)
+	s.Require().EqualValues(7, pos)
+
+	_, err = obj2.Truncate(1)
+	s.Require().NoError(err)
+
+	pos, err = obj2.Seek(-1, 2)
+	s.Require().NoError(err)
+	s.Require().Zero(pos)
+
+	res = make([]byte, 2)
+	n, err = obj2.Read(res)
+	s.Require().Equal(io.EOF, err)
+	s.Require().Equal(1, n)
+	s.Require().EqualValues('t', res[0])
+
+	err = obj2.Close()
+	s.Require().NoError(err)
+
+	err = los2.Unlink(id)
+	s.Require().NoError(err)
+
+	_, err = los2.Open(id, ModeRead)
+	s.Require().Contains(err.Error(), "does not exist (SQLSTATE 42704)")
+}
diff --git a/tools/roxvet/analyzers/validateimports/analyzer.go b/tools/roxvet/analyzers/validateimports/analyzer.go
index f2c145b10c135..751d0c56f5e65 100644
--- a/tools/roxvet/analyzers/validateimports/analyzer.go
+++ b/tools/roxvet/analyzers/validateimports/analyzer.go
@@ -256,6 +256,7 @@ func verifyImportsFromAllowedPackagesOnly(pass *analysis.Pass, imports 
[]*ast.Im "pkg/migrations", "pkg/nodes/converter", "pkg/policyutils", + "pkg/postgres/gorm", "pkg/postgres/pgadmin", "pkg/postgres/pgconfig", "pkg/postgres/pgtest",