diff --git a/central/complianceoperator/v2/report/datastore/datastore.go b/central/complianceoperator/v2/report/datastore/datastore.go index 56d4c4e1f70eb..45ab196311e63 100644 --- a/central/complianceoperator/v2/report/datastore/datastore.go +++ b/central/complianceoperator/v2/report/datastore/datastore.go @@ -24,6 +24,9 @@ type DataStore interface { // DeleteSnapshot removes a report snapshot object from the database DeleteSnapshot(ctx context.Context, id string) error + + // GetLastSnapshotFromScanConfig returns the last snapshot associated with a ScanConfiguration + GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) } // New returns an instance of DataStore. diff --git a/central/complianceoperator/v2/report/datastore/datastore_impl.go b/central/complianceoperator/v2/report/datastore/datastore_impl.go index e4f3839a6f3d2..ab83cb6bdcb30 100644 --- a/central/complianceoperator/v2/report/datastore/datastore_impl.go +++ b/central/complianceoperator/v2/report/datastore/datastore_impl.go @@ -8,6 +8,7 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/errorhelpers" + types "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/search" ) @@ -75,3 +76,19 @@ func deleteOrphanedSnapshots(ctx context.Context, ds DataStore, query *v1.Query) } return errList.ToError() } + +func (d *datastoreImpl) GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) { + query := search.NewQueryBuilder(). 
+ AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).ProtoQuery() + snapshots, err := d.SearchSnapshots(ctx, query) + if err != nil { + return nil, err + } + var lastSnapshot *storage.ComplianceOperatorReportSnapshotV2 + for _, snapshot := range snapshots { + if types.CompareTimestamps(snapshot.GetReportStatus().GetCompletedAt(), lastSnapshot.GetReportStatus().GetCompletedAt()) > 0 { + lastSnapshot = snapshot + } + } + return lastSnapshot, nil +} diff --git a/central/complianceoperator/v2/report/datastore/datastore_impl_test.go b/central/complianceoperator/v2/report/datastore/datastore_impl_test.go index 12c8c6a028b55..0fd841377480c 100644 --- a/central/complianceoperator/v2/report/datastore/datastore_impl_test.go +++ b/central/complianceoperator/v2/report/datastore/datastore_impl_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "testing" + "time" "github.com/google/uuid" reportStorage "github.com/stackrox/rox/central/complianceoperator/v2/report/store/postgres" @@ -13,6 +14,7 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" @@ -255,6 +257,34 @@ func (s *complianceReportSnapshotDataStoreSuite) TestDeleteOrphaned() { } } +func (s *complianceReportSnapshotDataStoreSuite) TestGetLastSnapshot() { + // make sure we have nothing + reportIDs, err := s.storage.GetIDs(s.hasReadCtx) + s.Require().NoError(err) + s.Require().Empty(reportIDs) + + timeNow := time.Now() + oldTime := timeNow.Add(-time.Hour) + timestampNow, err := protocompat.ConvertTimeToTimestampOrError(timeNow) + s.Require().NoError(err) + oldTimestamp, err := protocompat.ConvertTimeToTimestampOrError(oldTime) + s.Require().NoError(err) + + status1 := getStatus(storage.ComplianceOperatorReportStatus_PREPARING, oldTimestamp, timestampNow, "", 
storage.ComplianceOperatorReportStatus_SCHEDULED, storage.ComplianceOperatorReportStatus_EMAIL) + status2 := getStatus(storage.ComplianceOperatorReportStatus_PREPARING, oldTimestamp, oldTimestamp, "", storage.ComplianceOperatorReportStatus_SCHEDULED, storage.ComplianceOperatorReportStatus_EMAIL) + user := getUser("u-1", "user-1") + reports := []*storage.ComplianceOperatorReportSnapshotV2{ + getTestReport(uuidStub1, uuidScanConfigStub1, status1, user), + getTestReport(uuidStub2, uuidScanConfigStub1, status2, user), + } + for _, r := range reports { + s.Require().NoError(s.storage.Upsert(s.hasWriteCtx, r)) + } + snapshot, err := s.datastore.GetLastSnapshotFromScanConfig(s.hasReadCtx, uuidScanConfigStub1) + s.Assert().NoError(err) + s.Assert().Equal(uuidStub1, snapshot.GetReportId()) +} + func getTestReport(id string, scanConfigID string, status *storage.ComplianceOperatorReportStatus, user *storage.SlimUser) *storage.ComplianceOperatorReportSnapshotV2 { return &storage.ComplianceOperatorReportSnapshotV2{ ReportId: id, diff --git a/central/complianceoperator/v2/report/datastore/mocks/datastore.go b/central/complianceoperator/v2/report/datastore/mocks/datastore.go index 4a041004a0278..98b0f601a9ff6 100644 --- a/central/complianceoperator/v2/report/datastore/mocks/datastore.go +++ b/central/complianceoperator/v2/report/datastore/mocks/datastore.go @@ -56,6 +56,21 @@ func (mr *MockDataStoreMockRecorder) DeleteSnapshot(ctx, id any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockDataStore)(nil).DeleteSnapshot), ctx, id) } +// GetLastSnapshotFromScanConfig mocks base method. 
+func (m *MockDataStore) GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastSnapshotFromScanConfig", ctx, scanConfigID) + ret0, _ := ret[0].(*storage.ComplianceOperatorReportSnapshotV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLastSnapshotFromScanConfig indicates an expected call of GetLastSnapshotFromScanConfig. +func (mr *MockDataStoreMockRecorder) GetLastSnapshotFromScanConfig(ctx, scanConfigID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastSnapshotFromScanConfig", reflect.TypeOf((*MockDataStore)(nil).GetLastSnapshotFromScanConfig), ctx, scanConfigID) +} + // GetSnapshot mocks base method. func (m *MockDataStore) GetSnapshot(ctx context.Context, id string) (*storage.ComplianceOperatorReportSnapshotV2, bool, error) { m.ctrl.T.Helper() diff --git a/central/complianceoperator/v2/report/manager/format/formatter.go b/central/complianceoperator/v2/report/manager/format/formatter.go index 59a65e583a5be..72477ebc5e39a 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter.go +++ b/central/complianceoperator/v2/report/manager/format/formatter.go @@ -5,12 +5,11 @@ import ( "bytes" "fmt" "io" - "strings" "github.com/pkg/errors" "github.com/stackrox/rox/central/complianceoperator/v2/report" - "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/csv" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -67,7 +66,7 @@ func NewFormatter() *FormatterImpl { // If a cluster fails, the generated CSV file will contain the reason for the reason but (no check results). // If a cluster success, the generated CSV file will contain all the check results with enhanced information (e.g. 
remediation, associated profile, etc) // The results parameter is expected to contain the clusters that succeed (no failed clusters should be passed in results). -func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (buffRet *bytes.Buffer, errRet error) { +func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, clusters map[string]*report.ClusterData) (buffRet *bytes.Buffer, errRet error) { var buf bytes.Buffer zipWriter := f.newZipWriter(&buf) defer func() { @@ -76,20 +75,22 @@ func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, errRet = errors.Wrap(err, "unable to create a zip file of the compliance report") } }() - for clusterID, failedCluster := range failedClusters { - fileName := fmt.Sprintf(failedClusterFmt, clusterID) - if err := f.createFailedClusterFileInZip(zipWriter, fileName, failedCluster); err != nil { - return nil, errors.Wrap(err, "error creating failed cluster report") + timestamp := timestamppb.Now() + for clusterID, cluster := range clusters { + if cluster.FailedInfo != nil { + fileName := getFileName(failedClusterFmt, cluster.ClusterName, timestamp) + if err := f.createFailedClusterFileInZip(zipWriter, fileName, cluster.FailedInfo); err != nil { + return nil, errors.Wrap(err, "error creating failed cluster report") + } } - } - for clusterID, res := range results { - // We should not receive results from a failed cluster - if _, ok := failedClusters[clusterID]; ok { + if len(results[clusterID]) == 0 && cluster.FailedInfo != nil { continue } - fileName := fmt.Sprintf(successfulClusterFmt, clusterID) - err := f.createCSVInZip(zipWriter, fileName, res) - if err != nil { + if _, ok := results[clusterID]; !ok { + return nil, errors.Errorf("found no results for cluster %q", clusterID) + } + fileName := getFileName(successfulClusterFmt, cluster.ClusterName, timestamp) + if err := 
f.createCSVInZip(zipWriter, fileName, results[clusterID]); err != nil { return nil, errors.Wrap(err, "error creating csv report") } } @@ -127,24 +128,22 @@ func generateRecord(row *report.ResultRow) []string { } } -func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filename string, failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster) error { +func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filename string, failedCluster *report.FailedCluster) error { w, err := zipWriter.Create(filename) if err != nil { return err } csvWriter := f.newCSVWriter(failedClusterCSVHeader, true) - csvWriter.AddValue(generateFailRecord(failedCluster)) + for _, reason := range failedCluster.Reasons { + // The order in the slice needs to match the order defined in `failedClusterCSVHeader` + csvWriter.AddValue([]string{failedCluster.ClusterId, failedCluster.ClusterName, reason, failedCluster.OperatorVersion}) + } return csvWriter.WriteCSV(w) } -func generateFailRecord(failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster) []string { - // The order in the slice needs to match the order defined in `failedClusterCSVHeader` - return []string{ - failedCluster.GetClusterId(), - failedCluster.GetClusterName(), - strings.Join(failedCluster.GetReasons(), ", "), - failedCluster.GetOperatorVersion(), - } +func getFileName(format string, clusterName string, timestamp *timestamppb.Timestamp) string { + year, month, day := timestamp.AsTime().Date() + return fmt.Sprintf(format, fmt.Sprintf("%s_%d-%d-%d", clusterName, year, month, day)) } func createNewZipWriter(buf *bytes.Buffer) ZipWriter { diff --git a/central/complianceoperator/v2/report/manager/format/formatter_test.go b/central/complianceoperator/v2/report/manager/format/formatter_test.go index b88ce7cd7b3da..aead83f4beab9 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter_test.go +++ 
b/central/complianceoperator/v2/report/manager/format/formatter_test.go @@ -3,16 +3,16 @@ package format import ( "bytes" "fmt" + "strings" "testing" "github.com/pkg/errors" "github.com/stackrox/rox/central/complianceoperator/v2/report" "github.com/stackrox/rox/central/complianceoperator/v2/report/manager/format/mocks" - "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/csv" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -35,7 +35,10 @@ type ComplianceReportingFormatterSuite struct { func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { s.Run("with empty failed clusters", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) gomock.InOrder( s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { data := getFakeReportData() @@ -49,25 +52,12 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().NoError(err) s.Require().NotNil(buf) }) - s.Run("with nil failed clusters", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) - gomock.InOrder( - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - data := getFakeReportData() - return compareStringSlice(s.T(), target, 
generateRecord(data[clusterID1][0])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - data := getFakeReportData() - return compareStringSlice(s.T(), target, generateRecord(data[clusterID1][1])) - })).Times(1), - ) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) + s.Run("with nil clusters data", func() { s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), nil) s.Require().NoError(err) s.Require().NotNil(buf) @@ -75,26 +65,33 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedClusterNoError() { - gomock.InOrder( - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil), - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil), - ) - gomock.InOrder( + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], 
failedInfo.OperatorVersion}) })).Times(1), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][0])) + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) })).Times(1), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][1])) + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) })).Times(1), - ) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) - s.zipWriter.EXPECT().Close().Times(1).Return(nil) + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) buf, err := s.formatter.FormatCSVReport(getFakeReportDataWithFailedCluster()) s.Require().NoError(err) @@ -102,49 +99,115 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedClusterInResultsParameterNoError() { - gomock.InOrder( - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil), - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil), - ) - gomock.InOrder( + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + // Add empty results to the failed cluster + results[clusterID2] = []*report.ResultRow{} + + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target 
string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) })).Times(1), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][0])) + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) })).Times(1), s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][1])) + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) })).Times(1), - ) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) - s.zipWriter.EXPECT().Close().Times(1).Return(nil) + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) - results, failedCluster := getFakeReportDataWithFailedCluster() - // Add empty results to the failed cluster - results[clusterID2] = []*report.ResultRow{} - buf, err := s.formatter.FormatCSVReport(results, failedCluster) + buf, err := s.formatter.FormatCSVReport(results, clusterData) + s.Require().NoError(err) + s.Require().NotNil(buf) +} + +func (s *ComplianceReportingFormatterSuite) 
Test_FormatCSVReportWithPartialFailedCluster() { + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + // Add partial results + results[clusterID2] = []*report.ResultRow{ + { + ClusterName: "test_cluster-2", + CheckName: "test_check-2", + Profile: "test_profile-2", + ControlRef: "test_control_ref-2", + Description: "description-2", + Status: "Fail", + Remediation: "remediation-2", + }, + } + + partialSuccessFileName := getFileName(successfulClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == partialSuccessFileName + })).Times(1).Return(nil, nil), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return 
compareStringSlice(s.T(), target, generateRecord(results[clusterID2][0])) + })).Times(1), + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(3).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) + + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().NoError(err) s.Require().NotNil(buf) } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCreateError() { s.Run("zip writer failing to create a file (with no failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, errors.New("error")) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().Error(err) s.Require().Nil(buf) }) s.Run("zip writer failing to create a file (containing failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, errors.New("error")) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + delete(clusterData, clusterID1) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportDataOnlyFailedCluster()) + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().Error(err) s.Require().Nil(buf) }) @@ -152,43 +215,56 
@@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCreateError() { func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() { s.Run("csv writer failing to create a file (with no failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().Error(err) s.Require().Nil(buf) }) s.Run("csv writer failing to create a file (containing failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + delete(clusterData, clusterID1) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) })).Times(1) 
s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportDataOnlyFailedCluster()) + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().Error(err) s.Require().Nil(buf) }) } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCloseError() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(errors.New("error")) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), clusterData) s.Require().Error(err) s.Require().Nil(buf) } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportEmptyReportNoError() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(&emptyValueMatcher{ t: s.T(), value: emptyValue, @@ -197,7 +273,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportEmptyReportNoErr s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeEmptyReportData(), getFakeEmptyFailedClusters()) + buf, err := 
s.formatter.FormatCSVReport(getFakeEmptyReportData(), getFakeClusterData()) s.Require().NoError(err) s.Require().NotNil(buf) } @@ -256,32 +332,27 @@ func getFakeReportData() map[string][]*report.ResultRow { return results } -func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - failedClusters[clusterID2] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterName: "test_cluster-2", - ClusterId: "test_cluster-2-id", +func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[string]*report.ClusterData) { + clusterData := getFakeClusterData() + clusterData[clusterID2] = &report.ClusterData{ + ClusterName: "test_cluster-2", + ClusterId: "test_cluster-2-id", + } + clusterData[clusterID2].FailedInfo = &report.FailedCluster{ Reasons: []string{"timeout"}, OperatorVersion: "v1.6.0", } results := getFakeReportData() - return results, failedClusters + return results, clusterData } -func getFakeReportDataOnlyFailedCluster() (map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - failedClusters[clusterID2] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterName: "test_cluster-2", - ClusterId: "test_cluster-2-id", - Reasons: []string{"timeout"}, - OperatorVersion: "v1.6.0", +func getFakeClusterData() map[string]*report.ClusterData { + ret := make(map[string]*report.ClusterData) + ret[clusterID1] = &report.ClusterData{ + ClusterId: clusterID1, + ClusterName: "test_cluster-1", } - results := make(map[string][]*report.ResultRow) - return results, failedClusters -} - -func getFakeEmptyFailedClusters() map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - return 
make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) + return ret } type emptyValueMatcher struct { @@ -309,5 +380,15 @@ func (m *emptyValueMatcher) String() string { } func compareStringSlice(t *testing.T, actual []string, expected []string) bool { - return assert.Equal(t, expected, actual) + if len(actual) != len(expected) { + t.Logf("Expected slice %v but got %v", expected, actual) + return false + } + for i := 0; i < len(actual); i++ { + if strings.Compare(actual[i], expected[i]) != 0 { + t.Logf("Expected slice %v but got %v", expected, actual) + return false + } + } + return true } diff --git a/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go b/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go index d98f1d8ee0899..7111f17bbe72b 100644 --- a/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go +++ b/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go @@ -94,7 +94,7 @@ func (m *MockFormatter) EXPECT() *MockFormatterMockRecorder { } // FormatCSVReport mocks base method. 
-func (m *MockFormatter) FormatCSVReport(arg0 map[string][]*report.ResultRow, arg1 map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (*bytes.Buffer, error) { +func (m *MockFormatter) FormatCSVReport(arg0 map[string][]*report.ResultRow, arg1 map[string]*report.ClusterData) (*bytes.Buffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FormatCSVReport", arg0, arg1) ret0, _ := ret[0].(*bytes.Buffer) diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen.go b/central/complianceoperator/v2/report/manager/generator/report_gen.go index 91725ca0cd9fc..f202aa088a20d 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen.go @@ -34,7 +34,7 @@ type ComplianceReportGenerator interface { // //go:generate mockgen-wrapper type Formatter interface { - FormatCSVReport(map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (*bytes.Buffer, error) + FormatCSVReport(map[string][]*report.ResultRow, map[string]*report.ClusterData) (*bytes.Buffer, error) } // ResultsAggregator interface is used to generate the report data diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go index 0112e5780dfbb..5371d0546e1e7 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go @@ -73,7 +73,7 @@ type complianceReportGeneratorImpl struct { func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Request) error { - log.Infof("Processing report request %s", req) + log.Infof("Processing report request %s", req.ScanConfigID) var snapshot *storage.ComplianceOperatorReportSnapshotV2 if req.SnapshotID != "" { @@ -90,7 +90,7 @@ func (rg *complianceReportGeneratorImpl) 
ProcessReportRequest(req *report.Reques reportData := rg.resultsAggregator.GetReportData(req) - zipData, err := rg.formatter.FormatCSVReport(reportData.ResultCSVs, req.FailedClusters) + zipData, err := rg.formatter.FormatCSVReport(reportData.ResultCSVs, reportData.ClustersData) if err != nil { if dbErr := reportUtils.UpdateSnapshotOnError(req.Ctx, snapshot, report.ErrReportGeneration, rg.snapshotDS); dbErr != nil { return errors.Wrap(dbErr, errUnableToUpdateSnapshotOnGenerationFailureStr) @@ -100,14 +100,14 @@ func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Reques if snapshot != nil { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_GENERATED - if len(req.FailedClusters) > 0 { + if req.NumFailedClusters > 0 { switch req.NotificationMethod { case storage.ComplianceOperatorReportStatus_EMAIL: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_EMAIL case storage.ComplianceOperatorReportStatus_DOWNLOAD: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_DOWNLOAD } - if len(req.FailedClusters) == len(req.ClusterIDs) { + if req.NumFailedClusters == len(req.ClusterIDs) { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_FAILURE } } diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go index 31f2c4efd8a1d..617cbc773a8ca 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go @@ -101,6 +101,9 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { }, }, }, + ClusterData: map[string]*report.ClusterData{ + "cluster-1": {}, + }, } s.Run("GetSnapshots data store error", func() { @@ -195,9 +198,12 @@ func (s *ComplainceReportingTestSuite) 
TestProcessReportRequest() { return storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_DOWNLOAD == target.GetReportStatus().GetRunState() })).Times(1).Return(errors.New("some error")) req := newFakeDownloadRequest() - req.FailedClusters = map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - "cluster-2": {}, + req.ClusterData = map[string]*report.ClusterData{ + "cluster-2": { + FailedInfo: &report.FailedCluster{}, + }, } + req.NumFailedClusters = 1 err := s.reportGen.ProcessReportRequest(req) s.Require().Error(err) s.Assert().Contains(err.Error(), errUnableToUpdateSnapshotOnGenerationSuccessStr) @@ -215,13 +221,16 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { return storage.ComplianceOperatorReportStatus_FAILURE == target.GetReportStatus().GetRunState() })).Times(1).Return(errors.New("some error")) req := newFakeRequestWithFailedCluster() - req.FailedClusters["cluster-1"] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{} + req.ClusterData["cluster-1"] = &report.ClusterData{ + FailedInfo: &report.FailedCluster{}, + } + req.NumFailedClusters = 2 err := s.reportGen.ProcessReportRequest(req) s.Require().Error(err) s.Assert().Contains(err.Error(), errUnableToUpdateSnapshotOnGenerationSuccessStr) }) - s.Run("Fail saving report data (FormatCSVReport returns nil data)", func() { + s.Run("Fail saving report data (nil data)", func() { s.snapshotDS.EXPECT().GetSnapshot(gomock.Any(), gomock.Any()).Times(1). 
Return(&storage.ComplianceOperatorReportSnapshotV2{ ReportStatus: &storage.ComplianceOperatorReportStatus{}, @@ -463,9 +472,13 @@ func newFakeRequestWithFailedCluster() *report.Request { }, }, }, - FailedClusters: map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - "cluster-2": {}, + ClusterData: map[string]*report.ClusterData{ + "cluster-1": {}, + "cluster-2": { + FailedInfo: &report.FailedCluster{}, + }, }, + NumFailedClusters: 1, } } diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go new file mode 100644 index 0000000000000..097a87114c8a0 --- /dev/null +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go @@ -0,0 +1,99 @@ +package helpers + +import ( + "context" + + "github.com/pkg/errors" + "github.com/stackrox/rox/central/complianceoperator/v2/report" + snapshotDS "github.com/stackrox/rox/central/complianceoperator/v2/report/datastore" + scanDS "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/search" +) + +// GetFailedClusters returns the failed clusters metadata associated with a ScanConfiguration +func GetFailedClusters(ctx context.Context, scanConfigID string, snapshotStore snapshotDS.DataStore, scanStore scanDS.DataStore) (map[string]*report.FailedCluster, error) { + failedClusters := make(map[string]*report.FailedCluster) + prevSnapshot, err := snapshotStore.GetLastSnapshotFromScanConfig(ctx, scanConfigID) + if err != nil { + return nil, err + } + for _, failedCluster := range prevSnapshot.GetFailedClusters() { + scans, err := populateFailedScans(ctx, failedCluster.GetScanNames(), prevSnapshot.GetScans(), scanStore) + if err != nil { + return nil, err + } + failedClusters[failedCluster.GetClusterId()] = &report.FailedCluster{ + ClusterId: failedCluster.GetClusterId(), + ClusterName: failedCluster.GetClusterName(), + 
Reasons: failedCluster.GetReasons(), + OperatorVersion: failedCluster.GetOperatorVersion(), + FailedScans: scans, + } + } + return failedClusters, nil +} + +// GetClusterData returns the cluster metadata associated with a report data +func GetClusterData(ctx context.Context, reportData *storage.ComplianceOperatorReportData, failedClusters map[string]*report.FailedCluster, scanStore scanDS.DataStore) (map[string]*report.ClusterData, error) { + clusterData := make(map[string]*report.ClusterData) + for _, cluster := range reportData.GetClusterStatus() { + data := &report.ClusterData{ + ClusterId: cluster.GetClusterId(), + ClusterName: cluster.GetClusterName(), + } + data, err := populateScanNames(ctx, data, reportData, cluster.GetClusterId(), scanStore) + if err != nil { + return nil, err + } + clusterData[cluster.GetClusterId()] = data + } + for failedClusterId, failedCluster := range failedClusters { + cluster, found := clusterData[failedClusterId] + if !found { + continue + } + + failedCluster.ClusterName = cluster.ClusterName + cluster.FailedInfo = failedCluster + } + return clusterData, nil +} + +func populateScanNames(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) { + if data == nil { + return nil, errors.New("cannot populate scans and profiles of a nil ClusterData") + } + query := search.NewQueryBuilder(). + AddExactMatches(search.ClusterID, clusterID). + AddExactMatches(search.ComplianceOperatorScanConfigName, reportData.GetScanConfiguration().GetScanConfigName()). 
+ ProtoQuery() + scans, err := scanStore.SearchScans(ctx, query) + if err != nil { + return nil, errors.Wrapf(err, "unable to retrieve scans associated with the ScanConfiguration %q in the cluster %q", reportData.GetScanConfiguration().GetId(), clusterID) + } + for _, scan := range scans { + data.ScanNames = append(data.ScanNames, scan.GetScanName()) + } + return data, nil +} + +func populateFailedScans(ctx context.Context, failedScanNames []string, snapshotScans []*storage.ComplianceOperatorReportSnapshotV2_Scan, scanStore scanDS.DataStore) ([]*storage.ComplianceOperatorScanV2, error) { + scanRefIDs := make([]string, 0, len(snapshotScans)) + for _, scan := range snapshotScans { + scanRefIDs = append(scanRefIDs, scan.GetScanRefId()) + } + // We need to query by ScanName and ScanRefIDs + // because ScanNames are not unique cross cluster. + // scanRefIDs holds all the scan references (failed and successful) + // associated with the ScanConfiguration. + // failedScanNames holds the scan names of the failed scans. + query := search.NewQueryBuilder(). + AddExactMatches(search.ComplianceOperatorScanName, failedScanNames...). 
+ AddExactMatches(search.ComplianceOperatorScanResult, scanRefIDs...).ProtoQuery() + scans, err := scanStore.SearchScans(ctx, query) + if err != nil { + return nil, err + } + return scans, nil +} diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go new file mode 100644 index 0000000000000..d284c400bffe0 --- /dev/null +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go @@ -0,0 +1,258 @@ +package helpers + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/stackrox/rox/central/complianceoperator/v2/report" + snapshotDS "github.com/stackrox/rox/central/complianceoperator/v2/report/datastore/mocks" + scanDS "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore/mocks" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestGetFailedClusters(t *testing.T) { + ctx := context.Background() + scanConfigID := "scan-config-id" + ctrl := gomock.NewController(t) + snapshotStore := snapshotDS.NewMockDataStore(ctrl) + scanStore := scanDS.NewMockDataStore(ctrl) + snapshot := &storage.ComplianceOperatorReportSnapshotV2{ + FailedClusters: []*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ + { + ClusterId: "cluster-id", + ClusterName: "cluster-name", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + ScanNames: []string{"scan-2"}, + }, + }, + Scans: []*storage.ComplianceOperatorReportSnapshotV2_Scan{ + { + ScanRefId: "scan-ref-id-1", + }, + { + ScanRefId: "scan-ref-id-2", + }, + }, + } + t.Run("failure retrieving snapshot from the store", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). 
+ Times(1).Return(nil, errors.New("some error")) + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.Error(tt, err) + assert.Nil(tt, failedClusters) + }) + t.Run("failure retrieving scans from the store", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). + Times(1).Return(snapshot, nil) + scanStore.EXPECT().SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return(nil, errors.New("some error")) + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.Error(tt, err) + assert.Nil(tt, failedClusters) + }) + t.Run("populate failed clusters successfully", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). + Times(1).Return(snapshot, nil) + scans := []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + ScanRefId: "scan-ref-id-2", + }, + } + scanStore.EXPECT().SearchScans(gomock.Any(), gomock.Any()). 
+ Times(1).Return(scans, nil) + expectedFailedClusters := map[string]*report.FailedCluster{ + "cluster-id": { + ClusterId: "cluster-id", + ClusterName: "cluster-name", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + FailedScans: scans, + }, + } + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.NoError(tt, err) + require.Len(tt, failedClusters, len(expectedFailedClusters)) + for clusterID, expectedCluster := range expectedFailedClusters { + actualCluster, ok := failedClusters[clusterID] + require.True(tt, ok) + assert.Equal(tt, expectedCluster.ClusterId, actualCluster.ClusterId) + assert.Equal(tt, expectedCluster.ClusterName, actualCluster.ClusterName) + assert.Equal(tt, expectedCluster.Reasons, actualCluster.Reasons) + assert.Equal(tt, expectedCluster.OperatorVersion, actualCluster.OperatorVersion) + protoassert.SlicesEqual(t, expectedCluster.FailedScans, actualCluster.FailedScans) + } + }) +} + +func TestGetClusterData(t *testing.T) { + ctx := context.Background() + reportData := &storage.ComplianceOperatorReportData{ + ScanConfiguration: &storage.ComplianceOperatorScanConfigurationV2{ + Id: "scan-config-id", + }, + ClusterStatus: []*storage.ComplianceOperatorReportData_ClusterStatus{ + { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + }, + { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + }, + }, + } + failedClusters := map[string]*report.FailedCluster{ + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + FailedScans: []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + Profile: &storage.ProfileShim{ + ProfileRefId: "profile-ref-id", + }, + }, + }, + }, + } + ctrl := gomock.NewController(t) + scanStore := scanDS.NewMockDataStore(ctrl) + t.Run("empty cluster status", func(tt *testing.T) { + clusterData, err := GetClusterData(ctx, nil, failedClusters, scanStore) + assert.NoError(tt, err) + 
assert.Len(tt, clusterData, 0) + }) + t.Run("failure querying the scan store", func(tt *testing.T) { + scanStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return(nil, errors.New("some error")) + clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore) + assert.Error(tt, err) + assert.Nil(tt, clusterData) + }) + t.Run("no failed clusters", func(tt *testing.T) { + gomock.InOrder( + scanStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return([]*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-1", + }, + { + ScanName: "scan-2", + }, + }, nil), + scanStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return([]*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-1", + }, + { + ScanName: "scan-2", + }, + }, nil), + ) + expectedClusterData := map[string]*report.ClusterData{ + "cluster-1": { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + ScanNames: []string{"scan-1", "scan-2"}, + }, + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + ScanNames: []string{"scan-1", "scan-2"}, + }, + } + clusterData, err := GetClusterData(ctx, reportData, nil, scanStore) + assert.NoError(tt, err) + assertClusterData(tt, expectedClusterData, clusterData) + }) + t.Run("with failed clusters", func(tt *testing.T) { + gomock.InOrder( + scanStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return([]*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-1", + }, + { + ScanName: "scan-2", + }, + }, nil), + scanStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). 
+ Times(1).Return([]*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-1", + }, + { + ScanName: "scan-2", + }, + }, nil), + ) + expectedClusterData := map[string]*report.ClusterData{ + "cluster-1": { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + ScanNames: []string{"scan-1", "scan-2"}, + }, + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + ScanNames: []string{"scan-1", "scan-2"}, + FailedInfo: &report.FailedCluster{ + ClusterId: "cluster-2", + ClusterName: "cluster-2", + OperatorVersion: "v1.6.0", + Reasons: []string{"some reason"}, + FailedScans: []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + Profile: &storage.ProfileShim{ + ProfileRefId: "profile-ref-id", + }, + }, + }, + }, + }, + } + clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore) + assert.NoError(tt, err) + assertClusterData(tt, expectedClusterData, clusterData) + }) +} + +func assertClusterData(t *testing.T, expected map[string]*report.ClusterData, actual map[string]*report.ClusterData) { + assert.Len(t, actual, len(expected)) + for clusterID, expectedCluster := range expected { + actualCluster, ok := actual[clusterID] + require.True(t, ok) + assert.Equal(t, expectedCluster.ClusterId, actualCluster.ClusterId) + assert.Equal(t, expectedCluster.ClusterName, actualCluster.ClusterName) + assert.ElementsMatch(t, expectedCluster.ScanNames, actualCluster.ScanNames) + if expectedCluster.FailedInfo != nil { + require.NotNil(t, actualCluster.FailedInfo) + assert.Equal(t, expectedCluster.FailedInfo.ClusterId, actualCluster.FailedInfo.ClusterId) + assert.Equal(t, expectedCluster.FailedInfo.ClusterName, actualCluster.FailedInfo.ClusterName) + assert.Equal(t, expectedCluster.FailedInfo.Reasons, actualCluster.FailedInfo.Reasons) + assert.Equal(t, expectedCluster.FailedInfo.OperatorVersion, actualCluster.FailedInfo.OperatorVersion) + protoassert.SlicesEqual(t, expectedCluster.FailedInfo.FailedScans, actualCluster.FailedInfo.FailedScans) + 
} else { + assert.Nil(t, actualCluster.FailedInfo) + } + } +} diff --git a/central/complianceoperator/v2/report/manager/manager_impl.go b/central/complianceoperator/v2/report/manager/manager_impl.go index 9b3496310e201..a67970fa8dd0c 100644 --- a/central/complianceoperator/v2/report/manager/manager_impl.go +++ b/central/complianceoperator/v2/report/manager/manager_impl.go @@ -33,7 +33,6 @@ import ( "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/pkg/timestamp" "github.com/stackrox/rox/pkg/uuid" - "golang.org/x/exp/maps" "golang.org/x/sync/semaphore" ) @@ -47,7 +46,8 @@ type reportRequest struct { ctx context.Context snapshotID string notificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod - failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster + clusterData map[string]*report.ClusterData + numFailedClusters int } type managerImpl struct { @@ -220,7 +220,8 @@ func (m *managerImpl) generateReportNoLock(req *reportRequest) { Ctx: req.ctx, SnapshotID: req.snapshotID, NotificationMethod: req.notificationMethod, - FailedClusters: req.failedClusters, + ClusterData: req.clusterData, + NumFailedClusters: req.numFailedClusters, } log.Infof("Executing report request for scan config %q", req.scanConfig.GetId()) if err := m.reportGen.ProcessReportRequest(repRequest); err != nil { @@ -299,6 +300,24 @@ func (m *managerImpl) handleReportRequest(request *reportRequest) (bool, error) return false, errors.Wrap(err, "unable to upsert snapshot on report preparation") } request.snapshotID = snapshot.GetReportId() + failedClusters, err := helpers.GetFailedClusters(m.automaticReportingCtx, request.scanConfig.GetId(), m.snapshotDataStore, m.scanDataStore) + if err != nil { + log.Warnf("unable to retrieve failed clusters: %v", err) + } + request.numFailedClusters = len(failedClusters) + request.clusterData, err = helpers.GetClusterData(m.automaticReportingCtx, snapshot.GetReportData(), failedClusters, m.scanDataStore) + if err 
!= nil { + log.Errorf("unable to get clusters information: %v", err) + if dbErr := helpers.UpdateSnapshotOnError(request.ctx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil { + return false, errors.Wrap(dbErr, "unable to upsert snapshot on generation failure") + } + return false, errors.Wrap(err, "unable to get clusters information") + } + // Add failed clusters to the report snapshot + if _, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot); err != nil { + log.Errorf("unable to update snapshot with failed clusters: %v", err) + return false, err + } m.generateReportNoLock(request) return true, nil } @@ -562,24 +581,27 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca log.Infof("Snapshot for ScanConfig %s: %+v -- %+v", result.ScanConfig.GetScanConfigName(), snapshot.GetReportStatus(), snapshot.GetFailedClusters()) // Update ReportData snapshot.ReportData = m.getReportData(result.ScanConfig) - // Add failed clusters to the report - if len(failedClusters) > 0 { - for _, cluster := range snapshot.ReportData.GetClusterStatus() { - if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok { - failedCluster.ClusterName = cluster.GetClusterName() - } + // Populate ClusterData + clusterData, err := helpers.GetClusterData(m.automaticReportingCtx, snapshot.ReportData, failedClusters, m.scanDataStore) + if err != nil { + log.Errorf("unable to populate cluster data: %v", err) + if dbErr := helpers.UpdateSnapshotOnError(m.automaticReportingCtx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil { + return errors.Wrap(dbErr, "unable to update snapshot on populate cluster data error") } - snapshot.FailedClusters = maps.Values(failedClusters) + return errors.Wrap(err, "unable to populate cluster data") } - if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil { - return errors.Wrapf(err, "unable to upsert the snapshot %s", 
snapshot.GetReportId()) + // Add failed clusters to the report snapshot + snapshot, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot) + if err != nil { + return err } generateReportReq := &reportRequest{ ctx: m.automaticReportingCtx, scanConfig: result.ScanConfig, snapshotID: snapshot.GetReportId(), notificationMethod: snapshot.GetReportStatus().GetReportNotificationMethod(), - failedClusters: failedClusters, + numFailedClusters: len(failedClusters), + clusterData: clusterData, } isOnDemand := snapshot.GetReportStatus().GetReportRequestType() == storage.ComplianceOperatorReportStatus_ON_DEMAND if err := m.handleReportScheduled(generateReportReq, isOnDemand); err != nil { @@ -588,6 +610,31 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca return nil } +func (m *managerImpl) addFailedClustersToTheSnapshot(failedClusters map[string]*report.FailedCluster, snapshot *storage.ComplianceOperatorReportSnapshotV2) (*storage.ComplianceOperatorReportSnapshotV2, error) { + if len(failedClusters) == 0 { + return snapshot, nil + } + failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters)) + for _, failedCluster := range failedClusters { + scans := make([]string, 0, len(failedCluster.FailedScans)) + for _, scan := range failedCluster.FailedScans { + scans = append(scans, scan.GetScanName()) + } + failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ + ClusterId: failedCluster.ClusterId, + ClusterName: failedCluster.ClusterName, + OperatorVersion: failedCluster.OperatorVersion, + Reasons: failedCluster.Reasons, + ScanNames: scans, + }) + } + snapshot.FailedClusters = failedClustersSlice + if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil { + return snapshot, errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId()) + } + return snapshot, nil +} + func (m 
*managerImpl) handleReportScheduled(request *reportRequest, isOnDemand bool) error { if err := m.concurrencySem.Acquire(context.Background(), 1); err != nil { return errors.Wrap(err, "Error acquiring semaphore to run new report") diff --git a/central/complianceoperator/v2/report/manager/manager_test.go b/central/complianceoperator/v2/report/manager/manager_test.go index 1a74b6f3cd8c6..e4e9e277d9cd5 100644 --- a/central/complianceoperator/v2/report/manager/manager_test.go +++ b/central/complianceoperator/v2/report/manager/manager_test.go @@ -104,6 +104,7 @@ func (m *ManagerTestSuite) TestHandleReportRequest() { m.Run("Successful report, no watchers running", func() { manager := New(m.scanConfigDataStore, m.scanDataStore, m.profileDataStore, m.snapshotDataStore, m.complianceIntegrationDataStore, m.suiteDataStore, m.bindingsDataStore, m.checkResultDataStore, m.reportGen) manager.Start() + scanConfig := getTestScanConfig() wg := concurrency.NewWaitGroup(1) m.snapshotDataStore.EXPECT().UpsertSnapshot(gomock.Any(), gomock.Any()).Times(1). Return(nil) @@ -112,6 +113,13 @@ func (m *ManagerTestSuite) TestHandleReportRequest() { wg.Add(-1) return nil }) + m.snapshotDataStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfig.GetId())). + Times(1).Return(nil, nil) + m.scanDataStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(len(scanConfig.GetClusters())). + Return(getScans(len(scanConfig.GetProfiles())), nil) err := manager.SubmitReportRequest(ctx, getTestScanConfig(), storage.ComplianceOperatorReportStatus_EMAIL) m.Require().NoError(err) handleWaitGroup(m.T(), &wg, 10*time.Millisecond, "report generation") @@ -610,10 +618,10 @@ func (m *ManagerTestSuite) setupExpectCallsFromFinishAllScans(sc *storage.Compli m.scanDataStore.EXPECT(). SearchScans(gomock.Any(), gomock.Any()). Times(1).Return(allScans, nil), - // Upsert Snapshots - m.snapshotDataStore.EXPECT(). - UpsertSnapshot(gomock.Any(), gomock.Any()). 
- Times(numSnapshots).Return(nil), + m.scanDataStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(len(sc.GetClusters())*numSnapshots). + Return(scans, nil), } expectedCalls = append(expectedCalls, calls...) return expectedCalls @@ -656,6 +664,11 @@ func (m *ManagerTestSuite) setupExpectCallsFromFailAllScans(sc *storage.Complian })). Times(numSnapshots).Return(nil), + // GetClusterData + m.scanDataStore.EXPECT(). + SearchScans(gomock.Any(), gomock.Any()). + Times(len(sc.GetClusters())*numSnapshots). + Return(scans, nil), } expectedCalls = append(expectedCalls, calls...) return expectedCalls @@ -748,6 +761,17 @@ func handleWaitGroup(t *testing.T, wg *concurrency.WaitGroup, timeout time.Durat } } +func getScans(numProfiles int) []*storage.ComplianceOperatorScanV2 { + var ret []*storage.ComplianceOperatorScanV2 + for i := 0; i < numProfiles; i++ { + name := fmt.Sprintf("profile-%d", i) + ret = append(ret, &storage.ComplianceOperatorScanV2{ + ScanName: name, + }) + } + return ret +} + func newGetScanConfigClusterStatusMatcher(sc *storage.ComplianceOperatorScanConfigurationV2) *getScanConfigClusterStatusMatcher { return &getScanConfigClusterStatusMatcher{ scanConfigID: sc.GetId(), diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator.go b/central/complianceoperator/v2/report/manager/results/results_aggregator.go index d004bd1c7b861..5fab32b0be997 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator.go @@ -16,6 +16,7 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/search" + "github.com/stackrox/rox/pkg/set" ) const ( @@ -65,16 +66,24 @@ type aggregateResultsFn func(context.Context, string, *[]*report.ResultRow, *che func (g *Aggregator) GetReportData(req *report.Request) *report.Results { resultsCSV := make(map[string][]*report.ResultRow) 
reportResults := &report.Results{} + reportResults.ClustersData = make(map[string]*report.ClusterData) for _, clusterID := range req.ClusterIDs { - clusterResults, clusterStatus, err := g.getReportDataForCluster(req.Ctx, req.ScanConfigID, clusterID, req.FailedClusters) + clusterData, ok := req.ClusterData[clusterID] + if !ok { + log.Errorf("empty cluster data for cluster %q", clusterID) + continue + } + clusterResults, clusterStatus, err := g.getReportDataForCluster(req.Ctx, req.ScanConfigID, clusterID, clusterData) if err != nil { log.Errorf("Data not found for cluster %s", clusterID) continue } + resultsCSV[clusterID] = clusterResults reportResults.TotalPass += clusterStatus.totalPass reportResults.TotalFail += clusterStatus.totalFail reportResults.TotalMixed += clusterStatus.totalMixed + reportResults.ClustersData[clusterID] = clusterData } reportResults.Clusters = len(req.ClusterIDs) reportResults.Profiles = req.Profiles @@ -82,19 +91,26 @@ func (g *Aggregator) GetReportData(req *report.Request) *report.Results { return reportResults } -func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, clusterID string, failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) ([]*report.ResultRow, *checkStatus, error) { +func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, clusterID string, clusterData *report.ClusterData) ([]*report.ResultRow, *checkStatus, error) { var ret []*report.ResultRow statuses := &checkStatus{ totalPass: 0, totalFail: 0, totalMixed: 0, } - // If the cluster is in the failedClusters map, we do not retrieve the data - if _, ok := failedClusters[clusterID]; ok { - return ret, statuses, nil + successfulScanNames := clusterData.ScanNames + if clusterData.FailedInfo != nil { + allScansSet := set.NewStringSet(successfulScanNames...) 
+ failedScansSet := set.NewStringSet() + for _, scan := range clusterData.FailedInfo.FailedScans { + failedScansSet.Add(scan.GetScanName()) + } + successfulScanNames = allScansSet.Difference(failedScansSet).AsSlice() } - scanConfigQuery := search.NewQueryBuilder().AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID). + scanConfigQuery := search.NewQueryBuilder(). + AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID). AddExactMatches(search.ClusterID, clusterID). + AddExactMatches(search.ComplianceOperatorScanName, successfulScanNames...). ProtoQuery() err := g.checkResultsDS.WalkByQuery(ctx, scanConfigQuery, g.aggreateResults(ctx, clusterID, &ret, statuses)) return ret, statuses, err diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go index 3268e3c62ea8f..fa662da009a9f 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go @@ -15,7 +15,9 @@ import ( "github.com/stackrox/rox/central/complianceoperator/v2/rules/datastore" ruleMocks "github.com/stackrox/rox/central/complianceoperator/v2/rules/datastore/mocks" scanMocks "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore/mocks" + v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/search" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -50,10 +52,10 @@ type getReportDataTestCase struct { numFailedChecksPerCluster int numMixedChecksPerCluster int numFailedClusters int - expectedErr error + expectedWalkByErr error } -func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { +func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataResultsGeneration() { cases := 
map[string]getReportDataTestCase{ "generate report data no error": { numClusters: 2, @@ -71,9 +73,9 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { numFailedClusters: 1, }, "generate report walk by error": { - numClusters: 3, - numProfiles: 4, - expectedErr: errors.New("error"), + numClusters: 3, + numProfiles: 4, + expectedWalkByErr: errors.New("error"), }, } for tname, tcase := range cases { @@ -81,28 +83,13 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { ctx := context.Background() req := getRequest(ctx, tcase.numClusters, tcase.numProfiles, tcase.numFailedClusters) s.checkResultsDS.EXPECT().WalkByQuery(gomock.Eq(ctx), gomock.Any(), gomock.Any()). - Times(tcase.numClusters). - DoAndReturn(func(_, _ any, fn checkResultWalkByQuery) error { - for i := 0; i < tcase.numPassedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("pass-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_PASS, - }) - } - for i := 0; i < tcase.numFailedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("fail-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_FAIL, - }) - } - for i := 0; i < tcase.numMixedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("mixed-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_INCONSISTENT, - }) - } - return tcase.expectedErr - }) + Times(tcase.numClusters + tcase.numFailedClusters). 
+ DoAndReturn(fakeWalkByResponse( + req.ClusterData, + tcase.expectedWalkByErr, + tcase.numPassedChecksPerCluster, + tcase.numFailedChecksPerCluster, + tcase.numMixedChecksPerCluster)) s.aggregator.aggreateResults = mockWalkByQueryWrapper res := s.aggregator.GetReportData(req) assertResults(s.T(), tcase, res) @@ -110,6 +97,46 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { } } +func fakeWalkByResponse( + clusterData map[string]*report.ClusterData, + expectedErr error, + numPassedChecksPerCluster int, + numFailedChecksPerCluster int, + numMixedChecksPerCluster int, +) func(context.Context, *v1.Query, checkResultWalkByQuery) error { + return func(_ context.Context, query *v1.Query, fn checkResultWalkByQuery) error { + for _, q := range query.GetConjunction().GetQueries() { + if q.GetBaseQuery().GetMatchFieldQuery().GetField() == search.ClusterID.String() { + val := strings.Trim(q.GetBaseQuery().GetMatchFieldQuery().GetValue(), "\"") + if cluster, ok := clusterData[val]; ok { + if cluster.FailedInfo != nil { + return expectedErr + } + } + } + } + for i := 0; i < numPassedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("pass-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_PASS, + }) + } + for i := 0; i < numFailedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("fail-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_FAIL, + }) + } + for i := 0; i < numMixedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("mixed-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_INCONSISTENT, + }) + } + return expectedErr + } +} + var ( profiles = []*storage.ComplianceOperatorProfileV2{ { @@ -405,15 +432,43 @@ func getRequest(ctx context.Context, numClusters, numProfiles, numFailedClusters ClusterIDs: getNames("cluster", numClusters), Profiles: getNames("profile", 
numProfiles), } + clusterData := make(map[string]*report.ClusterData) + for i := 0; i < numClusters+numFailedClusters; i++ { + id := fmt.Sprintf("cluster-%d", i) + var profileNames []string + for j := 0; j < numProfiles; j++ { + profileNames = append(profileNames, fmt.Sprintf("profile-%d", j)) + } + clusterData[id] = &report.ClusterData{ + ClusterId: id, + ClusterName: id, + ScanNames: profileNames, + } + } if numFailedClusters > 0 { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) for i := numClusters; i < numFailedClusters+numClusters; i++ { id := fmt.Sprintf("cluster-%d", i) ret.ClusterIDs = append(ret.ClusterIDs, id) - failedClusters[id] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{} + failedInfo := &report.FailedCluster{ + ClusterId: id, + ClusterName: id, + Reasons: []string{"timeout"}, + OperatorVersion: "v1.6.0", + FailedScans: func() []*storage.ComplianceOperatorScanV2 { + var scans []*storage.ComplianceOperatorScanV2 + for _, scanName := range clusterData[id].ScanNames { + scans = append(scans, &storage.ComplianceOperatorScanV2{ + ScanName: scanName, + }) + } + return scans + }(), + } + clusterData[id].FailedInfo = failedInfo } - ret.FailedClusters = failedClusters + ret.NumFailedClusters = numFailedClusters } + ret.ClusterData = clusterData return ret } @@ -450,7 +505,7 @@ func getRowFromCluster(check, clusterID string) *report.ResultRow { func assertResults(t *testing.T, tcase getReportDataTestCase, res *report.Results) { assert.Equal(t, tcase.numClusters+tcase.numFailedClusters, res.Clusters) assert.Equal(t, tcase.numProfiles, len(res.Profiles)) - if tcase.expectedErr != nil { + if tcase.expectedWalkByErr != nil { assert.Equal(t, 0, res.TotalPass) assert.Equal(t, 0, res.TotalFail) assert.Equal(t, 0, res.TotalMixed) diff --git a/central/complianceoperator/v2/report/manager/watcher/validator.go b/central/complianceoperator/v2/report/manager/watcher/validator.go index 
356e2be2ac3fe..6c8db050a7ac5 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator.go @@ -13,8 +13,8 @@ import ( ) // ValidateScanConfigResults returns a map with the clusters that failed to be scanned. -func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, error) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) +func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (map[string]*report.FailedCluster, error) { + failedClusters := make(map[string]*report.FailedCluster) errList := errorhelpers.NewErrorList("failed clusters") clustersWithResults := set.NewStringSet() for _, scanResult := range results.ScanResults { @@ -23,12 +23,13 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe if failedClusterInfo == nil { continue } - errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.GetClusterId()))) - if previousFailedInfo, ok := failedClusters[failedClusterInfo.GetClusterId()]; ok && !isInstallationError { - previousFailedInfo.Reasons = append(previousFailedInfo.GetReasons(), failedClusterInfo.GetReasons()...) + errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.ClusterId))) + if previousFailedInfo, ok := failedClusters[failedClusterInfo.ClusterId]; ok && !isInstallationError { + previousFailedInfo.Reasons = append(previousFailedInfo.Reasons, failedClusterInfo.Reasons...) + previousFailedInfo.FailedScans = append(previousFailedInfo.FailedScans, failedClusterInfo.FailedScans...) 
continue } - failedClusters[failedClusterInfo.GetClusterId()] = failedClusterInfo + failedClusters[failedClusterInfo.ClusterId] = failedClusterInfo } // If we have less results than the number of clusters*profiles in the scan configuration, @@ -39,11 +40,11 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe continue } clusterInfo := ValidateClusterHealth(ctx, cluster.GetClusterId(), integrationDataStore) - errList.AddError(errors.New(fmt.Sprintf("cluster %s failed", clusterInfo.GetClusterId()))) + errList.AddError(errors.New(fmt.Sprintf("cluster %s failed", clusterInfo.ClusterId))) if len(clusterInfo.Reasons) == 0 { clusterInfo.Reasons = []string{report.INTERNAL_ERROR} } - failedClusters[clusterInfo.GetClusterId()] = clusterInfo + failedClusters[clusterInfo.ClusterId] = clusterInfo } } if results.Error != nil && errors.Is(results.Error, ErrScanConfigTimeout) { @@ -59,7 +60,7 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe } // ValidateScanResults if there are no errors in the scan results, it returns nil; otherwise it returns the failed cluster information -func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster, isInstallationError bool) { +func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (failedCluster *report.FailedCluster, isInstallationError bool) { if results.Error == nil { return nil, false } @@ -67,6 +68,7 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ if len(ret.Reasons) > 0 { return ret, true } + ret.FailedScans = []*storage.ComplianceOperatorScanV2{results.Scan} if errors.Is(results.Error, ErrScanRemoved) { ret.Reasons = []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, results.Scan.GetScanName())} return ret, false @@ -84,10 
+86,9 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ } // ValidateClusterHealth returns the health status of the Compliance Operator Integration -func ValidateClusterHealth(ctx context.Context, clusterID string, integrationDataStore complianceIntegrationDS.DataStore) *storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - ret := &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", +func ValidateClusterHealth(ctx context.Context, clusterID string, integrationDataStore complianceIntegrationDS.DataStore) *report.FailedCluster { + ret := &report.FailedCluster{ + ClusterId: clusterID, } coStatus, err := IsComplianceOperatorHealthy(ctx, clusterID, integrationDataStore) if errors.Is(err, ErrComplianceOperatorIntegrationDataStore) || errors.Is(err, ErrComplianceOperatorIntegrationZeroIntegrations) { diff --git a/central/complianceoperator/v2/report/manager/watcher/validator_test.go b/central/complianceoperator/v2/report/manager/watcher/validator_test.go index 097fe76a6c297..7ff8787cd1728 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator_test.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator_test.go @@ -9,11 +9,11 @@ import ( mocksComplianceIntegrationDS "github.com/stackrox/rox/central/complianceoperator/v2/integration/datastore/mocks" "github.com/stackrox/rox/central/complianceoperator/v2/report" "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/protoassert" "github.com/stackrox/rox/pkg/set" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "google.golang.org/protobuf/proto" ) const ( @@ -35,14 +35,14 @@ func TestValidateScanConfigResults(t *testing.T) { cases := map[string]struct { results *ScanConfigWatcherResults expectFn func(*mocksComplianceIntegrationDS.MockDataStore) - expectedFailedClusters 
map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster + expectedFailedClusters map[string]*report.FailedCluster expectedError bool expectedExactError error }{ "no error": { results: getScanConfigResults(2, 0, 0, 1, nil), expectFn: withExpectCall(nil), - expectedFailedClusters: make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster), + expectedFailedClusters: make(map[string]*report.FailedCluster), }, "two failed clusters": { results: getScanConfigResults(2, 2, 0, 1, nil), @@ -56,7 +56,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, }, "two failed clusters with two scans": { @@ -71,7 +71,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 2), + expectedFailedClusters: getFailedClusters(2, 2, 0, 2), expectedError: true, }, "two failed clusters scan config watcher timeout": { @@ -86,7 +86,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, expectedExactError: report.ErrScanConfigWatcherTimeout, }, @@ -102,7 +102,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, expectedExactError: report.ErrScanWatchersFailed, }, @@ -118,7 +118,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 0, 2, 1), expectedError: true, }, "two missing clusters and two failed clusters": { @@ -133,7 +133,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 4, 1), 
+ expectedFailedClusters: getFailedClusters(2, 2, 2, 1), expectedError: true, }, } @@ -146,7 +146,7 @@ func TestValidateScanConfigResults(t *testing.T) { for id, failedCluster := range tCase.expectedFailedClusters { actual, ok := res[id] require.True(tt, ok) - protoassert.Equal(tt, failedCluster, actual) + assertFailedCluster(tt, failedCluster, actual) } if tCase.expectedError { assert.Error(tt, err) @@ -169,7 +169,7 @@ func TestValidateScanResults(t *testing.T) { operatorStatus []*storage.ComplianceIntegration expectDSError error results *ScanWatcherResults - expectedFailedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster + expectedFailedCluster *report.FailedCluster expectedInstallationError bool }{ "no error": { @@ -186,12 +186,8 @@ func TestValidateScanResults(t *testing.T) { }, Error: errors.New("some error"), }, - expectDSError: errors.New("some error"), - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.INTERNAL_ERROR}, - }, + expectDSError: errors.New("some error"), + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.INTERNAL_ERROR}, false), expectedInstallationError: true, }, "internal error due to no integration retrieved from data store": { @@ -201,13 +197,9 @@ func TestValidateScanResults(t *testing.T) { }, Error: errors.New("some error"), }, - operatorStatus: []*storage.ComplianceIntegration{}, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.INTERNAL_ERROR}, - }, + operatorStatus: []*storage.ComplianceIntegration{}, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.INTERNAL_ERROR}, false), expectedInstallationError: true, }, "operator not installed": { @@ -222,12 +214,8 @@ func TestValidateScanResults(t *testing.T) { OperatorInstalled: 
false, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.COMPLIANCE_NOT_INSTALLED}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.COMPLIANCE_NOT_INSTALLED}, false), expectedInstallationError: true, }, "operator old version": { @@ -244,12 +232,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: oldVersion, - Reasons: []string{report.COMPLIANCE_VERSION_ERROR}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, oldVersion, []string{report.COMPLIANCE_VERSION_ERROR}, false), expectedInstallationError: true, }, "scan removed error": { @@ -267,12 +251,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, scanName)}, true), }, "scan timeout error": { results: &ScanWatcherResults{ @@ -290,12 +270,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_TIMEOUT_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, 
minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_TIMEOUT_FMT, scanName)}, true), }, "sensor context canceled error": { results: &ScanWatcherResults{ @@ -313,12 +289,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_TIMEOUT_SENSOR_DISCONNECTED_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_TIMEOUT_SENSOR_DISCONNECTED_FMT, scanName)}, true), }, "internal error due context canceled error": { results: &ScanWatcherResults{ @@ -336,12 +308,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{report.INTERNAL_ERROR}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{report.INTERNAL_ERROR}, true), }, } for tName, tCase := range cases { @@ -352,7 +320,7 @@ func TestValidateScanResults(t *testing.T) { Return(tCase.operatorStatus, tCase.expectDSError) } res, isInstallationError := ValidateScanResults(ctx, tCase.results, coIntegrationDS) - protoassert.Equal(tt, tCase.expectedFailedCluster, res) + assertFailedCluster(tt, tCase.expectedFailedCluster, res) assert.Equal(tt, tCase.expectedInstallationError, isInstallationError) }) } @@ -412,10 +380,10 @@ func TestValidateClusterHealth(t *testing.T) { Return(tCase.operatorStatus, tCase.expectDSError) res := ValidateClusterHealth(ctx, clusterID, coIntegrationDS) require.NotNil(tt, res) - assert.Equal(tt, clusterID, 
res.GetClusterId()) - assert.Equal(tt, tCase.expectedReason, res.GetReasons()) + assert.Equal(tt, clusterID, res.ClusterId) + assert.Equal(tt, tCase.expectedReason, res.Reasons) if len(tCase.operatorStatus) > 0 { - assert.Equal(tt, tCase.operatorStatus[0].GetVersion(), res.GetOperatorVersion()) + assert.Equal(tt, tCase.operatorStatus[0].GetVersion(), res.OperatorVersion) } }) } @@ -477,24 +445,75 @@ func getScanConfigResults(numSuccessfulClusters, numFailedClusters, numMissingCl } } -func getFailedClusters(idx, numClusters, numScans int) map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - ret := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - for i := idx; i < idx+numClusters; i++ { +func getFailedClusters(idx, numFailedClusters, numMissingClusters, numScans int) map[string]*report.FailedCluster { + ret := make(map[string]*report.FailedCluster) + for i := idx; i < idx+numFailedClusters; i++ { id := fmt.Sprintf("cluster-%d", i) - ret[id] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ + failedCluster := &report.FailedCluster{ ClusterId: id, OperatorVersion: minimumComplianceOperatorVersion, Reasons: []string{report.INTERNAL_ERROR}, } + ret[id] = failedCluster var reasons []string for j := 0; j < numScans; j++ { reasons = append(reasons, report.INTERNAL_ERROR) + failedCluster.FailedScans = append(failedCluster.FailedScans, &storage.ComplianceOperatorScanV2{ + ClusterId: id, + }) } ret[id].Reasons = reasons } + for i := idx + numFailedClusters; i < idx+numFailedClusters+numMissingClusters; i++ { + id := fmt.Sprintf("cluster-%d", i) + failedCluster := &report.FailedCluster{ + ClusterId: id, + OperatorVersion: minimumComplianceOperatorVersion, + Reasons: []string{report.INTERNAL_ERROR}, + } + ret[id] = failedCluster + } return ret } +func newFailedCluster(clusterID, coVersion string, reasons []string, expectScan bool) *report.FailedCluster { + ret := &report.FailedCluster{ + ClusterId: clusterID, + 
OperatorVersion: coVersion, + Reasons: reasons, + } + if expectScan { + ret.FailedScans = []*storage.ComplianceOperatorScanV2{ + { + ClusterId: clusterID, + ScanName: scanName, + }, + } + } + return ret +} + +func assertFailedCluster(t *testing.T, expected, actual *report.FailedCluster) { + if expected == nil && actual == nil { + return + } + assert.Equal(t, expected.ClusterId, actual.ClusterId) + assert.Equal(t, expected.ClusterName, actual.ClusterName) + assert.Equal(t, expected.OperatorVersion, actual.OperatorVersion) + assert.Equal(t, expected.Reasons, actual.Reasons) + assert.Equal(t, len(expected.FailedScans), len(actual.FailedScans)) + for _, expectedScan := range expected.FailedScans { + found := false + for _, actualScan := range actual.FailedScans { + if proto.Equal(expectedScan, actualScan) { + found = true + break + } + } + assert.Truef(t, found, "expected scan %v not found", expectedScan) + } +} + type clusterIdMatcher struct { ids set.StringSet error string diff --git a/central/complianceoperator/v2/report/request.go b/central/complianceoperator/v2/report/request.go index 89f593b112a1f..4c92a8d028536 100644 --- a/central/complianceoperator/v2/report/request.go +++ b/central/complianceoperator/v2/report/request.go @@ -16,5 +16,23 @@ type Request struct { Ctx context.Context SnapshotID string NotificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod - FailedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster + ClusterData map[string]*ClusterData + NumFailedClusters int +} + +// ClusterData holds the metadata for the clusters +type ClusterData struct { + ClusterId string + ClusterName string + ScanNames []string + FailedInfo *FailedCluster +} + +// FailedCluster holds the information of a failed cluster +type FailedCluster struct { + ClusterId string + ClusterName string + Reasons []string + OperatorVersion string + FailedScans []*storage.ComplianceOperatorScanV2 } diff --git 
a/central/complianceoperator/v2/report/result.go b/central/complianceoperator/v2/report/result.go index ca717acca4e79..3d79245867950 100644 --- a/central/complianceoperator/v2/report/result.go +++ b/central/complianceoperator/v2/report/result.go @@ -15,10 +15,11 @@ type ResultRow struct { // Results struct which holds the results of a report. type Results struct { - ResultCSVs map[string][]*ResultRow // map of cluster id to slice of *resultRow - TotalPass int - TotalFail int - TotalMixed int - Profiles []string - Clusters int + ResultCSVs map[string][]*ResultRow // map of cluster id to slice of *resultRow + TotalPass int + TotalFail int + TotalMixed int + Profiles []string + Clusters int + ClustersData map[string]*ClusterData } diff --git a/generated/storage/compliance_operator_v2.pb.go b/generated/storage/compliance_operator_v2.pb.go index 1f0cbe2f672e1..71ab962354648 100644 --- a/generated/storage/compliance_operator_v2.pb.go +++ b/generated/storage/compliance_operator_v2.pb.go @@ -2619,6 +2619,7 @@ type ComplianceOperatorReportSnapshotV2_FailedCluster struct { ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` Reasons []string `protobuf:"bytes,3,rep,name=reasons,proto3" json:"reasons,omitempty"` OperatorVersion string `protobuf:"bytes,4,opt,name=operator_version,json=operatorVersion,proto3" json:"operator_version,omitempty"` + ScanNames []string `protobuf:"bytes,5,rep,name=scanNames,proto3" json:"scanNames,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2681,6 +2682,13 @@ func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetOperatorVersion() return "" } +func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetScanNames() []string { + if x != nil { + return x.ScanNames + } + return nil +} + // Next available tag: 5 type ComplianceOperatorReportData_SuiteStatus struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -3060,7 +3068,7 @@ const 
file_storage_compliance_operator_v2_proto_rawDesc = "" + "\x0foutdated_object\x18\x06 \x01(\tR\x0eoutdatedObject\x12)\n" + "\x10enforcement_type\x18\a \x01(\tR\x0fenforcementType\x12\x1d\n" + "\n" + - "cluster_id\x18\b \x01(\tR\tclusterId\"\x9d\x06\n" + + "cluster_id\x18\b \x01(\tR\tclusterId\"\xbb\x06\n" + "\"ComplianceOperatorReportSnapshotV2\x12\x1b\n" + "\treport_id\x18\x01 \x01(\tR\breportId\x122\n" + "\x15scan_configuration_id\x18\x02 \x01(\tR\x13scanConfigurationId\x12\x12\n" + @@ -3074,13 +3082,14 @@ const file_storage_compliance_operator_v2_proto_rawDesc = "" + "\x0ffailed_clusters\x18\t \x03(\v29.storage.ComplianceOperatorReportSnapshotV2.FailedClusterR\x0efailedClusters\x1an\n" + "\x04Scan\x12\x1e\n" + "\vscan_ref_id\x18\x01 \x01(\tR\tscanRefId\x12F\n" + - "\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\x96\x01\n" + + "\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\xb4\x01\n" + "\rFailedCluster\x12\x1d\n" + "\n" + "cluster_id\x18\x01 \x01(\tR\tclusterId\x12!\n" + "\fcluster_name\x18\x02 \x01(\tR\vclusterName\x12\x18\n" + "\areasons\x18\x03 \x03(\tR\areasons\x12)\n" + - "\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\"\x96\x05\n" + + "\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\x12\x1c\n" + + "\tscanNames\x18\x05 \x03(\tR\tscanNames\"\x96\x05\n" + "\x1cComplianceOperatorReportData\x12]\n" + "\x12scan_configuration\x18\x01 \x01(\v2..storage.ComplianceOperatorScanConfigurationV2R\x11scanConfiguration\x12Z\n" + "\x0ecluster_status\x18\x02 \x03(\v23.storage.ComplianceOperatorReportData.ClusterStatusR\rclusterStatus\x12H\n" + diff --git a/generated/storage/compliance_operator_v2_vtproto.pb.go b/generated/storage/compliance_operator_v2_vtproto.pb.go index b92db7270787c..fb39ebeda5e2c 100644 --- a/generated/storage/compliance_operator_v2_vtproto.pb.go +++ b/generated/storage/compliance_operator_v2_vtproto.pb.go @@ -664,6 +664,11 @@ func (m 
*ComplianceOperatorReportSnapshotV2_FailedCluster) CloneVT() *Compliance copy(tmpContainer, rhs) r.Reasons = tmpContainer } + if rhs := m.ScanNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ScanNames = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1860,6 +1865,15 @@ func (this *ComplianceOperatorReportSnapshotV2_FailedCluster) EqualVT(that *Comp if this.OperatorVersion != that.OperatorVersion { return false } + if len(this.ScanNames) != len(that.ScanNames) { + return false + } + for i, vx := range this.ScanNames { + vy := that.ScanNames[i] + if vx != vy { + return false + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -4099,6 +4113,15 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) MarshalToSizedBufferV i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ScanNames) > 0 { + for iNdEx := len(m.ScanNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ScanNames[iNdEx]) + copy(dAtA[i:], m.ScanNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ScanNames[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if len(m.OperatorVersion) > 0 { i -= len(m.OperatorVersion) copy(dAtA[i:], m.OperatorVersion) @@ -5400,6 +5423,12 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) SizeVT() (n int) { if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if len(m.ScanNames) > 0 { + for _, s := range m.ScanNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -12563,6 +12592,38 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVT(dAtA []by } m.OperatorVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScanNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScanNames = append(m.ScanNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -21179,6 +21240,42 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVTUnsafe(dAt } m.OperatorVersion = stringValue iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScanNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.ScanNames = append(m.ScanNames, stringValue) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/fixtures/fixtureconsts/fixture_consts.go b/pkg/fixtures/fixtureconsts/fixture_consts.go index d3f15ad012e82..55b0b90a8934f 100644 --- a/pkg/fixtures/fixtureconsts/fixture_consts.go +++ b/pkg/fixtures/fixtureconsts/fixture_consts.go @@ -12,6 +12,8 @@ const ( Cluster3 = 
"caaaaaaa-bbbb-4011-0000-333333333333" ClusterFake1 = "caaaaaaa-bbbb-4011-9999-111111111111" ClusterFake2 = "caaaaaaa-bbbb-4011-9999-222222222222" + ComplianceProfileID1 = "caaaaaaa-bbbb-4011-1111-111111111111" + ComplianceProfileID2 = "caaaaaaa-bbbb-4011-2222-222222222222" ComplianceScanConfigID1 = "caaaaaaa-bbbb-4011-1111-111111111111" ComplianceScanConfigID2 = "caaaaaaa-bbbb-4011-2222-222222222222" ComplianceScanConfigID3 = "caaaaaaa-bbbb-4011-3333-333333333333" diff --git a/proto/storage/compliance_operator_v2.proto b/proto/storage/compliance_operator_v2.proto index afaf527de0256..c93217c496a20 100644 --- a/proto/storage/compliance_operator_v2.proto +++ b/proto/storage/compliance_operator_v2.proto @@ -310,6 +310,7 @@ message ComplianceOperatorReportSnapshotV2 { string cluster_name = 2; repeated string reasons = 3; string operator_version = 4; + repeated string scanNames = 5; } repeated FailedCluster failed_clusters = 9; diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 2ff9d9f207bea..ad1258ae5777f 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -4863,6 +4863,12 @@ "id": 4, "name": "operator_version", "type": "string" + }, + { + "id": 5, + "name": "scanNames", + "type": "string", + "is_repeated": true } ] }