From ad33a7b3e39948ea0e49cd3e93ad673896a4da6b Mon Sep 17 00:00:00 2001
From: lvalerom
Date: Thu, 22 May 2025 15:39:56 +0200
Subject: [PATCH 1/7] generate partial reports per cluster

---
 .../v2/report/manager/format/formatter.go     |  61 ++--
 .../report/manager/format/formatter_test.go   | 245 ++++++++-----
 .../manager/generator/mocks/report_gen.go     |   2 +-
 .../v2/report/manager/generator/report_gen.go |   2 +-
 .../manager/generator/report_gen_impl.go      |   6 +-
 .../manager/generator/report_gen_impl_test.go |  18 +-
 .../v2/report/manager/manager_impl.go         |  34 +-
 .../manager/results/results_aggregator.go     |  74 +++-
 .../results/results_aggregator_test.go        | 335 ++++++++++++++++--
 .../v2/report/manager/watcher/validator.go    |  16 +-
 .../report/manager/watcher/validator_test.go  | 154 ++++----
 .../complianceoperator/v2/report/request.go   |  19 +-
 .../complianceoperator/v2/report/result.go    |  13 +-
 .../v2/scans/datastore/datastore.go           |   9 +-
 .../v2/scans/datastore/datastore_impl.go      |  49 ++-
 .../v2/scans/datastore/datastore_impl_test.go | 208 ++++++++++-
 .../v2/scans/datastore/mocks/datastore.go     |  30 ++
 .../v2/scans/datastore/singleton.go           |   2 +-
 pkg/fixtures/fixtureconsts/fixture_consts.go  |   2 +
 19 files changed, 1024 insertions(+), 255 deletions(-)

diff --git a/central/complianceoperator/v2/report/manager/format/formatter.go b/central/complianceoperator/v2/report/manager/format/formatter.go
index 59a65e583a5be..e21193d156da8 100644
--- a/central/complianceoperator/v2/report/manager/format/formatter.go
+++ b/central/complianceoperator/v2/report/manager/format/formatter.go
@@ -9,8 +9,9 @@ import (
 	"github.com/pkg/errors"
 	"github.com/stackrox/rox/central/complianceoperator/v2/report"
-	"github.com/stackrox/rox/generated/storage"
 	"github.com/stackrox/rox/pkg/csv"
+	"github.com/stackrox/rox/pkg/set"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 const (
@@ -67,7 +68,7 @@ func NewFormatter() *FormatterImpl {
 // If a cluster fails, the generated CSV file will contain the reasons for the failure (but no check results).
 // If a cluster succeeds, the generated CSV file will contain all the check results with enhanced information (e.g. remediation, associated profile, etc.).
 // The results parameter is expected to contain the clusters that succeeded (no failed clusters should be passed in results).
-func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (buffRet *bytes.Buffer, errRet error) { +func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, clusters map[string]*report.ClusterData) (buffRet *bytes.Buffer, errRet error) { var buf bytes.Buffer zipWriter := f.newZipWriter(&buf) defer func() { @@ -76,20 +77,22 @@ func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, errRet = errors.Wrap(err, "unable to create a zip file of the compliance report") } }() - for clusterID, failedCluster := range failedClusters { - fileName := fmt.Sprintf(failedClusterFmt, clusterID) - if err := f.createFailedClusterFileInZip(zipWriter, fileName, failedCluster); err != nil { - return nil, errors.Wrap(err, "error creating failed cluster report") + timestamp := timestamppb.Now() + for clusterID, cluster := range clusters { + if cluster.FailedInfo != nil { + fileName := getFileName(failedClusterFmt, cluster.ClusterName, cluster.FailedInfo.Profiles, timestamp) + if err := f.createFailedClusterFileInZip(zipWriter, fileName, cluster.FailedInfo); err != nil { + return nil, errors.Wrap(err, "error creating failed cluster report") + } + if len(cluster.FailedInfo.Profiles) == len(cluster.Profiles) { + continue + } } - } - for clusterID, res := range results { - // We should not receive results from a failed cluster - if _, ok := failedClusters[clusterID]; ok { - continue + fileName := getFileName(successfulClusterFmt, cluster.ClusterName, getProfileDiff(cluster.Profiles, cluster.FailedInfo), timestamp) + if _, ok := results[clusterID]; !ok { + return nil, errors.Errorf("found no results for cluster %q", clusterID) } - fileName := fmt.Sprintf(successfulClusterFmt, clusterID) - err := f.createCSVInZip(zipWriter, fileName, res) - if err != nil { + if err := f.createCSVInZip(zipWriter, fileName, results[clusterID]); err != nil { return nil, errors.Wrap(err, "error creating csv report") } } @@ -127,26 +130,42 @@ func generateRecord(row *report.ResultRow) []string { } } -func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filename string, failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster) error { +func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filename string, failedCluster *report.FailedCluster) error { w, err := zipWriter.Create(filename) if err != nil { return err } csvWriter := f.newCSVWriter(failedClusterCSVHeader, true) - csvWriter.AddValue(generateFailRecord(failedCluster)) + for _, reason := range failedCluster.GetReasons() { + csvWriter.AddValue(generateFailRecord(failedCluster.GetClusterId(), failedCluster.GetClusterName(), reason, failedCluster.GetOperatorVersion())) + } return csvWriter.WriteCSV(w) } -func generateFailRecord(failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster) []string { +func generateFailRecord(clusterID, clusterName, reason, coVersion string) []string { // The order in the slice needs to match the order defined in `failedClusterCSVHeader` return []string{ - failedCluster.GetClusterId(), - failedCluster.GetClusterName(), - strings.Join(failedCluster.GetReasons(), ", "), - failedCluster.GetOperatorVersion(), + clusterID, + clusterName, + reason, + coVersion, } } +func getProfileDiff(allProfiles []string, failedCluster *report.FailedCluster) []string { + if failedCluster == nil { + return allProfiles + } + allProfilesSet := 
set.NewStringSet(allProfiles...) + failedProfilesSet := set.NewStringSet(failedCluster.Profiles...) + return allProfilesSet.Difference(failedProfilesSet).AsSlice() +} + +func getFileName(format string, clusterName string, profiles []string, timestamp *timestamppb.Timestamp) string { + year, month, day := timestamp.AsTime().Date() + return fmt.Sprintf(format, fmt.Sprintf("%s_%s_%d-%d-%d", clusterName, strings.Join(profiles, "_"), year, month, day)) +} + func createNewZipWriter(buf *bytes.Buffer) ZipWriter { return zip.NewWriter(buf) } diff --git a/central/complianceoperator/v2/report/manager/format/formatter_test.go b/central/complianceoperator/v2/report/manager/format/formatter_test.go index b88ce7cd7b3da..bcc53d26f468f 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter_test.go +++ b/central/complianceoperator/v2/report/manager/format/formatter_test.go @@ -3,16 +3,16 @@ package format import ( "bytes" "fmt" + "strings" "testing" "github.com/pkg/errors" "github.com/stackrox/rox/central/complianceoperator/v2/report" "github.com/stackrox/rox/central/complianceoperator/v2/report/manager/format/mocks" - "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/csv" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -35,7 +35,10 @@ type ComplianceReportingFormatterSuite struct { func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { s.Run("with empty failed clusters", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) gomock.InOrder( s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { data := getFakeReportData() @@ -49,25 +52,12 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().NoError(err) s.Require().NotNil(buf) }) - s.Run("with nil failed clusters", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) - gomock.InOrder( - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - data := getFakeReportData() - return compareStringSlice(s.T(), target, generateRecord(data[clusterID1][0])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - data := getFakeReportData() - return compareStringSlice(s.T(), target, generateRecord(data[clusterID1][1])) - })).Times(1), - ) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) + s.Run("with nil clusters data", func() { s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), nil) s.Require().NoError(err) s.Require().NotNil(buf) @@ -75,24 +65,21 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { } func (s *ComplianceReportingFormatterSuite) 
Test_FormatCSVReportWithFailedClusterNoError() { - gomock.InOrder( - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil), - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil), - ) - gomock.InOrder( - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][0])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][1])) - })).Times(1), - ) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].Profiles, timestamp) + + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName || target == failedFileName + })).Times(2).Return(nil, nil) + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) + })).Times(3) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) @@ -102,49 +89,92 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedClusterInResultsParameterNoError() { - gomock.InOrder( - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil), - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil), - ) - gomock.InOrder( - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][0])) - })).Times(1), - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - successful, _ := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateRecord(successful[clusterID1][1])) - })).Times(1), - ) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + // Add empty results to the failed cluster + results[clusterID2] = []*report.ResultRow{} + + 
successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].Profiles, timestamp) + + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName || target == failedFileName + })).Times(2).Return(nil, nil) + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) + })).Times(3) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - results, failedCluster := getFakeReportDataWithFailedCluster() - // Add empty results to the failed cluster - results[clusterID2] = []*report.ResultRow{} - buf, err := s.formatter.FormatCSVReport(results, failedCluster) + buf, err := s.formatter.FormatCSVReport(results, clusterData) + s.Require().NoError(err) + s.Require().NotNil(buf) +} + +func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithPartialFailedCluster() { + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + // Remove profile + clusterData[clusterID2].FailedInfo.Profiles = clusterData[clusterID2].FailedInfo.Profiles[:1] + // Add partial results + results[clusterID2] = []*report.ResultRow{ + { + ClusterName: "test_cluster-2", + CheckName: "test_check-2", + Profile: "test_profile-2", + ControlRef: "test_control_ref-2", + Description: "description-2", + Status: "Fail", + Remediation: "remediation-2", + }, + } + + partialSuccessFileName := getFileName(successfulClusterFmt, clusterData[clusterID2].ClusterName, getProfileDiff(clusterData[clusterID2].Profiles, clusterData[clusterID2].FailedInfo), timestamp) + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp) + + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName || target == failedFileName || target == partialSuccessFileName + })).Times(3).Return(nil, nil) + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) || + compareStringSlice(s.T(), target, generateRecord(results[clusterID2][0])) + })).Times(4) + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(3).Return(nil) + s.zipWriter.EXPECT().Close().Times(1).Return(nil) + + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().NoError(err) s.Require().NotNil(buf) } func (s *ComplianceReportingFormatterSuite) 
Test_FormatCSVReportCreateError() { s.Run("zip writer failing to create a file (with no failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, errors.New("error")) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().Error(err) s.Require().Nil(buf) }) s.Run("zip writer failing to create a file (containing failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, errors.New("error")) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + delete(clusterData, clusterID1) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp) + s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportDataOnlyFailedCluster()) + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().Error(err) s.Require().Nil(buf) }) @@ -152,43 +182,56 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCreateError() { func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() { s.Run("csv writer failing to create a file (with no failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeClusterData()) s.Require().Error(err) s.Require().Nil(buf) }) s.Run("csv writer failing to create a file (containing failed clusters) should yield an error", func() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(failedClusterFmt, clusterID2)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + results, clusterData := getFakeReportDataWithFailedCluster() + delete(clusterData, clusterID1) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp) + s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - _, failed := getFakeReportDataWithFailedCluster() - return compareStringSlice(s.T(), target, generateFailRecord(failed[clusterID2])) + failedInfo := 
clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) })).Times(1) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeReportDataOnlyFailedCluster()) + buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().Error(err) s.Require().Nil(buf) }) } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCloseError() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(errors.New("error")) - buf, err := s.formatter.FormatCSVReport(getFakeReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeReportData(), clusterData) s.Require().Error(err) s.Require().Nil(buf) } func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportEmptyReportNoError() { - s.zipWriter.EXPECT().Create(fmt.Sprintf(successfulClusterFmt, clusterID1)).Times(1).Return(nil, nil) + timestamp := timestamppb.Now() + clusterData := getFakeClusterData() + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(&emptyValueMatcher{ t: s.T(), value: emptyValue, @@ -197,7 +240,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportEmptyReportNoErr s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil) s.zipWriter.EXPECT().Close().Times(1).Return(nil) - buf, err := s.formatter.FormatCSVReport(getFakeEmptyReportData(), getFakeEmptyFailedClusters()) + buf, err := s.formatter.FormatCSVReport(getFakeEmptyReportData(), getFakeClusterData()) s.Require().NoError(err) s.Require().NotNil(buf) } @@ -256,32 +299,44 @@ func getFakeReportData() map[string][]*report.ResultRow { return results } -func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - failedClusters[clusterID2] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterName: "test_cluster-2", - ClusterId: "test_cluster-2-id", - Reasons: []string{"timeout"}, - OperatorVersion: "v1.6.0", +func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[string]*report.ClusterData) { + clusterData := getFakeClusterData() + clusterData[clusterID2] = &report.ClusterData{ + ClusterName: "test_cluster-2", + ClusterId: "test_cluster-2-id", + Profiles: []string{"test_profile-1", "test_profile-2"}, } + clusterData[clusterID2].FailedInfo = &report.FailedCluster{} + clusterData[clusterID2].FailedInfo.Reasons = []string{"timeout"} + clusterData[clusterID2].FailedInfo.Profiles = []string{"test_profile-1", "test_profile-2"} + clusterData[clusterID2].FailedInfo.OperatorVersion = 
"v1.6.0" results := getFakeReportData() - return results, failedClusters + return results, clusterData } -func getFakeReportDataOnlyFailedCluster() (map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - failedClusters[clusterID2] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterName: "test_cluster-2", - ClusterId: "test_cluster-2-id", - Reasons: []string{"timeout"}, - OperatorVersion: "v1.6.0", +func getFakeReportDataOnlyFailedCluster() (map[string][]*report.ResultRow, map[string]*report.ClusterData) { + failedClusters := make(map[string]*report.ClusterData) + failedClusters[clusterID2] = &report.ClusterData{ + ClusterName: "test_cluster-2", + ClusterId: "test_cluster-2-id", + Profiles: []string{"test_profile-1", "test_profile-2"}, } + failedClusters[clusterID2].FailedInfo = &report.FailedCluster{} + failedClusters[clusterID2].FailedInfo.Reasons = []string{"timeout"} + failedClusters[clusterID2].FailedInfo.Profiles = []string{"test_profile-1", "test_profile-2"} + failedClusters[clusterID2].FailedInfo.OperatorVersion = "v1.6.0" results := make(map[string][]*report.ResultRow) return results, failedClusters } -func getFakeEmptyFailedClusters() map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - return make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) +func getFakeClusterData() map[string]*report.ClusterData { + ret := make(map[string]*report.ClusterData) + ret[clusterID1] = &report.ClusterData{ + ClusterId: clusterID1, + ClusterName: "test_cluster-1", + Profiles: []string{"test_profile-1", "test_profile-2"}, + } + return ret } type emptyValueMatcher struct { @@ -308,6 +363,14 @@ func (m *emptyValueMatcher) String() string { return m.error } -func compareStringSlice(t *testing.T, actual []string, expected []string) bool { - return assert.Equal(t, expected, actual) +func compareStringSlice(_ *testing.T, actual []string, expected []string) bool { + if len(actual) != len(expected) { + return false + } + for i := 0; i < len(actual); i++ { + if strings.Compare(actual[i], expected[i]) != 0 { + return false + } + } + return true } diff --git a/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go b/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go index d98f1d8ee0899..7111f17bbe72b 100644 --- a/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go +++ b/central/complianceoperator/v2/report/manager/generator/mocks/report_gen.go @@ -94,7 +94,7 @@ func (m *MockFormatter) EXPECT() *MockFormatterMockRecorder { } // FormatCSVReport mocks base method. 
-func (m *MockFormatter) FormatCSVReport(arg0 map[string][]*report.ResultRow, arg1 map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (*bytes.Buffer, error) { +func (m *MockFormatter) FormatCSVReport(arg0 map[string][]*report.ResultRow, arg1 map[string]*report.ClusterData) (*bytes.Buffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FormatCSVReport", arg0, arg1) ret0, _ := ret[0].(*bytes.Buffer) diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen.go b/central/complianceoperator/v2/report/manager/generator/report_gen.go index 91725ca0cd9fc..f202aa088a20d 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen.go @@ -34,7 +34,7 @@ type ComplianceReportGenerator interface { // //go:generate mockgen-wrapper type Formatter interface { - FormatCSVReport(map[string][]*report.ResultRow, map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) (*bytes.Buffer, error) + FormatCSVReport(map[string][]*report.ResultRow, map[string]*report.ClusterData) (*bytes.Buffer, error) } // ResultsAggregator interface is used to generate the report data diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go index 0112e5780dfbb..6978dd1a450e6 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go @@ -90,7 +90,7 @@ func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Reques reportData := rg.resultsAggregator.GetReportData(req) - zipData, err := rg.formatter.FormatCSVReport(reportData.ResultCSVs, req.FailedClusters) + zipData, err := rg.formatter.FormatCSVReport(reportData.ResultCSVs, reportData.ClustersData) if err != nil { if dbErr := reportUtils.UpdateSnapshotOnError(req.Ctx, snapshot, report.ErrReportGeneration, rg.snapshotDS); dbErr != nil { return errors.Wrap(dbErr, errUnableToUpdateSnapshotOnGenerationFailureStr) @@ -100,14 +100,14 @@ func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Reques if snapshot != nil { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_GENERATED - if len(req.FailedClusters) > 0 { + if req.FailedClusters > 0 { switch req.NotificationMethod { case storage.ComplianceOperatorReportStatus_EMAIL: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_EMAIL case storage.ComplianceOperatorReportStatus_DOWNLOAD: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_DOWNLOAD } - if len(req.FailedClusters) == len(req.ClusterIDs) { + if req.FailedClusters == len(req.ClusterIDs) { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_FAILURE } } diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go index 31f2c4efd8a1d..5dea0397b923c 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go @@ -101,6 +101,9 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { }, }, }, + ClusterData: map[string]*report.ClusterData{ + "cluster-1": {}, + }, } s.Run("GetSnapshots data store error", func() { @@ -215,13 
+218,16 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { return storage.ComplianceOperatorReportStatus_FAILURE == target.GetReportStatus().GetRunState() })).Times(1).Return(errors.New("some error")) req := newFakeRequestWithFailedCluster() - req.FailedClusters["cluster-1"] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{} + req.ClusterData["cluster-1"] = &report.ClusterData{ + FailedInfo: &report.FailedCluster{}, + } + req.FailedClusters = 2 err := s.reportGen.ProcessReportRequest(req) s.Require().Error(err) s.Assert().Contains(err.Error(), errUnableToUpdateSnapshotOnGenerationSuccessStr) }) - s.Run("Fail saving report data (FormatCSVReport returns nil data)", func() { + s.Run("Fail saving report data (nil data)", func() { s.snapshotDS.EXPECT().GetSnapshot(gomock.Any(), gomock.Any()).Times(1). Return(&storage.ComplianceOperatorReportSnapshotV2{ ReportStatus: &storage.ComplianceOperatorReportStatus{}, @@ -463,9 +469,13 @@ func newFakeRequestWithFailedCluster() *report.Request { }, }, }, - FailedClusters: map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - "cluster-2": {}, + ClusterData: map[string]*report.ClusterData{ + "cluster-1": {}, + "cluster-2": { + FailedInfo: &report.FailedCluster{}, + }, }, + FailedClusters: 1, } } diff --git a/central/complianceoperator/v2/report/manager/manager_impl.go b/central/complianceoperator/v2/report/manager/manager_impl.go index 9b3496310e201..6c6e918e6e32f 100644 --- a/central/complianceoperator/v2/report/manager/manager_impl.go +++ b/central/complianceoperator/v2/report/manager/manager_impl.go @@ -33,7 +33,6 @@ import ( "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/pkg/timestamp" "github.com/stackrox/rox/pkg/uuid" - "golang.org/x/exp/maps" "golang.org/x/sync/semaphore" ) @@ -47,7 +46,8 @@ type reportRequest struct { ctx context.Context snapshotID string notificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod - failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster + clusterData map[string]*report.ClusterData + failedClusters int } type managerImpl struct { @@ -220,6 +220,7 @@ func (m *managerImpl) generateReportNoLock(req *reportRequest) { Ctx: req.ctx, SnapshotID: req.snapshotID, NotificationMethod: req.notificationMethod, + ClusterData: req.clusterData, FailedClusters: req.failedClusters, } log.Infof("Executing report request for scan config %q", req.scanConfig.GetId()) @@ -563,13 +564,29 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca // Update ReportData snapshot.ReportData = m.getReportData(result.ScanConfig) // Add failed clusters to the report + clusterData := make(map[string]*report.ClusterData) + for _, cluster := range snapshot.ReportData.GetClusterStatus() { + data := &report.ClusterData{ + ClusterId: cluster.GetClusterId(), + ClusterName: cluster.GetClusterName(), + } + if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok { + failedCluster.ClusterName = cluster.GetClusterName() + data.FailedInfo = failedCluster + } + clusterData[cluster.GetClusterId()] = data + } if len(failedClusters) > 0 { - for _, cluster := range snapshot.ReportData.GetClusterStatus() { - if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok { - failedCluster.ClusterName = cluster.GetClusterName() - } + failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters)) + for _, failedCluster := range failedClusters { + failedClustersSlice = 
append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{
+				ClusterId:       failedCluster.GetClusterId(),
+				ClusterName:     failedCluster.GetClusterName(),
+				OperatorVersion: failedCluster.GetOperatorVersion(),
+				Reasons:         failedCluster.GetReasons(),
+			})
 		}
-		snapshot.FailedClusters = maps.Values(failedClusters)
+		snapshot.FailedClusters = failedClustersSlice
 	}
 	if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil {
 		return errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId())
@@ -579,7 +596,8 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca
 		scanConfig:         result.ScanConfig,
 		snapshotID:         snapshot.GetReportId(),
 		notificationMethod: snapshot.GetReportStatus().GetReportNotificationMethod(),
-		failedClusters:     failedClusters,
+		failedClusters:     len(failedClusters),
+		clusterData:        clusterData,
 	}
 	isOnDemand := snapshot.GetReportStatus().GetReportRequestType() == storage.ComplianceOperatorReportStatus_ON_DEMAND
 	if err := m.handleReportScheduled(generateReportReq, isOnDemand); err != nil {
diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator.go b/central/complianceoperator/v2/report/manager/results/results_aggregator.go
index d004bd1c7b861..374a6105ef6ea 100644
--- a/central/complianceoperator/v2/report/manager/results/results_aggregator.go
+++ b/central/complianceoperator/v2/report/manager/results/results_aggregator.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/pkg/errors"
 	benchmarksDS "github.com/stackrox/rox/central/complianceoperator/v2/benchmarks/datastore"
 	checkResults "github.com/stackrox/rox/central/complianceoperator/v2/checkresults/datastore"
 	"github.com/stackrox/rox/central/complianceoperator/v2/checkresults/utils"
@@ -16,6 +17,7 @@ import (
 	"github.com/stackrox/rox/generated/storage"
 	"github.com/stackrox/rox/pkg/logging"
 	"github.com/stackrox/rox/pkg/search"
+	"github.com/stackrox/rox/pkg/set"
 )
 
 const (
@@ -65,16 +67,24 @@ type aggregateResultsFn func(context.Context, string, *[]*report.ResultRow, *che
 func (g *Aggregator) GetReportData(req *report.Request) *report.Results {
 	resultsCSV := make(map[string][]*report.ResultRow)
 	reportResults := &report.Results{}
+	reportResults.ClustersData = make(map[string]*report.ClusterData)
 	for _, clusterID := range req.ClusterIDs {
-		clusterResults, clusterStatus, err := g.getReportDataForCluster(req.Ctx, req.ScanConfigID, clusterID, req.FailedClusters)
+		clusterData, err := g.getClusterData(req.Ctx, req.ScanConfigID, clusterID, req.ClusterData[clusterID])
+		if err != nil {
+			log.Errorf("Unable to enhance cluster data for cluster %q: %v", clusterID, err)
+			continue
+		}
+		clusterResults, clusterStatus, err := g.getReportDataForCluster(req.Ctx, req.ScanConfigID, clusterID, clusterData)
 		if err != nil {
 			log.Errorf("Data not found for cluster %s", clusterID)
 			continue
 		}
+
 		resultsCSV[clusterID] = clusterResults
 		reportResults.TotalPass += clusterStatus.totalPass
 		reportResults.TotalFail += clusterStatus.totalFail
 		reportResults.TotalMixed += clusterStatus.totalMixed
+		reportResults.ClustersData[clusterID] = clusterData
 	}
 	reportResults.Clusters = len(req.ClusterIDs)
 	reportResults.Profiles = req.Profiles
@@ -82,19 +92,71 @@ func (g *Aggregator) GetReportData(req *report.Request) *report.Results {
 	return reportResults
 }
 
-func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, clusterID string, failedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) ([]*report.ResultRow, *checkStatus, error) {
+func (g *Aggregator) getClusterData(ctx context.Context, scanConfigID, clusterID string, clusterData *report.ClusterData) (*report.ClusterData, error) {
+	if clusterData == nil {
+		return nil, errors.Errorf("cluster data for cluster %q is nil", clusterID)
+	}
+
+	scanNamesToProfileNames, err := g.scanDS.GetProfilesScanNamesByScanConfigAndCluster(ctx, scanConfigID, clusterID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to retrieve profiles associated with the ScanConfiguration %q in the cluster %q", scanConfigID, clusterID)
+	}
+	var profileNames []string
+	var scanNames []string
+	for scanName, profileName := range scanNamesToProfileNames {
+		profileNames = append(profileNames, profileName)
+		scanNames = append(scanNames, scanName)
+	}
+	clusterData.Profiles = profileNames
+	clusterData.Scans = scanNames
+	if clusterData.FailedInfo != nil {
+		clusterData.FailedInfo, err = g.getFailedClusterData(ctx, clusterData.FailedInfo, scanConfigID, clusterID)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to retrieve profiles associated with the cluster %q", clusterData.ClusterId)
+		}
+	}
+	return clusterData, nil
+}
+
+func (g *Aggregator) getFailedClusterData(ctx context.Context, failedCluster *report.FailedCluster, scanConfigID, clusterID string) (*report.FailedCluster, error) {
+	var profileRefIDs []string
+	for _, scan := range failedCluster.Scans {
+		profileRefIDs = append(profileRefIDs, scan.GetProfile().GetProfileRefId())
+	}
+	scanNameToProfileName, err := g.scanDS.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfigID, clusterID, profileRefIDs)
+	if err != nil {
+		return nil, err
+	}
+	var profileNames []string
+	for _, scan := range failedCluster.Scans {
+		if profileName, ok := scanNameToProfileName[scan.GetScanName()]; ok {
+			profileNames = append(profileNames, profileName)
+		}
+	}
+	failedCluster.Profiles = profileNames
+	return failedCluster, nil
+}
+
+func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, clusterID string, clusterData *report.ClusterData) ([]*report.ResultRow, *checkStatus, error) {
 	var ret []*report.ResultRow
 	statuses := &checkStatus{
 		totalPass:  0,
 		totalFail:  0,
 		totalMixed: 0,
 	}
-	// If the cluster is in the failedClusters map, we do not retrieve the data
-	if _, ok := failedClusters[clusterID]; ok {
-		return ret, statuses, nil
+	scans := clusterData.Scans
+	if clusterData.FailedInfo != nil {
+		allScansSet := set.NewStringSet(scans...)
+		failedScansSet := set.NewStringSet()
+		for _, scan := range clusterData.FailedInfo.Scans {
+			failedScansSet.Add(scan.GetScanName())
+		}
+		scans = allScansSet.Difference(failedScansSet).AsSlice()
 	}
-	scanConfigQuery := search.NewQueryBuilder().AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).
+	scanConfigQuery := search.NewQueryBuilder().
+		AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).
 		AddExactMatches(search.ClusterID, clusterID).
+		AddExactMatches(search.ComplianceOperatorScanName, scans...).
ProtoQuery() err := g.checkResultsDS.WalkByQuery(ctx, scanConfigQuery, g.aggreateResults(ctx, clusterID, &ret, statuses)) return ret, statuses, err diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go index 3268e3c62ea8f..cb78a10d29947 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go @@ -15,7 +15,9 @@ import ( "github.com/stackrox/rox/central/complianceoperator/v2/rules/datastore" ruleMocks "github.com/stackrox/rox/central/complianceoperator/v2/rules/datastore/mocks" scanMocks "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore/mocks" + v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/search" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -50,10 +52,10 @@ type getReportDataTestCase struct { numFailedChecksPerCluster int numMixedChecksPerCluster int numFailedClusters int - expectedErr error + expectedWalkByErr error } -func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { +func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataResultsGeneration() { cases := map[string]getReportDataTestCase{ "generate report data no error": { numClusters: 2, @@ -71,38 +73,41 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { numFailedClusters: 1, }, "generate report walk by error": { - numClusters: 3, - numProfiles: 4, - expectedErr: errors.New("error"), + numClusters: 3, + numProfiles: 4, + expectedWalkByErr: errors.New("error"), }, } for tname, tcase := range cases { s.Run(tname, func() { ctx := context.Background() req := getRequest(ctx, tcase.numClusters, tcase.numProfiles, tcase.numFailedClusters) - s.checkResultsDS.EXPECT().WalkByQuery(gomock.Eq(ctx), gomock.Any(), gomock.Any()). - Times(tcase.numClusters). 
- DoAndReturn(func(_, _ any, fn checkResultWalkByQuery) error { - for i := 0; i < tcase.numPassedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("pass-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_PASS, - }) - } - for i := 0; i < tcase.numFailedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("fail-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_FAIL, - }) + s.scanDS.EXPECT().GetProfilesScanNamesByScanConfigAndCluster(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { + for _, cluster := range req.ClusterData { + if target == cluster.ClusterId { + return true } - for i := 0; i < tcase.numMixedChecksPerCluster; i++ { - _ = fn(&storage.ComplianceOperatorCheckResultV2{ - CheckName: fmt.Sprintf("mixed-check-%d", i), - Status: storage.ComplianceOperatorCheckResultV2_INCONSISTENT, - }) + } + return false + })).Times(tcase.numClusters + tcase.numFailedClusters) + s.scanDS.EXPECT().GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { + for _, cluster := range req.ClusterData { + if cluster.FailedInfo != nil { + if target == cluster.ClusterId { + return true + } } - return tcase.expectedErr - }) + } + return false + }), gomock.Any()).Times(tcase.numFailedClusters) + s.checkResultsDS.EXPECT().WalkByQuery(gomock.Eq(ctx), gomock.Any(), gomock.Any()). + Times(tcase.numClusters + tcase.numFailedClusters). + DoAndReturn(fakeWalkByResponse( + req.ClusterData, + tcase.expectedWalkByErr, + tcase.numPassedChecksPerCluster, + tcase.numFailedChecksPerCluster, + tcase.numMixedChecksPerCluster)) s.aggregator.aggreateResults = mockWalkByQueryWrapper res := s.aggregator.GetReportData(req) assertResults(s.T(), tcase, res) @@ -110,6 +115,238 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportData() { } } +func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataGetClusterDataErrors() { + cases := map[string]struct { + expectedGetScanToProfileErr []error + expectedGetScanToProfileWithRef []error + emptyClusterData bool + numClusters int + numProfiles int + numFailedClusters int + numPassedChecksPerCluster int + numFailedChecksPerCluster int + numMixedChecksPerCluster int + expectClusters []string + expectFailedClusters map[string]struct{} + }{ + "empty cluster data should generate to empty results": { + emptyClusterData: true, + numClusters: 2, + numProfiles: 2, + numFailedClusters: 1, + }, + "get scan to profile data fails in the first cluster": { + expectedGetScanToProfileErr: []error{errors.New("some error"), nil}, + numClusters: 2, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-1"}, + }, + "get scan to profile data fails always": { + expectedGetScanToProfileErr: []error{errors.New("some error"), errors.New("some error")}, + numClusters: 2, + numProfiles: 2, + }, + "get scan to profile data fails always after the first call": { + expectedGetScanToProfileErr: []error{nil, errors.New("some error"), errors.New("some error")}, + numClusters: 3, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-0"}, + }, + "get scan to profile data fails on the failed cluster": { + expectedGetScanToProfileErr: []error{nil, nil, errors.New("some error")}, + numClusters: 2, + 
numFailedClusters: 1, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-0", "cluster-1"}, + }, + "get scan to profile data with scan ref fails on the first failed cluster": { + expectedGetScanToProfileErr: []error{nil, nil, nil, nil}, + expectedGetScanToProfileWithRef: []error{errors.New("some error"), nil}, + numClusters: 2, + numFailedClusters: 2, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-0", "cluster-1", "cluster-3"}, + expectFailedClusters: map[string]struct{}{ + "cluster-3": {}, + }, + }, + "get scan to profile data with scan ref fails always": { + expectedGetScanToProfileErr: []error{nil, nil, nil, nil}, + expectedGetScanToProfileWithRef: []error{errors.New("some error"), errors.New("some error")}, + numClusters: 2, + numFailedClusters: 2, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-0", "cluster-1"}, + }, + "get scan to profile data with scan ref fails after the first call": { + expectedGetScanToProfileErr: []error{nil, nil, nil, nil, nil}, + expectedGetScanToProfileWithRef: []error{nil, errors.New("some error"), errors.New("some error")}, + numClusters: 2, + numFailedClusters: 3, + numProfiles: 2, + numPassedChecksPerCluster: 2, + numFailedChecksPerCluster: 2, + numMixedChecksPerCluster: 2, + expectClusters: []string{"cluster-0", "cluster-1", "cluster-2"}, + expectFailedClusters: map[string]struct{}{ + "cluster-2": {}, + }, + }, + } + ctx := context.Background() + for tName, tCase := range cases { + s.Run(tName, func() { + expectWalkByQueryCallsWithFailedCluster := len(tCase.expectFailedClusters) + expectClustersWithResults := len(tCase.expectClusters) - expectWalkByQueryCallsWithFailedCluster + req := getRequest(ctx, tCase.numClusters, tCase.numProfiles, tCase.numFailedClusters) + if tCase.emptyClusterData { + req.ClusterData = make(map[string]*report.ClusterData) + } + scanToProfilesMap := getScanToProfileMap(tCase.numProfiles) + var getScanToProfileCalls []any + for _, err := range tCase.expectedGetScanToProfileErr { + call := s.scanDS.EXPECT().GetProfilesScanNamesByScanConfigAndCluster(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { + for _, cluster := range req.ClusterData { + if target == cluster.ClusterId { + return true + } + } + return false + })).Times(1). + Return(scanToProfilesMap, err) + + getScanToProfileCalls = append(getScanToProfileCalls, call) + } + if len(getScanToProfileCalls) > 0 { + gomock.InOrder(getScanToProfileCalls...) + } + var getScanToProfileWithRefCalls []any + for _, err := range tCase.expectedGetScanToProfileWithRef { + call := s.scanDS.EXPECT().GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { + for _, cluster := range req.ClusterData { + if target == cluster.ClusterId { + return true + } + } + return false + }), gomock.Any()).Times(1). + Return(scanToProfilesMap, err) + getScanToProfileWithRefCalls = append(getScanToProfileWithRefCalls, call) + } + if len(getScanToProfileWithRefCalls) > 0 { + gomock.InOrder(getScanToProfileWithRefCalls...) + } + + s.checkResultsDS.EXPECT().WalkByQuery(gomock.Any(), gomock.Any(), gomock.Any()). + Times(expectClustersWithResults + expectWalkByQueryCallsWithFailedCluster). 
+ DoAndReturn(fakeWalkByResponse( + req.ClusterData, + nil, + tCase.numPassedChecksPerCluster, + tCase.numFailedChecksPerCluster, + tCase.numMixedChecksPerCluster)) + + s.aggregator.aggreateResults = mockWalkByQueryWrapper + res := s.aggregator.GetReportData(req) + s.Require().NotNil(res) + s.Assert().Equal(tCase.numClusters+tCase.numFailedClusters, res.Clusters) + s.Assert().Equal(req.Profiles, res.Profiles) + s.Assert().Equal(expectClustersWithResults*tCase.numPassedChecksPerCluster, res.TotalPass) + s.Assert().Equal(expectClustersWithResults*tCase.numFailedChecksPerCluster, res.TotalFail) + s.Assert().Equal(expectClustersWithResults*tCase.numMixedChecksPerCluster, res.TotalMixed) + if tCase.emptyClusterData { + s.Assert().Len(res.ClustersData, 0) + s.Assert().Len(res.ResultCSVs, 0) + return + } + s.Assert().Len(res.ClustersData, expectClustersWithResults+expectWalkByQueryCallsWithFailedCluster) + s.Assert().Len(res.ResultCSVs, expectClustersWithResults+expectWalkByQueryCallsWithFailedCluster) + for _, clusterID := range tCase.expectClusters { + data := res.ClustersData[clusterID] + s.Require().NotNil(data) + for scan, profile := range scanToProfilesMap { + s.Assert().Contains(data.Profiles, profile) + s.Assert().Contains(data.Scans, scan) + } + s.Assert().Equal(clusterID, data.ClusterId) + s.Assert().Equal(clusterID, data.ClusterName) + if _, ok := tCase.expectFailedClusters[clusterID]; ok { + s.Require().NotNil(data.FailedInfo) + s.Assert().Equal(clusterID, data.FailedInfo.GetClusterId()) + s.Assert().Equal(clusterID, data.FailedInfo.GetClusterName()) + s.Assert().Equal([]string{"timeout"}, data.FailedInfo.GetReasons()) + s.Assert().Equal("v1.6.0", data.FailedInfo.GetOperatorVersion()) + for scan, profile := range scanToProfilesMap { + s.Assert().Contains(data.FailedInfo.Profiles, profile) + scanFound := false + for _, storageScan := range data.FailedInfo.Scans { + if storageScan.GetScanName() == scan { + scanFound = true + } + } + s.Assert().Truef(scanFound, "expected scan %q not found", scan) + } + } + } + }) + } +} + +func fakeWalkByResponse( + clusterData map[string]*report.ClusterData, + expectedErr error, + numPassedChecksPerCluster int, + numFailedChecksPerCluster int, + numMixedChecksPerCluster int, +) func(context.Context, *v1.Query, checkResultWalkByQuery) error { + return func(_ context.Context, query *v1.Query, fn checkResultWalkByQuery) error { + for _, q := range query.GetConjunction().GetQueries() { + if q.GetBaseQuery().GetMatchFieldQuery().GetField() == search.ClusterID.String() { + val := strings.Trim(q.GetBaseQuery().GetMatchFieldQuery().GetValue(), "\"") + if cluster, ok := clusterData[val]; ok { + if cluster.FailedInfo != nil { + return expectedErr + } + } + } + } + for i := 0; i < numPassedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("pass-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_PASS, + }) + } + for i := 0; i < numFailedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("fail-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_FAIL, + }) + } + for i := 0; i < numMixedChecksPerCluster; i++ { + _ = fn(&storage.ComplianceOperatorCheckResultV2{ + CheckName: fmt.Sprintf("mixed-check-%d", i), + Status: storage.ComplianceOperatorCheckResultV2_INCONSISTENT, + }) + } + return expectedErr + } +} + var ( profiles = []*storage.ComplianceOperatorProfileV2{ { @@ -405,15 +642,44 @@ func getRequest(ctx context.Context, numClusters, 
numProfiles, numFailedClusters ClusterIDs: getNames("cluster", numClusters), Profiles: getNames("profile", numProfiles), } + clusterData := make(map[string]*report.ClusterData) + for i := 0; i < numClusters+numFailedClusters; i++ { + id := fmt.Sprintf("cluster-%d", i) + var profileNames []string + for j := 0; j < numProfiles; j++ { + profileNames = append(profileNames, fmt.Sprintf("profile-%d", j)) + } + clusterData[id] = &report.ClusterData{ + ClusterId: id, + ClusterName: id, + Profiles: profileNames, + Scans: profileNames, + } + } if numFailedClusters > 0 { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) for i := numClusters; i < numFailedClusters+numClusters; i++ { id := fmt.Sprintf("cluster-%d", i) ret.ClusterIDs = append(ret.ClusterIDs, id) - failedClusters[id] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{} + failedInfo := &report.FailedCluster{} + failedInfo.ClusterId = id + failedInfo.ClusterName = id + failedInfo.Profiles = clusterData[id].Profiles + failedInfo.Scans = func() []*storage.ComplianceOperatorScanV2 { + var scans []*storage.ComplianceOperatorScanV2 + for _, scanName := range clusterData[id].Scans { + scans = append(scans, &storage.ComplianceOperatorScanV2{ + ScanName: scanName, + }) + } + return scans + }() + failedInfo.Reasons = []string{"timeout"} + failedInfo.OperatorVersion = "v1.6.0" + clusterData[id].FailedInfo = failedInfo } - ret.FailedClusters = failedClusters + ret.FailedClusters = numFailedClusters } + ret.ClusterData = clusterData return ret } @@ -450,7 +716,7 @@ func getRowFromCluster(check, clusterID string) *report.ResultRow { func assertResults(t *testing.T, tcase getReportDataTestCase, res *report.Results) { assert.Equal(t, tcase.numClusters+tcase.numFailedClusters, res.Clusters) assert.Equal(t, tcase.numProfiles, len(res.Profiles)) - if tcase.expectedErr != nil { + if tcase.expectedWalkByErr != nil { assert.Equal(t, 0, res.TotalPass) assert.Equal(t, 0, res.TotalFail) assert.Equal(t, 0, res.TotalMixed) @@ -566,3 +832,12 @@ func assertResult(t *testing.T, tcase walkByQueryTestCase, row *report.ResultRow } assert.Equal(t, strings.Join(expControlInfos, ","), row.ControlRef) } + +func getScanToProfileMap(numProfiles int) map[string]string { + ret := make(map[string]string) + for i := 0; i < numProfiles; i++ { + name := fmt.Sprintf("profile-%d", i) + ret[name] = name + } + return ret +} diff --git a/central/complianceoperator/v2/report/manager/watcher/validator.go b/central/complianceoperator/v2/report/manager/watcher/validator.go index 356e2be2ac3fe..9271e62407ca6 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator.go @@ -13,8 +13,8 @@ import ( ) // ValidateScanConfigResults returns a map with the clusters that failed to be scanned. 
-func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, error) { - failedClusters := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) +func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (map[string]*report.FailedCluster, error) { + failedClusters := make(map[string]*report.FailedCluster) errList := errorhelpers.NewErrorList("failed clusters") clustersWithResults := set.NewStringSet() for _, scanResult := range results.ScanResults { @@ -26,6 +26,7 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.GetClusterId()))) if previousFailedInfo, ok := failedClusters[failedClusterInfo.GetClusterId()]; ok && !isInstallationError { previousFailedInfo.Reasons = append(previousFailedInfo.GetReasons(), failedClusterInfo.GetReasons()...) + previousFailedInfo.Scans = append(previousFailedInfo.Scans, failedClusterInfo.Scans...) continue } failedClusters[failedClusterInfo.GetClusterId()] = failedClusterInfo @@ -59,7 +60,7 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe } // ValidateScanResults if there are no errors in the scan results, it returns nil; otherwise it returns the failed cluster information -func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (failedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster, isInstallationError bool) { +func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integrationDataStore complianceIntegrationDS.DataStore) (failedCluster *report.FailedCluster, isInstallationError bool) { if results.Error == nil { return nil, false } @@ -67,6 +68,7 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ if len(ret.Reasons) > 0 { return ret, true } + ret.Scans = []*storage.ComplianceOperatorScanV2{results.Scan} if errors.Is(results.Error, ErrScanRemoved) { ret.Reasons = []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, results.Scan.GetScanName())} return ret, false @@ -84,11 +86,9 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ } // ValidateClusterHealth returns the health status of the Compliance Operator Integration -func ValidateClusterHealth(ctx context.Context, clusterID string, integrationDataStore complianceIntegrationDS.DataStore) *storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - ret := &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - } +func ValidateClusterHealth(ctx context.Context, clusterID string, integrationDataStore complianceIntegrationDS.DataStore) *report.FailedCluster { + ret := &report.FailedCluster{} + ret.ClusterId = clusterID coStatus, err := IsComplianceOperatorHealthy(ctx, clusterID, integrationDataStore) if errors.Is(err, ErrComplianceOperatorIntegrationDataStore) || errors.Is(err, ErrComplianceOperatorIntegrationZeroIntegrations) { ret.Reasons = []string{report.INTERNAL_ERROR} diff --git a/central/complianceoperator/v2/report/manager/watcher/validator_test.go b/central/complianceoperator/v2/report/manager/watcher/validator_test.go index 
097fe76a6c297..f9db02cb53df1 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator_test.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator_test.go @@ -9,11 +9,11 @@ import ( mocksComplianceIntegrationDS "github.com/stackrox/rox/central/complianceoperator/v2/integration/datastore/mocks" "github.com/stackrox/rox/central/complianceoperator/v2/report" "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/protoassert" "github.com/stackrox/rox/pkg/set" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" + "google.golang.org/protobuf/proto" ) const ( @@ -35,14 +35,14 @@ func TestValidateScanConfigResults(t *testing.T) { cases := map[string]struct { results *ScanConfigWatcherResults expectFn func(*mocksComplianceIntegrationDS.MockDataStore) - expectedFailedClusters map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster + expectedFailedClusters map[string]*report.FailedCluster expectedError bool expectedExactError error }{ "no error": { results: getScanConfigResults(2, 0, 0, 1, nil), expectFn: withExpectCall(nil), - expectedFailedClusters: make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster), + expectedFailedClusters: make(map[string]*report.FailedCluster), }, "two failed clusters": { results: getScanConfigResults(2, 2, 0, 1, nil), @@ -56,7 +56,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, }, "two failed clusters with two scans": { @@ -71,7 +71,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 2), + expectedFailedClusters: getFailedClusters(2, 2, 0, 2), expectedError: true, }, "two failed clusters scan config watcher timeout": { @@ -86,7 +86,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, expectedExactError: report.ErrScanConfigWatcherTimeout, }, @@ -102,7 +102,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 2, 0, 1), expectedError: true, expectedExactError: report.ErrScanWatchersFailed, }, @@ -118,7 +118,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 2, 1), + expectedFailedClusters: getFailedClusters(2, 0, 2, 1), expectedError: true, }, "two missing clusters and two failed clusters": { @@ -133,7 +133,7 @@ func TestValidateScanConfigResults(t *testing.T) { }, }, nil) }), - expectedFailedClusters: getFailedClusters(2, 4, 1), + expectedFailedClusters: getFailedClusters(2, 2, 2, 1), expectedError: true, }, } @@ -146,7 +146,7 @@ func TestValidateScanConfigResults(t *testing.T) { for id, failedCluster := range tCase.expectedFailedClusters { actual, ok := res[id] require.True(tt, ok) - protoassert.Equal(tt, failedCluster, actual) + assertFailedCluster(tt, failedCluster, actual) } if tCase.expectedError { assert.Error(tt, err) @@ -169,7 +169,7 @@ func TestValidateScanResults(t *testing.T) { operatorStatus []*storage.ComplianceIntegration expectDSError error results *ScanWatcherResults - expectedFailedCluster *storage.ComplianceOperatorReportSnapshotV2_FailedCluster + expectedFailedCluster 
*report.FailedCluster expectedInstallationError bool }{ "no error": { @@ -186,12 +186,8 @@ func TestValidateScanResults(t *testing.T) { }, Error: errors.New("some error"), }, - expectDSError: errors.New("some error"), - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.INTERNAL_ERROR}, - }, + expectDSError: errors.New("some error"), + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.INTERNAL_ERROR}, false), expectedInstallationError: true, }, "internal error due to no integration retrieved from data store": { @@ -201,13 +197,9 @@ func TestValidateScanResults(t *testing.T) { }, Error: errors.New("some error"), }, - operatorStatus: []*storage.ComplianceIntegration{}, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.INTERNAL_ERROR}, - }, + operatorStatus: []*storage.ComplianceIntegration{}, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.INTERNAL_ERROR}, false), expectedInstallationError: true, }, "operator not installed": { @@ -222,12 +214,8 @@ func TestValidateScanResults(t *testing.T) { OperatorInstalled: false, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: "", - Reasons: []string{report.COMPLIANCE_NOT_INSTALLED}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, "", []string{report.COMPLIANCE_NOT_INSTALLED}, false), expectedInstallationError: true, }, "operator old version": { @@ -244,12 +232,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: oldVersion, - Reasons: []string{report.COMPLIANCE_VERSION_ERROR}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, oldVersion, []string{report.COMPLIANCE_VERSION_ERROR}, false), expectedInstallationError: true, }, "scan removed error": { @@ -267,12 +251,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, scanName)}, true), }, "scan timeout error": { results: &ScanWatcherResults{ @@ -290,12 +270,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_TIMEOUT_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_TIMEOUT_FMT, scanName)}, true), }, "sensor context canceled error": { results: &ScanWatcherResults{ @@ -313,12 +289,8 @@ func TestValidateScanResults(t *testing.T) 
{ OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{fmt.Sprintf(report.SCAN_TIMEOUT_SENSOR_DISCONNECTED_FMT, scanName)}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{fmt.Sprintf(report.SCAN_TIMEOUT_SENSOR_DISCONNECTED_FMT, scanName)}, true), }, "internal error due context canceled error": { results: &ScanWatcherResults{ @@ -336,12 +308,8 @@ func TestValidateScanResults(t *testing.T) { OperatorStatus: storage.COStatus_HEALTHY, }, }, - expectDSError: nil, - expectedFailedCluster: &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: clusterID, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{report.INTERNAL_ERROR}, - }, + expectDSError: nil, + expectedFailedCluster: newFailedCluster(clusterID, minimumComplianceOperatorVersion, []string{report.INTERNAL_ERROR}, true), }, } for tName, tCase := range cases { @@ -352,7 +320,7 @@ func TestValidateScanResults(t *testing.T) { Return(tCase.operatorStatus, tCase.expectDSError) } res, isInstallationError := ValidateScanResults(ctx, tCase.results, coIntegrationDS) - protoassert.Equal(tt, tCase.expectedFailedCluster, res) + assertFailedCluster(tt, tCase.expectedFailedCluster, res) assert.Equal(tt, tCase.expectedInstallationError, isInstallationError) }) } @@ -477,24 +445,72 @@ func getScanConfigResults(numSuccessfulClusters, numFailedClusters, numMissingCl } } -func getFailedClusters(idx, numClusters, numScans int) map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster { - ret := make(map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster) - for i := idx; i < idx+numClusters; i++ { +func getFailedClusters(idx, numFailedClusters, numMissingClusters, numScans int) map[string]*report.FailedCluster { + ret := make(map[string]*report.FailedCluster) + for i := idx; i < idx+numFailedClusters; i++ { id := fmt.Sprintf("cluster-%d", i) - ret[id] = &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: id, - OperatorVersion: minimumComplianceOperatorVersion, - Reasons: []string{report.INTERNAL_ERROR}, - } + failedCluster := &report.FailedCluster{} + failedCluster.ClusterId = id + failedCluster.OperatorVersion = minimumComplianceOperatorVersion + failedCluster.Reasons = []string{report.INTERNAL_ERROR} + ret[id] = failedCluster var reasons []string for j := 0; j < numScans; j++ { reasons = append(reasons, report.INTERNAL_ERROR) + failedCluster.Scans = append(failedCluster.Scans, &storage.ComplianceOperatorScanV2{ + ClusterId: id, + }) } ret[id].Reasons = reasons } + for i := idx + numFailedClusters; i < idx+numFailedClusters+numMissingClusters; i++ { + id := fmt.Sprintf("cluster-%d", i) + failedCluster := &report.FailedCluster{} + failedCluster.ClusterId = id + failedCluster.OperatorVersion = minimumComplianceOperatorVersion + failedCluster.Reasons = []string{report.INTERNAL_ERROR} + ret[id] = failedCluster + } return ret } +func newFailedCluster(clusterID, coVersion string, reasons []string, expectScan bool) *report.FailedCluster { + ret := &report.FailedCluster{} + ret.ClusterId = clusterID + ret.OperatorVersion = coVersion + ret.Reasons = reasons + if expectScan { + ret.Scans = []*storage.ComplianceOperatorScanV2{ + { + ClusterId: clusterID, + ScanName: scanName, + }, + } + } + return ret +} + +func 
assertFailedCluster(t *testing.T, expected, actual *report.FailedCluster) {
+	if expected == nil && actual == nil {
+		return
+	}
+	assert.Equal(t, expected.GetClusterId(), actual.GetClusterId())
+	assert.Equal(t, expected.GetClusterName(), actual.GetClusterName())
+	assert.Equal(t, expected.GetOperatorVersion(), actual.GetOperatorVersion())
+	assert.Equal(t, expected.GetReasons(), actual.GetReasons())
+	assert.Equal(t, len(expected.Scans), len(actual.Scans))
+	for _, expectedScan := range expected.Scans {
+		found := false
+		for _, actualScan := range actual.Scans {
+			if proto.Equal(expectedScan, actualScan) {
+				found = true
+				break
+			}
+		}
+		assert.Truef(t, found, "expected scan %v not found", expectedScan)
+	}
+}
+
 type clusterIdMatcher struct {
 	ids   set.StringSet
 	error string
diff --git a/central/complianceoperator/v2/report/request.go b/central/complianceoperator/v2/report/request.go
index 89f593b112a1f..763f7865541b8 100644
--- a/central/complianceoperator/v2/report/request.go
+++ b/central/complianceoperator/v2/report/request.go
@@ -16,5 +16,22 @@ type Request struct {
 	Ctx                context.Context
 	SnapshotID         string
 	NotificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod
-	FailedClusters     map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster
+	ClusterData        map[string]*ClusterData
+	FailedClusters     int
+}
+
+// ClusterData holds the metadata for the clusters
+type ClusterData struct {
+	ClusterId   string
+	ClusterName string
+	Profiles    []string
+	Scans       []string
+	FailedInfo  *FailedCluster
+}
+
+// FailedCluster holds the information of a failed cluster
+type FailedCluster struct {
+	storage.ComplianceOperatorReportSnapshotV2_FailedCluster
+	Scans    []*storage.ComplianceOperatorScanV2
+	Profiles []string
 }
diff --git a/central/complianceoperator/v2/report/result.go b/central/complianceoperator/v2/report/result.go
index ca717acca4e79..3d79245867950 100644
--- a/central/complianceoperator/v2/report/result.go
+++ b/central/complianceoperator/v2/report/result.go
@@ -15,10 +15,11 @@ type ResultRow struct {
 // Results struct which holds the results of a report.
 type Results struct {
-	ResultCSVs map[string][]*ResultRow // map of cluster id to slice of *resultRow
-	TotalPass  int
-	TotalFail  int
-	TotalMixed int
-	Profiles   []string
-	Clusters   int
+	ResultCSVs   map[string][]*ResultRow // map of cluster id to slice of *resultRow
+	TotalPass    int
+	TotalFail    int
+	TotalMixed   int
+	Profiles     []string
+	Clusters     int
+	ClustersData map[string]*ClusterData
 }
diff --git a/central/complianceoperator/v2/scans/datastore/datastore.go b/central/complianceoperator/v2/scans/datastore/datastore.go
index ef96ee5da4a3a..56a55af40c964 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore.go
@@ -31,11 +31,18 @@ type DataStore interface {
 	// SearchScans returns the scans for the given query
 	SearchScans(ctx context.Context, query *v1.Query) ([]*storage.ComplianceOperatorScanV2, error)
+
+	// GetProfilesScanNamesByScanConfigAndCluster returns a map of scan name to expanded profile name for the given scan configuration and cluster
+	GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error)
+
+	// GetProfileScanNamesByScanConfigClusterAndProfileRef additionally restricts the result to scans matching the given profile ref IDs
+	GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfig, clusterID string, profileRefs []string) (map[string]string, error)
 }
 
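A short usage sketch for the two query methods added to the interface above; the IDs are placeholders. Both return a map keyed by scan name whose values are the expanded, human-readable profile names:

	scanToProfile, err := scanDS.GetProfilesScanNamesByScanConfigAndCluster(ctx, scanConfigID, clusterID)
	if err != nil {
		return err
	}
	// e.g. scan "ocp4-cis-node-worker" for profile "ocp4-cis-node" maps to "ocp4-cis-node-worker".
	for scanName, profileName := range scanToProfile {
		log.Debugf("scan %s covers profile %s", scanName, profileName)
	}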
 // New returns an instance of DataStore.
-func New(complianceScanStorage pgStore.Store) DataStore {
+func New(complianceScanStorage pgStore.Store, db postgres.DB) DataStore {
 	return &datastoreImpl{
+		db:    db,
 		store: complianceScanStorage,
 	}
 }
@@ -43,5 +48,5 @@ func New(complianceScanStorage pgStore.Store) DataStore {
 // GetTestPostgresDataStore provides a datastore connected to postgres for testing purposes.
 func GetTestPostgresDataStore(_ testing.TB, pool postgres.DB) DataStore {
 	store := pgStore.New(pool)
-	return New(store)
+	return New(store, pool)
 }
diff --git a/central/complianceoperator/v2/scans/datastore/datastore_impl.go b/central/complianceoperator/v2/scans/datastore/datastore_impl.go
index cc821c245b263..2d2a2e5f6feac 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore_impl.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore_impl.go
@@ -2,15 +2,21 @@ package datastore
 
 import (
 	"context"
+	"fmt"
+	"strings"
 
-	"github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
+	pgStore "github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
 	v1 "github.com/stackrox/rox/generated/api/v1"
 	"github.com/stackrox/rox/generated/storage"
+	"github.com/stackrox/rox/pkg/postgres"
+	"github.com/stackrox/rox/pkg/postgres/schema"
 	"github.com/stackrox/rox/pkg/search"
+	pgSearch "github.com/stackrox/rox/pkg/search/postgres"
 )
 
 type datastoreImpl struct {
-	store postgres.Store
+	db    postgres.DB
+	store pgStore.Store
 }
 
 // GetScan retrieves the scan object from the database
@@ -50,3 +56,42 @@ func (d *datastoreImpl) DeleteScanByCluster(ctx context.Context, clusterID strin
 func (d *datastoreImpl) SearchScans(ctx context.Context, query *v1.Query) ([]*storage.ComplianceOperatorScanV2, error) {
 	return d.store.GetByQuery(ctx, query)
 }
+
+func (d *datastoreImpl) GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error) {
+	return d.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfigID, clusterID, []string{})
+}
+
+func (d *datastoreImpl) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfigID, clusterID string, profileRefs []string) (map[string]string, error) {
+	query := search.NewQueryBuilder().
+		AddExactMatches(search.ClusterID, clusterID).
+		AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).
+		ProtoQuery()
+	if len(profileRefs) > 0 {
+		query = search.ConjunctionQuery(
+			search.NewQueryBuilder().
+				AddExactMatches(search.ComplianceOperatorProfileRef, profileRefs...).
+				ProtoQuery(),
+			query)
+	}
+	type ScanProfiles struct {
+		ProfileName string `db:"compliance_profile_name"`
+		ScanName    string `db:"compliance_scan_name"`
+	}
+	query.Selects = []*v1.QuerySelect{
+		search.NewQuerySelect(search.ComplianceOperatorProfileName).Proto(),
+		search.NewQuerySelect(search.ComplianceOperatorScanName).Proto(),
+	}
+	results, err := pgSearch.RunSelectRequestForSchema[ScanProfiles](ctx, d.db, schema.ComplianceOperatorScanV2Schema, query)
+	if err != nil {
+		return nil, err
+	}
+	expandedProfileNames := make(map[string]string)
+	for _, result := range results {
+		name := result.ProfileName
+		if result.ProfileName != result.ScanName {
+			name = fmt.Sprintf("%s%s", result.ProfileName, strings.TrimPrefix(result.ScanName, result.ProfileName))
+		}
+		expandedProfileNames[result.ScanName] = name
+	}
+	return expandedProfileNames, nil
+}
diff --git a/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go b/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
index 65979409d3f73..b2392634970c7 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
@@ -4,8 +4,13 @@ package datastore
 
 import (
 	"context"
+	"fmt"
 	"testing"
 
+	profileStorage "github.com/stackrox/rox/central/complianceoperator/v2/profiles/datastore"
+	profileSearch "github.com/stackrox/rox/central/complianceoperator/v2/profiles/datastore/search"
+	profilePostgresStorage "github.com/stackrox/rox/central/complianceoperator/v2/profiles/store/postgres"
+	scanConfigStorage "github.com/stackrox/rox/central/complianceoperator/v2/scanconfigurations/datastore"
 	scanStorage "github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
 	"github.com/stackrox/rox/generated/storage"
 	"github.com/stackrox/rox/pkg/features"
@@ -275,10 +280,177 @@ func (s *complianceScanDataStoreTestSuite) TestDeleteScan() {
 	protoassert.Equal(s.T(), testScan2, retrievedObject)
 }
 
+func (s *complianceScanDataStoreTestSuite) TestGetProfileScanNames() {
+	// Create Test DataStores for ScanConfigurations and Profiles
+	scanConfigDS := scanConfigStorage.GetTestPostgresDataStore(s.T(), s.db)
+	profilePostgres := profilePostgresStorage.New(s.db)
+	searcher := profileSearch.New(profilePostgres)
+	profileDS := profileStorage.GetTestPostgresDataStore(s.T(), s.db, searcher)
+
+	profileName1 := "ocp4-cis"
+	profileName2 := "ocp4-cis-node"
+
+	// Define Profiles 1 and 2 in Cluster 1
+	profile1 := getTestProfile(profileName1, fixtureconsts.ComplianceProfileID1, testconsts.Cluster1)
+	profile2 := getTestProfile(profileName2, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1)
+
+	// Define Profiles 1 and 2 in Cluster 2
+	profile3 := getTestProfile(profileName1, fixtureconsts.ComplianceProfileID1, testconsts.Cluster2)
+	profile4 := getTestProfile(profileName2, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2)
+
+	// Define ScanConfiguration
+	scanConfigName := "scanConfig1"
+	scanConfig := getTestScanConfig(scanConfigName,
+		fixtureconsts.ComplianceScanConfigID1,
+		[]string{testconsts.Cluster1, testconsts.Cluster2},
+		[]string{profileName1, profileName2})
+
+	s.T().Cleanup(func() {
+		_, _ = scanConfigDS.DeleteScanConfiguration(s.hasWriteCtx, fixtureconsts.ComplianceScanConfigID1)
+		_ = profileDS.DeleteProfilesByCluster(s.hasWriteCtx, fixtureconsts.Cluster1)
+		_ = profileDS.DeleteProfilesByCluster(s.hasWriteCtx, fixtureconsts.Cluster2)
+	})
+
+	// Create Profiles 1 and 2 in Cluster 1
+
s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile1)) + s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile2)) + + // Create Profiles 1 and 2 in Cluster 2 + s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile3)) + s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile4)) + + // Create ScanConfiguration + s.Require().NoError(scanConfigDS.UpsertScanConfiguration(s.hasWriteCtx, scanConfig)) + + // make sure we have nothing + ScanIDs, err := s.storage.GetIDs(s.hasReadCtx) + s.Require().NoError(err) + s.Require().Empty(ScanIDs) + + scanName1 := "ocp4-cis" + scanName2 := "ocp4-cis-node-worker" + scanName3 := "ocp4-cis-node-master" + + // Define Scan 1, 2, and 3 for Cluster 1 + testScan1 := getTestScanWithScanConfig(scanName1, scanConfigName, fixtureconsts.ComplianceProfileID1, testconsts.Cluster1) + testScan2 := getTestScanWithScanConfig(scanName2, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1) + testScan3 := getTestScanWithScanConfig(scanName3, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1) + + // Define Scan 1, 2, and 3 for Cluster 2 + testScan4 := getTestScanWithScanConfig(scanName1, scanConfigName, fixtureconsts.ComplianceProfileID1, testconsts.Cluster2) + testScan5 := getTestScanWithScanConfig(scanName2, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2) + testScan6 := getTestScanWithScanConfig(scanName3, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2) + + s.T().Cleanup(func() { + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan1.GetId()) + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan2.GetId()) + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan3.GetId()) + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan4.GetId()) + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan5.GetId()) + _ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan6.GetId()) + }) + + // Create Scans + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan1)) + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan2)) + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan3)) + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan4)) + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan5)) + s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan6)) + + s.Run("get scan to profiles from cluster 1", func() { + scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + + s.Assert().Contains(scanToProfileMap, scanName1) + s.Assert().Contains(scanToProfileMap, scanName2) + s.Assert().Contains(scanToProfileMap, scanName3) + + s.Assert().Equal(profileName1, scanToProfileMap[scanName1]) + s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2]) + s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3]) + }) + + s.Run("get scan to profiles from cluster 2", func() { + scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster2) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + + s.Assert().Contains(scanToProfileMap, scanName1) + s.Assert().Contains(scanToProfileMap, scanName2) + s.Assert().Contains(scanToProfileMap, scanName3) + + s.Assert().Equal(profileName1, 
scanToProfileMap[scanName1]) + s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2]) + s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3]) + }) + + s.Run("get empty scan to profiles (wrong cluster)", func() { + scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.WrongCluster) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + s.Assert().Len(scanToProfileMap, 0) + }) + + s.Run("get empty scan to profiles (wrong scan config)", func() { + scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID2, testconsts.Cluster1) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + s.Assert().Len(scanToProfileMap, 0) + }) + + s.Run("get scan to profiles with profile ref 1", func() { + scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID1}) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + + s.Assert().Contains(scanToProfileMap, scanName1) + s.Assert().NotContains(scanToProfileMap, scanName2) + s.Assert().NotContains(scanToProfileMap, scanName3) + + s.Assert().Equal(profileName1, scanToProfileMap[scanName1]) + s.Assert().Len(scanToProfileMap[scanName2], 0) + s.Assert().Len(scanToProfileMap[scanName3], 0) + }) + + s.Run("get scan to profiles with profile ref 2", func() { + scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID2}) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + + s.Assert().NotContains(scanToProfileMap, scanName1) + s.Assert().Contains(scanToProfileMap, scanName2) + s.Assert().Contains(scanToProfileMap, scanName3) + + s.Assert().Len(scanToProfileMap[scanName1], 0) + s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2]) + s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3]) + }) + + s.Run("get scan to profiles with profile ref 1 and 2", func() { + scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID1, fixtureconsts.ComplianceProfileID2}) + s.Require().NoError(err) + s.Require().NotNil(scanToProfileMap) + + s.Assert().Contains(scanToProfileMap, scanName1) + s.Assert().Contains(scanToProfileMap, scanName2) + s.Assert().Contains(scanToProfileMap, scanName3) + + s.Assert().Equal(profileName1, scanToProfileMap[scanName1]) + s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2]) + s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3]) + }) +} + func getTestScan(scanName string, profileID string, clusterID string) *storage.ComplianceOperatorScanV2 { + return getTestScanWithScanConfig(scanName, scanName, profileID, clusterID) +} + +func getTestScanWithScanConfig(scanName string, scanConfig string, profileID string, clusterID string) *storage.ComplianceOperatorScanV2 { return &storage.ComplianceOperatorScanV2{ Id: uuid.NewV4().String(), - ScanConfigName: scanName, + ScanConfigName: scanConfig, ScanName: scanName, ClusterId: 
clusterID, Errors: "", @@ -295,3 +467,37 @@ func getTestScan(scanName string, profileID string, clusterID string) *storage.C LastExecutedTime: protocompat.TimestampNow(), } } + +func getTestProfile(profileName string, profileRefID string, clusterID string) *storage.ComplianceOperatorProfileV2 { + return &storage.ComplianceOperatorProfileV2{ + Id: uuid.NewV4().String(), + Name: profileName, + ProfileRefId: profileRefID, + ClusterId: clusterID, + } +} + +func getTestScanConfig(scanConfigName string, scanConfigID string, clusterID []string, profiles []string) *storage.ComplianceOperatorScanConfigurationV2 { + return &storage.ComplianceOperatorScanConfigurationV2{ + Id: scanConfigID, + ScanConfigName: scanConfigName, + Profiles: func() []*storage.ComplianceOperatorScanConfigurationV2_ProfileName { + var ret []*storage.ComplianceOperatorScanConfigurationV2_ProfileName + for _, profile := range profiles { + ret = append(ret, &storage.ComplianceOperatorScanConfigurationV2_ProfileName{ + ProfileName: profile, + }) + } + return ret + }(), + Clusters: func() []*storage.ComplianceOperatorScanConfigurationV2_Cluster { + var ret []*storage.ComplianceOperatorScanConfigurationV2_Cluster + for _, cluster := range clusterID { + ret = append(ret, &storage.ComplianceOperatorScanConfigurationV2_Cluster{ + ClusterId: cluster, + }) + } + return ret + }(), + } +} diff --git a/central/complianceoperator/v2/scans/datastore/mocks/datastore.go b/central/complianceoperator/v2/scans/datastore/mocks/datastore.go index fab52feb0f8e7..9b4fabcb4567d 100644 --- a/central/complianceoperator/v2/scans/datastore/mocks/datastore.go +++ b/central/complianceoperator/v2/scans/datastore/mocks/datastore.go @@ -70,6 +70,36 @@ func (mr *MockDataStoreMockRecorder) DeleteScanByCluster(ctx, clusterID any) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScanByCluster", reflect.TypeOf((*MockDataStore)(nil).DeleteScanByCluster), ctx, clusterID) } +// GetProfileScanNamesByScanConfigClusterAndProfileRef mocks base method. +func (m *MockDataStore) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfig, clusterID string, profileRefs []string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProfileScanNamesByScanConfigClusterAndProfileRef", ctx, scanConfig, clusterID, profileRefs) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProfileScanNamesByScanConfigClusterAndProfileRef indicates an expected call of GetProfileScanNamesByScanConfigClusterAndProfileRef. +func (mr *MockDataStoreMockRecorder) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfig, clusterID, profileRefs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProfileScanNamesByScanConfigClusterAndProfileRef", reflect.TypeOf((*MockDataStore)(nil).GetProfileScanNamesByScanConfigClusterAndProfileRef), ctx, scanConfig, clusterID, profileRefs) +} + +// GetProfilesScanNamesByScanConfigAndCluster mocks base method. +func (m *MockDataStore) GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProfilesScanNamesByScanConfigAndCluster", ctx, scanConfigID, clusterID) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProfilesScanNamesByScanConfigAndCluster indicates an expected call of GetProfilesScanNamesByScanConfigAndCluster. 
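A hedged sketch of stubbing the new lookup with the generated mock in a unit test, assuming the standard gomock constructor for this mock; the expected values are placeholders:

	ctrl := gomock.NewController(t)
	ds := mocks.NewMockDataStore(ctrl)
	ds.EXPECT().
		GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), "scan-config-1", "cluster-1").
		Return(map[string]string{"ocp4-cis": "ocp4-cis"}, nil)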
+func (mr *MockDataStoreMockRecorder) GetProfilesScanNamesByScanConfigAndCluster(ctx, scanConfigID, clusterID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProfilesScanNamesByScanConfigAndCluster", reflect.TypeOf((*MockDataStore)(nil).GetProfilesScanNamesByScanConfigAndCluster), ctx, scanConfigID, clusterID) +} + // GetScan mocks base method. func (m *MockDataStore) GetScan(ctx context.Context, id string) (*storage.ComplianceOperatorScanV2, bool, error) { m.ctrl.T.Helper() diff --git a/central/complianceoperator/v2/scans/datastore/singleton.go b/central/complianceoperator/v2/scans/datastore/singleton.go index 0766482003777..115c800036ab0 100644 --- a/central/complianceoperator/v2/scans/datastore/singleton.go +++ b/central/complianceoperator/v2/scans/datastore/singleton.go @@ -14,7 +14,7 @@ var ( ) func initialize() { - dataStore = New(pgStore.New(globaldb.GetPostgres())) + dataStore = New(pgStore.New(globaldb.GetPostgres()), globaldb.GetPostgres()) } // Singleton provides the interface for non-service external interaction. diff --git a/pkg/fixtures/fixtureconsts/fixture_consts.go b/pkg/fixtures/fixtureconsts/fixture_consts.go index d3f15ad012e82..55b0b90a8934f 100644 --- a/pkg/fixtures/fixtureconsts/fixture_consts.go +++ b/pkg/fixtures/fixtureconsts/fixture_consts.go @@ -12,6 +12,8 @@ const ( Cluster3 = "caaaaaaa-bbbb-4011-0000-333333333333" ClusterFake1 = "caaaaaaa-bbbb-4011-9999-111111111111" ClusterFake2 = "caaaaaaa-bbbb-4011-9999-222222222222" + ComplianceProfileID1 = "caaaaaaa-bbbb-4011-1111-111111111111" + ComplianceProfileID2 = "caaaaaaa-bbbb-4011-2222-222222222222" ComplianceScanConfigID1 = "caaaaaaa-bbbb-4011-1111-111111111111" ComplianceScanConfigID2 = "caaaaaaa-bbbb-4011-2222-222222222222" ComplianceScanConfigID3 = "caaaaaaa-bbbb-4011-3333-333333333333" From 87daaefb19d5ba184d95d2f04689a3bc6b9a0b06 Mon Sep 17 00:00:00 2001 From: lvalerom Date: Mon, 26 May 2025 18:14:14 +0200 Subject: [PATCH 2/7] style --- .../v2/report/manager/format/formatter.go | 4 +- .../report/manager/format/formatter_test.go | 32 ++++----------- .../v2/report/manager/manager_impl.go | 8 ++-- .../results/results_aggregator_test.go | 39 +++++++++--------- .../v2/report/manager/watcher/validator.go | 17 ++++---- .../report/manager/watcher/validator_test.go | 41 ++++++++++--------- .../complianceoperator/v2/report/request.go | 9 ++-- 7 files changed, 72 insertions(+), 78 deletions(-) diff --git a/central/complianceoperator/v2/report/manager/format/formatter.go b/central/complianceoperator/v2/report/manager/format/formatter.go index e21193d156da8..fb79c0feb2dcc 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter.go +++ b/central/complianceoperator/v2/report/manager/format/formatter.go @@ -136,8 +136,8 @@ func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filena return err } csvWriter := f.newCSVWriter(failedClusterCSVHeader, true) - for _, reason := range failedCluster.GetReasons() { - csvWriter.AddValue(generateFailRecord(failedCluster.GetClusterId(), failedCluster.GetClusterName(), reason, failedCluster.GetOperatorVersion())) + for _, reason := range failedCluster.Reasons { + csvWriter.AddValue(generateFailRecord(failedCluster.ClusterId, failedCluster.ClusterName, reason, failedCluster.OperatorVersion)) } return csvWriter.WriteCSV(w) } diff --git a/central/complianceoperator/v2/report/manager/format/formatter_test.go b/central/complianceoperator/v2/report/manager/format/formatter_test.go index 
bcc53d26f468f..8897930dc6e0e 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter_test.go +++ b/central/complianceoperator/v2/report/manager/format/formatter_test.go @@ -76,7 +76,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste })).Times(2).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) })).Times(3) @@ -102,7 +102,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste })).Times(2).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) })).Times(3) @@ -141,7 +141,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithPartialFaile })).Times(3).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) || + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) || compareStringSlice(s.T(), target, generateRecord(results[clusterID2][0])) @@ -202,7 +202,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() { s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.GetClusterId(), failedInfo.GetClusterName(), failedInfo.GetReasons()[0], failedInfo.GetOperatorVersion())) + return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) })).Times(1) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) @@ -306,29 +306,15 @@ func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[s ClusterId: "test_cluster-2-id", Profiles: []string{"test_profile-1", "test_profile-2"}, } - 
clusterData[clusterID2].FailedInfo = &report.FailedCluster{} - clusterData[clusterID2].FailedInfo.Reasons = []string{"timeout"} - clusterData[clusterID2].FailedInfo.Profiles = []string{"test_profile-1", "test_profile-2"} - clusterData[clusterID2].FailedInfo.OperatorVersion = "v1.6.0" + clusterData[clusterID2].FailedInfo = &report.FailedCluster{ + Reasons: []string{"timeout"}, + OperatorVersion: "v1.6.0", + Profiles: []string{"test_profile-1", "test_profile-2"}, + } results := getFakeReportData() return results, clusterData } -func getFakeReportDataOnlyFailedCluster() (map[string][]*report.ResultRow, map[string]*report.ClusterData) { - failedClusters := make(map[string]*report.ClusterData) - failedClusters[clusterID2] = &report.ClusterData{ - ClusterName: "test_cluster-2", - ClusterId: "test_cluster-2-id", - Profiles: []string{"test_profile-1", "test_profile-2"}, - } - failedClusters[clusterID2].FailedInfo = &report.FailedCluster{} - failedClusters[clusterID2].FailedInfo.Reasons = []string{"timeout"} - failedClusters[clusterID2].FailedInfo.Profiles = []string{"test_profile-1", "test_profile-2"} - failedClusters[clusterID2].FailedInfo.OperatorVersion = "v1.6.0" - results := make(map[string][]*report.ResultRow) - return results, failedClusters -} - func getFakeClusterData() map[string]*report.ClusterData { ret := make(map[string]*report.ClusterData) ret[clusterID1] = &report.ClusterData{ diff --git a/central/complianceoperator/v2/report/manager/manager_impl.go b/central/complianceoperator/v2/report/manager/manager_impl.go index 6c6e918e6e32f..f0d0ec2aaedc0 100644 --- a/central/complianceoperator/v2/report/manager/manager_impl.go +++ b/central/complianceoperator/v2/report/manager/manager_impl.go @@ -580,10 +580,10 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters)) for _, failedCluster := range failedClusters { failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - ClusterId: failedCluster.GetClusterId(), - ClusterName: failedCluster.GetClusterName(), - OperatorVersion: failedCluster.GetOperatorVersion(), - Reasons: failedCluster.GetReasons(), + ClusterId: failedCluster.ClusterId, + ClusterName: failedCluster.ClusterName, + OperatorVersion: failedCluster.OperatorVersion, + Reasons: failedCluster.Reasons, }) } snapshot.FailedClusters = failedClustersSlice diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go index cb78a10d29947..2dae6140f9d88 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go @@ -287,10 +287,10 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataGetClusterDataError s.Assert().Equal(clusterID, data.ClusterName) if _, ok := tCase.expectFailedClusters[clusterID]; ok { s.Require().NotNil(data.FailedInfo) - s.Assert().Equal(clusterID, data.FailedInfo.GetClusterId()) - s.Assert().Equal(clusterID, data.FailedInfo.GetClusterName()) - s.Assert().Equal([]string{"timeout"}, data.FailedInfo.GetReasons()) - s.Assert().Equal("v1.6.0", data.FailedInfo.GetOperatorVersion()) + s.Assert().Equal(clusterID, data.FailedInfo.ClusterId) + s.Assert().Equal(clusterID, data.FailedInfo.ClusterName) + s.Assert().Equal([]string{"timeout"}, 
data.FailedInfo.Reasons) + s.Assert().Equal("v1.6.0", data.FailedInfo.OperatorVersion) for scan, profile := range scanToProfilesMap { s.Assert().Contains(data.FailedInfo.Profiles, profile) scanFound := false @@ -660,21 +660,22 @@ func getRequest(ctx context.Context, numClusters, numProfiles, numFailedClusters for i := numClusters; i < numFailedClusters+numClusters; i++ { id := fmt.Sprintf("cluster-%d", i) ret.ClusterIDs = append(ret.ClusterIDs, id) - failedInfo := &report.FailedCluster{} - failedInfo.ClusterId = id - failedInfo.ClusterName = id - failedInfo.Profiles = clusterData[id].Profiles - failedInfo.Scans = func() []*storage.ComplianceOperatorScanV2 { - var scans []*storage.ComplianceOperatorScanV2 - for _, scanName := range clusterData[id].Scans { - scans = append(scans, &storage.ComplianceOperatorScanV2{ - ScanName: scanName, - }) - } - return scans - }() - failedInfo.Reasons = []string{"timeout"} - failedInfo.OperatorVersion = "v1.6.0" + failedInfo := &report.FailedCluster{ + ClusterId: id, + ClusterName: id, + Reasons: []string{"timeout"}, + OperatorVersion: "v1.6.0", + Profiles: clusterData[id].Profiles, + Scans: func() []*storage.ComplianceOperatorScanV2 { + var scans []*storage.ComplianceOperatorScanV2 + for _, scanName := range clusterData[id].Scans { + scans = append(scans, &storage.ComplianceOperatorScanV2{ + ScanName: scanName, + }) + } + return scans + }(), + } clusterData[id].FailedInfo = failedInfo } ret.FailedClusters = numFailedClusters diff --git a/central/complianceoperator/v2/report/manager/watcher/validator.go b/central/complianceoperator/v2/report/manager/watcher/validator.go index 9271e62407ca6..86df7cd0871b3 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator.go @@ -23,13 +23,13 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe if failedClusterInfo == nil { continue } - errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.GetClusterId()))) - if previousFailedInfo, ok := failedClusters[failedClusterInfo.GetClusterId()]; ok && !isInstallationError { - previousFailedInfo.Reasons = append(previousFailedInfo.GetReasons(), failedClusterInfo.GetReasons()...) + errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.ClusterId))) + if previousFailedInfo, ok := failedClusters[failedClusterInfo.ClusterId]; ok && !isInstallationError { + previousFailedInfo.Reasons = append(previousFailedInfo.Reasons, failedClusterInfo.Reasons...) previousFailedInfo.Scans = append(previousFailedInfo.Scans, failedClusterInfo.Scans...) 
continue } - failedClusters[failedClusterInfo.GetClusterId()] = failedClusterInfo + failedClusters[failedClusterInfo.ClusterId] = failedClusterInfo } // If we have less results than the number of clusters*profiles in the scan configuration, @@ -40,11 +40,11 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe continue } clusterInfo := ValidateClusterHealth(ctx, cluster.GetClusterId(), integrationDataStore) - errList.AddError(errors.New(fmt.Sprintf("cluster %s failed", clusterInfo.GetClusterId()))) + errList.AddError(errors.New(fmt.Sprintf("cluster %s failed", clusterInfo.ClusterId))) if len(clusterInfo.Reasons) == 0 { clusterInfo.Reasons = []string{report.INTERNAL_ERROR} } - failedClusters[clusterInfo.GetClusterId()] = clusterInfo + failedClusters[clusterInfo.ClusterId] = clusterInfo } } if results.Error != nil && errors.Is(results.Error, ErrScanConfigTimeout) { @@ -87,8 +87,9 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ // ValidateClusterHealth returns the health status of the Compliance Operator Integration func ValidateClusterHealth(ctx context.Context, clusterID string, integrationDataStore complianceIntegrationDS.DataStore) *report.FailedCluster { - ret := &report.FailedCluster{} - ret.ClusterId = clusterID + ret := &report.FailedCluster{ + ClusterId: clusterID, + } coStatus, err := IsComplianceOperatorHealthy(ctx, clusterID, integrationDataStore) if errors.Is(err, ErrComplianceOperatorIntegrationDataStore) || errors.Is(err, ErrComplianceOperatorIntegrationZeroIntegrations) { ret.Reasons = []string{report.INTERNAL_ERROR} diff --git a/central/complianceoperator/v2/report/manager/watcher/validator_test.go b/central/complianceoperator/v2/report/manager/watcher/validator_test.go index f9db02cb53df1..ec570f51b5cfc 100644 --- a/central/complianceoperator/v2/report/manager/watcher/validator_test.go +++ b/central/complianceoperator/v2/report/manager/watcher/validator_test.go @@ -380,10 +380,10 @@ func TestValidateClusterHealth(t *testing.T) { Return(tCase.operatorStatus, tCase.expectDSError) res := ValidateClusterHealth(ctx, clusterID, coIntegrationDS) require.NotNil(tt, res) - assert.Equal(tt, clusterID, res.GetClusterId()) - assert.Equal(tt, tCase.expectedReason, res.GetReasons()) + assert.Equal(tt, clusterID, res.ClusterId) + assert.Equal(tt, tCase.expectedReason, res.Reasons) if len(tCase.operatorStatus) > 0 { - assert.Equal(tt, tCase.operatorStatus[0].GetVersion(), res.GetOperatorVersion()) + assert.Equal(tt, tCase.operatorStatus[0].GetVersion(), res.OperatorVersion) } }) } @@ -449,10 +449,11 @@ func getFailedClusters(idx, numFailedClusters, numMissingClusters, numScans int) ret := make(map[string]*report.FailedCluster) for i := idx; i < idx+numFailedClusters; i++ { id := fmt.Sprintf("cluster-%d", i) - failedCluster := &report.FailedCluster{} - failedCluster.ClusterId = id - failedCluster.OperatorVersion = minimumComplianceOperatorVersion - failedCluster.Reasons = []string{report.INTERNAL_ERROR} + failedCluster := &report.FailedCluster{ + ClusterId: id, + OperatorVersion: minimumComplianceOperatorVersion, + Reasons: []string{report.INTERNAL_ERROR}, + } ret[id] = failedCluster var reasons []string for j := 0; j < numScans; j++ { @@ -465,20 +466,22 @@ func getFailedClusters(idx, numFailedClusters, numMissingClusters, numScans int) } for i := idx + numFailedClusters; i < idx+numFailedClusters+numMissingClusters; i++ { id := fmt.Sprintf("cluster-%d", i) - failedCluster := &report.FailedCluster{} - failedCluster.ClusterId = 
id - failedCluster.OperatorVersion = minimumComplianceOperatorVersion - failedCluster.Reasons = []string{report.INTERNAL_ERROR} + failedCluster := &report.FailedCluster{ + ClusterId: id, + OperatorVersion: minimumComplianceOperatorVersion, + Reasons: []string{report.INTERNAL_ERROR}, + } ret[id] = failedCluster } return ret } func newFailedCluster(clusterID, coVersion string, reasons []string, expectScan bool) *report.FailedCluster { - ret := &report.FailedCluster{} - ret.ClusterId = clusterID - ret.OperatorVersion = coVersion - ret.Reasons = reasons + ret := &report.FailedCluster{ + ClusterId: clusterID, + OperatorVersion: coVersion, + Reasons: reasons, + } if expectScan { ret.Scans = []*storage.ComplianceOperatorScanV2{ { @@ -494,10 +497,10 @@ func assertFailedCluster(t *testing.T, expected, actual *report.FailedCluster) { if expected == nil && actual == nil { return } - assert.Equal(t, expected.GetClusterId(), actual.GetClusterId()) - assert.Equal(t, expected.GetClusterName(), actual.GetClusterName()) - assert.Equal(t, expected.GetOperatorVersion(), actual.GetOperatorVersion()) - assert.Equal(t, expected.GetReasons(), actual.GetReasons()) + assert.Equal(t, expected.ClusterId, actual.ClusterId) + assert.Equal(t, expected.ClusterName, actual.ClusterName) + assert.Equal(t, expected.OperatorVersion, actual.OperatorVersion) + assert.Equal(t, expected.Reasons, actual.Reasons) assert.Equal(t, len(expected.Scans), len(actual.Scans)) for _, expectedScan := range expected.Scans { found := false diff --git a/central/complianceoperator/v2/report/request.go b/central/complianceoperator/v2/report/request.go index 763f7865541b8..153abc5786db6 100644 --- a/central/complianceoperator/v2/report/request.go +++ b/central/complianceoperator/v2/report/request.go @@ -31,7 +31,10 @@ type ClusterData struct { // FailedCluster holds the information of a failed cluster type FailedCluster struct { - storage.ComplianceOperatorReportSnapshotV2_FailedCluster - Scans []*storage.ComplianceOperatorScanV2 - Profiles []string + ClusterId string + ClusterName string + Reasons []string + OperatorVersion string + Scans []*storage.ComplianceOperatorScanV2 + Profiles []string } From 3940915610cb397834eb59e8d3dacb9434c83b31 Mon Sep 17 00:00:00 2001 From: lvalerom Date: Fri, 30 May 2025 01:54:39 +0200 Subject: [PATCH 3/7] fix on demand report generation with cluster data refactors --- .../v2/report/datastore/datastore.go | 3 + .../v2/report/datastore/datastore_impl.go | 17 + .../report/datastore/datastore_impl_test.go | 30 ++ .../v2/report/datastore/mocks/datastore.go | 15 + .../manager/generator/report_gen_impl.go | 6 +- .../manager/generator/report_gen_impl_test.go | 4 +- .../v2/report/manager/helpers/clusterdata.go | 116 +++++++ .../manager/helpers/clusterdata_test.go | 291 ++++++++++++++++++ .../v2/report/manager/manager_impl.go | 83 +++-- .../v2/report/manager/manager_test.go | 34 +- .../manager/results/results_aggregator.go | 52 +--- .../results/results_aggregator_test.go | 221 +------------ .../complianceoperator/v2/report/request.go | 2 +- .../storage/compliance_operator_v2.pb.go | 24 +- .../compliance_operator_v2_vtproto.pb.go | 194 ++++++++++++ proto/storage/compliance_operator_v2.proto | 2 + 16 files changed, 784 insertions(+), 310 deletions(-) create mode 100644 central/complianceoperator/v2/report/manager/helpers/clusterdata.go create mode 100644 central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go diff --git a/central/complianceoperator/v2/report/datastore/datastore.go 
b/central/complianceoperator/v2/report/datastore/datastore.go index 56d4c4e1f70eb..45ab196311e63 100644 --- a/central/complianceoperator/v2/report/datastore/datastore.go +++ b/central/complianceoperator/v2/report/datastore/datastore.go @@ -24,6 +24,9 @@ type DataStore interface { // DeleteSnapshot removes a report snapshot object from the database DeleteSnapshot(ctx context.Context, id string) error + + // GetLastSnapshotFromScanConfig returns the last snapshot associated with a ScanConfiguration + GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) } // New returns an instance of DataStore. diff --git a/central/complianceoperator/v2/report/datastore/datastore_impl.go b/central/complianceoperator/v2/report/datastore/datastore_impl.go index e4f3839a6f3d2..ab83cb6bdcb30 100644 --- a/central/complianceoperator/v2/report/datastore/datastore_impl.go +++ b/central/complianceoperator/v2/report/datastore/datastore_impl.go @@ -8,6 +8,7 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/errorhelpers" + types "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/search" ) @@ -75,3 +76,19 @@ func deleteOrphanedSnapshots(ctx context.Context, ds DataStore, query *v1.Query) } return errList.ToError() } + +func (d *datastoreImpl) GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) { + query := search.NewQueryBuilder(). + AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).ProtoQuery() + snapshots, err := d.SearchSnapshots(ctx, query) + if err != nil { + return nil, err + } + var lastSnapshot *storage.ComplianceOperatorReportSnapshotV2 + for _, snapshot := range snapshots { + if types.CompareTimestamps(snapshot.GetReportStatus().GetCompletedAt(), lastSnapshot.GetReportStatus().GetCompletedAt()) > 0 { + lastSnapshot = snapshot + } + } + return lastSnapshot, nil +} diff --git a/central/complianceoperator/v2/report/datastore/datastore_impl_test.go b/central/complianceoperator/v2/report/datastore/datastore_impl_test.go index 12c8c6a028b55..0fd841377480c 100644 --- a/central/complianceoperator/v2/report/datastore/datastore_impl_test.go +++ b/central/complianceoperator/v2/report/datastore/datastore_impl_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "testing" + "time" "github.com/google/uuid" reportStorage "github.com/stackrox/rox/central/complianceoperator/v2/report/store/postgres" @@ -13,6 +14,7 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" @@ -255,6 +257,34 @@ func (s *complianceReportSnapshotDataStoreSuite) TestDeleteOrphaned() { } } +func (s *complianceReportSnapshotDataStoreSuite) TestGetLastSnapshot() { + // make sure we have nothing + reportIDs, err := s.storage.GetIDs(s.hasReadCtx) + s.Require().NoError(err) + s.Require().Empty(reportIDs) + + timeNow := time.Now() + oldTime := timeNow.Add(-time.Hour) + timestampNow, err := protocompat.ConvertTimeToTimestampOrError(timeNow) + s.Require().NoError(err) + oldTimestamp, err := protocompat.ConvertTimeToTimestampOrError(oldTime) + s.Require().NoError(err) + + status1 := getStatus(storage.ComplianceOperatorReportStatus_PREPARING, oldTimestamp, timestampNow, 
"", storage.ComplianceOperatorReportStatus_SCHEDULED, storage.ComplianceOperatorReportStatus_EMAIL) + status2 := getStatus(storage.ComplianceOperatorReportStatus_PREPARING, oldTimestamp, oldTimestamp, "", storage.ComplianceOperatorReportStatus_SCHEDULED, storage.ComplianceOperatorReportStatus_EMAIL) + user := getUser("u-1", "user-1") + reports := []*storage.ComplianceOperatorReportSnapshotV2{ + getTestReport(uuidStub1, uuidScanConfigStub1, status1, user), + getTestReport(uuidStub2, uuidScanConfigStub1, status2, user), + } + for _, r := range reports { + s.Require().NoError(s.storage.Upsert(s.hasWriteCtx, r)) + } + snapshot, err := s.datastore.GetLastSnapshotFromScanConfig(s.hasReadCtx, uuidScanConfigStub1) + s.Assert().NoError(err) + s.Assert().Equal(uuidStub1, snapshot.GetReportId()) +} + func getTestReport(id string, scanConfigID string, status *storage.ComplianceOperatorReportStatus, user *storage.SlimUser) *storage.ComplianceOperatorReportSnapshotV2 { return &storage.ComplianceOperatorReportSnapshotV2{ ReportId: id, diff --git a/central/complianceoperator/v2/report/datastore/mocks/datastore.go b/central/complianceoperator/v2/report/datastore/mocks/datastore.go index 4a041004a0278..98b0f601a9ff6 100644 --- a/central/complianceoperator/v2/report/datastore/mocks/datastore.go +++ b/central/complianceoperator/v2/report/datastore/mocks/datastore.go @@ -56,6 +56,21 @@ func (mr *MockDataStoreMockRecorder) DeleteSnapshot(ctx, id any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockDataStore)(nil).DeleteSnapshot), ctx, id) } +// GetLastSnapshotFromScanConfig mocks base method. +func (m *MockDataStore) GetLastSnapshotFromScanConfig(ctx context.Context, scanConfigID string) (*storage.ComplianceOperatorReportSnapshotV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastSnapshotFromScanConfig", ctx, scanConfigID) + ret0, _ := ret[0].(*storage.ComplianceOperatorReportSnapshotV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLastSnapshotFromScanConfig indicates an expected call of GetLastSnapshotFromScanConfig. +func (mr *MockDataStoreMockRecorder) GetLastSnapshotFromScanConfig(ctx, scanConfigID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastSnapshotFromScanConfig", reflect.TypeOf((*MockDataStore)(nil).GetLastSnapshotFromScanConfig), ctx, scanConfigID) +} + // GetSnapshot mocks base method. 
func (m *MockDataStore) GetSnapshot(ctx context.Context, id string) (*storage.ComplianceOperatorReportSnapshotV2, bool, error) { m.ctrl.T.Helper() diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go index 6978dd1a450e6..5371d0546e1e7 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl.go @@ -73,7 +73,7 @@ type complianceReportGeneratorImpl struct { func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Request) error { - log.Infof("Processing report request %s", req) + log.Infof("Processing report request %s", req.ScanConfigID) var snapshot *storage.ComplianceOperatorReportSnapshotV2 if req.SnapshotID != "" { @@ -100,14 +100,14 @@ func (rg *complianceReportGeneratorImpl) ProcessReportRequest(req *report.Reques if snapshot != nil { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_GENERATED - if req.FailedClusters > 0 { + if req.NumFailedClusters > 0 { switch req.NotificationMethod { case storage.ComplianceOperatorReportStatus_EMAIL: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_EMAIL case storage.ComplianceOperatorReportStatus_DOWNLOAD: snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_DOWNLOAD } - if req.FailedClusters == len(req.ClusterIDs) { + if req.NumFailedClusters == len(req.ClusterIDs) { snapshot.GetReportStatus().RunState = storage.ComplianceOperatorReportStatus_FAILURE } } diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go index 5dea0397b923c..2f24ad58d90bf 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go @@ -221,7 +221,7 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { req.ClusterData["cluster-1"] = &report.ClusterData{ FailedInfo: &report.FailedCluster{}, } - req.FailedClusters = 2 + req.NumFailedClusters = 2 err := s.reportGen.ProcessReportRequest(req) s.Require().Error(err) s.Assert().Contains(err.Error(), errUnableToUpdateSnapshotOnGenerationSuccessStr) @@ -475,7 +475,7 @@ func newFakeRequestWithFailedCluster() *report.Request { FailedInfo: &report.FailedCluster{}, }, }, - FailedClusters: 1, + NumFailedClusters: 1, } } diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go new file mode 100644 index 0000000000000..88b1f4a2be326 --- /dev/null +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go @@ -0,0 +1,116 @@ +package helpers + +import ( + "context" + + "github.com/pkg/errors" + "github.com/stackrox/rox/central/complianceoperator/v2/report" + snapshotDS "github.com/stackrox/rox/central/complianceoperator/v2/report/datastore" + scanDS "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/search" + "golang.org/x/exp/maps" +) + +// GetFailedClusters returns the failed clusters metadata associated with a ScanConfiguration +func GetFailedClusters(ctx context.Context, scanConfigID string, snapshotStore snapshotDS.DataStore, scanStore 
scanDS.DataStore) (map[string]*report.FailedCluster, error) {
+	failedClusters := make(map[string]*report.FailedCluster)
+	prevSnapshot, err := snapshotStore.GetLastSnapshotFromScanConfig(ctx, scanConfigID)
+	if err != nil {
+		return nil, err
+	}
+	for _, failedCluster := range prevSnapshot.GetFailedClusters() {
+		scans, err := populateFailedScans(ctx, failedCluster.GetScans(), prevSnapshot.GetScans(), scanStore)
+		if err != nil {
+			return nil, err
+		}
+		failedClusters[failedCluster.GetClusterId()] = &report.FailedCluster{
+			ClusterId:       failedCluster.GetClusterId(),
+			ClusterName:     failedCluster.GetClusterName(),
+			Reasons:         failedCluster.GetReasons(),
+			OperatorVersion: failedCluster.GetOperatorVersion(),
+			Scans:           scans,
+			Profiles:        failedCluster.GetProfiles(),
+		}
+	}
+	return failedClusters, nil
+}
+
+// GetClusterData returns the cluster metadata associated with the report data
+func GetClusterData(ctx context.Context, reportData *storage.ComplianceOperatorReportData, failedClusters map[string]*report.FailedCluster, scanStore scanDS.DataStore) (map[string]*report.ClusterData, error) {
+	clusterData := make(map[string]*report.ClusterData)
+	for _, cluster := range reportData.GetClusterStatus() {
+		data := &report.ClusterData{
+			ClusterId:   cluster.GetClusterId(),
+			ClusterName: cluster.GetClusterName(),
+		}
+		data, err := populateScansAndProfiles(ctx, data, reportData, cluster.GetClusterId(), scanStore)
+		if err != nil {
+			return nil, err
+		}
+		if failedClusters == nil {
+			clusterData[cluster.GetClusterId()] = data
+			continue
+		}
+		if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok {
+			failedCluster.ClusterName = cluster.GetClusterName()
+			data.FailedInfo = failedCluster
+			data, err = populateFailedScansAndProfiles(ctx, data, reportData, cluster.GetClusterId(), scanStore)
+			if err != nil {
+				return nil, err
+			}
+		}
+		clusterData[cluster.GetClusterId()] = data
+	}
+	return clusterData, nil
+}
+
+func populateScansAndProfiles(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) {
+	if data == nil {
+		return nil, errors.New("cannot populate scans and profiles of a nil ClusterData")
+	}
+	scanNamesToProfileNames, err := scanStore.GetProfilesScanNamesByScanConfigAndCluster(ctx, reportData.GetScanConfiguration().GetId(), clusterID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "unable to retrieve profiles associated with the ScanConfiguration %q in the cluster %q", reportData.GetScanConfiguration().GetId(), clusterID)
+	}
+	data.Profiles = maps.Values(scanNamesToProfileNames)
+	data.Scans = maps.Keys(scanNamesToProfileNames)
+	return data, nil
+}
+
+func populateFailedScansAndProfiles(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) {
+	if data.FailedInfo == nil {
+		return nil, errors.New("cannot populate scans and profiles of a nil FailedInfo")
+	}
+	var profileRefIDs []string
+	for _, scan := range data.FailedInfo.Scans {
+		profileRefIDs = append(profileRefIDs, scan.GetProfile().GetProfileRefId())
+	}
+	scanNameToProfileName, err := scanStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, reportData.GetScanConfiguration().GetId(), clusterID, profileRefIDs)
+	if err != nil {
+		return nil, err
+	}
+	var profileNames []string
+	for _, scan := range data.FailedInfo.Scans {
+		if profileName, ok := 
scanNameToProfileName[scan.GetScanName()]; ok { + profileNames = append(profileNames, profileName) + } + } + data.FailedInfo.Profiles = profileNames + return data, nil +} + +func populateFailedScans(ctx context.Context, failedScanNames []string, snapshotScans []*storage.ComplianceOperatorReportSnapshotV2_Scan, scanStore scanDS.DataStore) ([]*storage.ComplianceOperatorScanV2, error) { + scanRefIDs := make([]string, 0, len(snapshotScans)) + for _, scan := range snapshotScans { + scanRefIDs = append(scanRefIDs, scan.GetScanRefId()) + } + query := search.NewQueryBuilder(). + AddExactMatches(search.ComplianceOperatorScanName, failedScanNames...). + AddExactMatches(search.ComplianceOperatorScanResult, scanRefIDs...).ProtoQuery() + scans, err := scanStore.SearchScans(ctx, query) + if err != nil { + return nil, err + } + return scans, nil +} diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go new file mode 100644 index 0000000000000..7597abfbaf70c --- /dev/null +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go @@ -0,0 +1,291 @@ +package helpers + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/stackrox/rox/central/complianceoperator/v2/report" + snapshotDS "github.com/stackrox/rox/central/complianceoperator/v2/report/datastore/mocks" + scanDS "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore/mocks" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestGetFailedClusters(t *testing.T) { + ctx := context.Background() + scanConfigID := "scan-config-id" + ctrl := gomock.NewController(t) + snapshotStore := snapshotDS.NewMockDataStore(ctrl) + scanStore := scanDS.NewMockDataStore(ctrl) + snapshot := &storage.ComplianceOperatorReportSnapshotV2{ + FailedClusters: []*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ + { + ClusterId: "cluster-id", + ClusterName: "cluster-name", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + Scans: []string{"scan-2"}, + Profiles: []string{"profile-2"}, + }, + }, + Scans: []*storage.ComplianceOperatorReportSnapshotV2_Scan{ + { + ScanRefId: "scan-ref-id-1", + }, + { + ScanRefId: "scan-ref-id-2", + }, + }, + } + t.Run("failure retrieving snapshot from the store", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). + Times(1).Return(nil, errors.New("some error")) + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.Error(tt, err) + assert.Nil(tt, failedClusters) + }) + t.Run("failure retrieving scans from the store", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). + Times(1).Return(snapshot, nil) + scanStore.EXPECT().SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return(nil, errors.New("some error")) + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.Error(tt, err) + assert.Nil(tt, failedClusters) + }) + t.Run("populate failed clusters successfully", func(tt *testing.T) { + snapshotStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfigID)). 
+ Times(1).Return(snapshot, nil) + scans := []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + ScanRefId: "scan-ref-id-2", + }, + } + scanStore.EXPECT().SearchScans(gomock.Any(), gomock.Any()). + Times(1).Return(scans, nil) + expectedFailedClusters := map[string]*report.FailedCluster{ + "cluster-id": { + ClusterId: "cluster-id", + ClusterName: "cluster-name", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + Scans: scans, + Profiles: []string{"profile-2"}, + }, + } + failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore) + assert.NoError(tt, err) + require.Len(tt, failedClusters, len(expectedFailedClusters)) + for clusterID, expectedCluster := range expectedFailedClusters { + actualCluster, ok := failedClusters[clusterID] + require.True(tt, ok) + assert.Equal(tt, expectedCluster.ClusterId, actualCluster.ClusterId) + assert.Equal(tt, expectedCluster.ClusterName, actualCluster.ClusterName) + assert.Equal(tt, expectedCluster.Reasons, actualCluster.Reasons) + assert.Equal(tt, expectedCluster.OperatorVersion, actualCluster.OperatorVersion) + protoassert.SlicesEqual(t, expectedCluster.Scans, actualCluster.Scans) + assert.Len(tt, actualCluster.Profiles, len(expectedCluster.Profiles)) + for _, profile := range expectedCluster.Profiles { + assert.Contains(tt, actualCluster.Profiles, profile) + } + } + }) +} + +func TestGetClusterData(t *testing.T) { + ctx := context.Background() + reportData := &storage.ComplianceOperatorReportData{ + ScanConfiguration: &storage.ComplianceOperatorScanConfigurationV2{ + Id: "scan-config-id", + }, + ClusterStatus: []*storage.ComplianceOperatorReportData_ClusterStatus{ + { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + }, + { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + }, + }, + } + failedClusters := map[string]*report.FailedCluster{ + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + Reasons: []string{"some reason"}, + OperatorVersion: "v1.6.0", + Scans: []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + Profile: &storage.ProfileShim{ + ProfileRefId: "profile-ref-id", + }, + }, + }, + }, + } + ctrl := gomock.NewController(t) + scanStore := scanDS.NewMockDataStore(ctrl) + t.Run("empty cluster status", func(tt *testing.T) { + clusterData, err := GetClusterData(ctx, nil, failedClusters, scanStore) + assert.NoError(tt, err) + assert.Len(tt, clusterData, 0) + }) + t.Run("failure querying the scan store", func(tt *testing.T) { + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")). + Times(1).Return(nil, errors.New("some error")) + clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore) + assert.Error(tt, err) + assert.Nil(tt, clusterData) + }) + t.Run("no failed clusters", func(tt *testing.T) { + gomock.InOrder( + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")). + Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2")). 
+ Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + ) + expectedClusterData := map[string]*report.ClusterData{ + "cluster-1": { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + Scans: []string{"scan-1", "scan-2"}, + Profiles: []string{"profile-1", "profile-2"}, + }, + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + Scans: []string{"scan-1", "scan-2"}, + Profiles: []string{"profile-1", "profile-2"}, + }, + } + clusterData, err := GetClusterData(ctx, reportData, nil, scanStore) + assert.NoError(tt, err) + assertClusterData(tt, expectedClusterData, clusterData) + }) + t.Run("failure querying for failed profiles", func(tt *testing.T) { + gomock.InOrder( + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")). + Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2")). + Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + scanStore.EXPECT(). + GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2"), gomock.Eq([]string{"profile-ref-id"})). + Times(1).Return(nil, errors.New("some error")), + ) + clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore) + assert.Error(tt, err) + assert.Nil(tt, clusterData) + }) + t.Run("with failed clusters", func(tt *testing.T) { + gomock.InOrder( + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")). + Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + scanStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2")). + Times(1).Return(map[string]string{ + "scan-1": "profile-1", + "scan-2": "profile-2", + }, nil), + scanStore.EXPECT(). + GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2"), gomock.Eq([]string{"profile-ref-id"})). 
+ Times(1).Return(map[string]string{ + "scan-2": "profile-2", + }, nil), + ) + expectedClusterData := map[string]*report.ClusterData{ + "cluster-1": { + ClusterId: "cluster-1", + ClusterName: "cluster-1", + Scans: []string{"scan-1", "scan-2"}, + Profiles: []string{"profile-1", "profile-2"}, + }, + "cluster-2": { + ClusterId: "cluster-2", + ClusterName: "cluster-2", + Scans: []string{"scan-1", "scan-2"}, + Profiles: []string{"profile-1", "profile-2"}, + FailedInfo: &report.FailedCluster{ + ClusterId: "cluster-2", + ClusterName: "cluster-2", + OperatorVersion: "v1.6.0", + Reasons: []string{"some reason"}, + Scans: []*storage.ComplianceOperatorScanV2{ + { + ScanName: "scan-2", + Profile: &storage.ProfileShim{ + ProfileRefId: "profile-ref-id", + }, + }, + }, + Profiles: []string{"profile-2"}, + }, + }, + } + clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore) + assert.NoError(tt, err) + assertClusterData(tt, expectedClusterData, clusterData) + }) +} + +func assertClusterData(t *testing.T, expected map[string]*report.ClusterData, actual map[string]*report.ClusterData) { + assert.Len(t, actual, len(expected)) + for clusterID, expectedCluster := range expected { + actualCluster, ok := actual[clusterID] + require.True(t, ok) + assert.Equal(t, expectedCluster.ClusterId, actualCluster.ClusterId) + assert.Equal(t, expectedCluster.ClusterName, actualCluster.ClusterName) + assert.Len(t, actualCluster.Scans, len(expectedCluster.Scans)) + for _, scan := range expectedCluster.Scans { + assert.Contains(t, actualCluster.Scans, scan) + } + assert.Len(t, actualCluster.Profiles, len(expectedCluster.Profiles)) + for _, profile := range expectedCluster.Profiles { + assert.Contains(t, actualCluster.Profiles, profile) + } + if expectedCluster.FailedInfo != nil { + require.NotNil(t, actualCluster.FailedInfo) + assert.Equal(t, expectedCluster.FailedInfo.ClusterId, actualCluster.FailedInfo.ClusterId) + assert.Equal(t, expectedCluster.FailedInfo.ClusterName, actualCluster.FailedInfo.ClusterName) + assert.Equal(t, expectedCluster.FailedInfo.Reasons, actualCluster.FailedInfo.Reasons) + assert.Equal(t, expectedCluster.FailedInfo.OperatorVersion, actualCluster.FailedInfo.OperatorVersion) + protoassert.SlicesEqual(t, expectedCluster.FailedInfo.Scans, actualCluster.FailedInfo.Scans) + assert.Len(t, actualCluster.FailedInfo.Profiles, len(expectedCluster.FailedInfo.Profiles)) + for _, profile := range expectedCluster.FailedInfo.Profiles { + assert.Contains(t, actualCluster.FailedInfo.Profiles, profile) + } + } else { + assert.Nil(t, actualCluster.FailedInfo) + } + } +} diff --git a/central/complianceoperator/v2/report/manager/manager_impl.go b/central/complianceoperator/v2/report/manager/manager_impl.go index f0d0ec2aaedc0..5676edb9aea4e 100644 --- a/central/complianceoperator/v2/report/manager/manager_impl.go +++ b/central/complianceoperator/v2/report/manager/manager_impl.go @@ -47,7 +47,7 @@ type reportRequest struct { snapshotID string notificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod clusterData map[string]*report.ClusterData - failedClusters int + numFailedClusters int } type managerImpl struct { @@ -221,7 +221,7 @@ func (m *managerImpl) generateReportNoLock(req *reportRequest) { SnapshotID: req.snapshotID, NotificationMethod: req.notificationMethod, ClusterData: req.clusterData, - FailedClusters: req.failedClusters, + NumFailedClusters: req.numFailedClusters, } log.Infof("Executing report request for scan config %q", req.scanConfig.GetId()) if err := 
m.reportGen.ProcessReportRequest(repRequest); err != nil {
@@ -300,6 +300,23 @@ func (m *managerImpl) handleReportRequest(request *reportRequest) (bool, error)
 		return false, errors.Wrap(err, "unable to upsert snapshot on report preparation")
 	}
 	request.snapshotID = snapshot.GetReportId()
+	failedClusters, err := helpers.GetFailedClusters(m.automaticReportingCtx, request.scanConfig.GetId(), m.snapshotDataStore, m.scanDataStore)
+	if err != nil {
+		log.Warnf("unable to retrieve failed clusters: %v", err)
+	}
+	request.numFailedClusters = len(failedClusters)
+	request.clusterData, err = helpers.GetClusterData(m.automaticReportingCtx, snapshot.GetReportData(), failedClusters, m.scanDataStore)
+	if err != nil {
+		log.Errorf("unable to get cluster information: %v", err)
+		if dbErr := helpers.UpdateSnapshotOnError(request.ctx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil {
+			return false, errors.Wrap(dbErr, "unable to upsert snapshot on generation failure")
+		}
+	}
+	// Add failed clusters to the report snapshot
+	if _, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot); err != nil {
+		log.Errorf("unable to update snapshot with failed clusters: %v", err)
+		return false, err
+	}
 	m.generateReportNoLock(request)
 	return true, nil
 }
@@ -563,40 +580,25 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca
 	log.Infof("Snapshot for ScanConfig %s: %+v -- %+v", result.ScanConfig.GetScanConfigName(), snapshot.GetReportStatus(), snapshot.GetFailedClusters())
 	// Update ReportData
 	snapshot.ReportData = m.getReportData(result.ScanConfig)
-	// Add failed clusters to the report
-	clusterData := make(map[string]*report.ClusterData)
-	for _, cluster := range snapshot.ReportData.GetClusterStatus() {
-		data := &report.ClusterData{
-			ClusterId:   cluster.GetClusterId(),
-			ClusterName: cluster.GetClusterName(),
-		}
-		if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok {
-			failedCluster.ClusterName = cluster.GetClusterName()
-			data.FailedInfo = failedCluster
-		}
-		clusterData[cluster.GetClusterId()] = data
-	}
-	if len(failedClusters) > 0 {
-		failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters))
-		for _, failedCluster := range failedClusters {
-			failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{
-				ClusterId:       failedCluster.ClusterId,
-				ClusterName:     failedCluster.ClusterName,
-				OperatorVersion: failedCluster.OperatorVersion,
-				Reasons:         failedCluster.Reasons,
-			})
+	// Populate ClusterData
+	clusterData, err := helpers.GetClusterData(m.automaticReportingCtx, snapshot.ReportData, failedClusters, m.scanDataStore)
+	if err != nil {
+		log.Errorf("unable to populate cluster data: %v", err)
+		if dbErr := helpers.UpdateSnapshotOnError(m.automaticReportingCtx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil {
+			return errors.Wrap(dbErr, "unable to update snapshot on populate cluster data error")
 		}
-		snapshot.FailedClusters = failedClustersSlice
 	}
-	if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil {
-		return errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId())
+	// Add failed clusters to the report snapshot
+	snapshot, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot)
+	if err != nil {
+		return err
 	}
 	generateReportReq := &reportRequest{
 		ctx:                m.automaticReportingCtx,
 		scanConfig:         result.ScanConfig,
 		snapshotID:         
snapshot.GetReportId(), notificationMethod: snapshot.GetReportStatus().GetReportNotificationMethod(), - failedClusters: len(failedClusters), + numFailedClusters: len(failedClusters), clusterData: clusterData, } isOnDemand := snapshot.GetReportStatus().GetReportRequestType() == storage.ComplianceOperatorReportStatus_ON_DEMAND @@ -606,6 +608,31 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca return nil } +func (m *managerImpl) addFailedClustersToTheSnapshot(failedClusters map[string]*report.FailedCluster, snapshot *storage.ComplianceOperatorReportSnapshotV2) (*storage.ComplianceOperatorReportSnapshotV2, error) { + if len(failedClusters) > 0 { + failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters)) + for _, failedCluster := range failedClusters { + scans := make([]string, 0, len(failedCluster.Scans)) + for _, scan := range failedCluster.Scans { + scans = append(scans, scan.GetScanName()) + } + failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ + ClusterId: failedCluster.ClusterId, + ClusterName: failedCluster.ClusterName, + OperatorVersion: failedCluster.OperatorVersion, + Reasons: failedCluster.Reasons, + Scans: scans, + Profiles: failedCluster.Profiles, + }) + } + snapshot.FailedClusters = failedClustersSlice + if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil { + return snapshot, errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId()) + } + } + return snapshot, nil +} + func (m *managerImpl) handleReportScheduled(request *reportRequest, isOnDemand bool) error { if err := m.concurrencySem.Acquire(context.Background(), 1); err != nil { return errors.Wrap(err, "Error acquiring semaphore to run new report") diff --git a/central/complianceoperator/v2/report/manager/manager_test.go b/central/complianceoperator/v2/report/manager/manager_test.go index 1a74b6f3cd8c6..a6659a5e78b2b 100644 --- a/central/complianceoperator/v2/report/manager/manager_test.go +++ b/central/complianceoperator/v2/report/manager/manager_test.go @@ -104,6 +104,7 @@ func (m *ManagerTestSuite) TestHandleReportRequest() { m.Run("Successful report, no watchers running", func() { manager := New(m.scanConfigDataStore, m.scanDataStore, m.profileDataStore, m.snapshotDataStore, m.complianceIntegrationDataStore, m.suiteDataStore, m.bindingsDataStore, m.checkResultDataStore, m.reportGen) manager.Start() + scanConfig := getTestScanConfig() wg := concurrency.NewWaitGroup(1) m.snapshotDataStore.EXPECT().UpsertSnapshot(gomock.Any(), gomock.Any()).Times(1). Return(nil) @@ -112,6 +113,13 @@ func (m *ManagerTestSuite) TestHandleReportRequest() { wg.Add(-1) return nil }) + m.snapshotDataStore.EXPECT(). + GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfig.GetId())). + Times(1).Return(nil, nil) + m.scanDataStore.EXPECT(). 
+ GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(scanConfig.GetId()), gomock.Cond[string](func(id string) bool { + return id == "cluster-1" || id == "cluster-2" + })).Times(len(scanConfig.GetClusters())).Return(getScanToProfileMap(len(scanConfig.GetProfiles())), nil) err := manager.SubmitReportRequest(ctx, getTestScanConfig(), storage.ComplianceOperatorReportStatus_EMAIL) m.Require().NoError(err) handleWaitGroup(m.T(), &wg, 10*time.Millisecond, "report generation") @@ -610,10 +618,10 @@ func (m *ManagerTestSuite) setupExpectCallsFromFinishAllScans(sc *storage.Compli m.scanDataStore.EXPECT(). SearchScans(gomock.Any(), gomock.Any()). Times(1).Return(allScans, nil), - // Upsert Snapshots - m.snapshotDataStore.EXPECT(). - UpsertSnapshot(gomock.Any(), gomock.Any()). - Times(numSnapshots).Return(nil), + m.scanDataStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool { + return id == "cluster-1" || id == "cluster-2" + })).Times(len(sc.GetClusters())*numSnapshots).Return(getScanToProfileMap(len(sc.GetProfiles())), nil), } expectedCalls = append(expectedCalls, calls...) return expectedCalls @@ -656,6 +664,15 @@ func (m *ManagerTestSuite) setupExpectCallsFromFailAllScans(sc *storage.Complian })). Times(numSnapshots).Return(nil), + // GetClusterData + m.scanDataStore.EXPECT(). + GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool { + return id == "cluster-1" || id == "cluster-2" + })).Times(len(sc.GetClusters())*numSnapshots).Return(getScanToProfileMap(len(sc.GetProfiles())), nil), + m.scanDataStore.EXPECT(). + GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool { + return id == "cluster-1" || id == "cluster-2" + }), gomock.Any()).Times(2).Return(getScanToProfileMap(len(sc.GetProfiles())), nil), } expectedCalls = append(expectedCalls, calls...) 
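
The GetLastSnapshotFromScanConfig expectations stubbed in these tests hide the selection logic this patch adds to the snapshot datastore: walk every snapshot of a scan configuration and keep the one with the newest CompletedAt. The following is a minimal, self-contained sketch of that reduction; it uses time.Time and a trimmed stand-in struct instead of the storage.ComplianceOperatorReportSnapshotV2 proto, and both simplifications are ours, not part of the patch.

	package main

	import (
		"fmt"
		"time"
	)

	// snapshot is a hypothetical stand-in for the snapshot proto, keeping only
	// the fields the selection loop actually reads.
	type snapshot struct {
		reportID    string
		completedAt time.Time
	}

	// lastCompleted mirrors the loop in GetLastSnapshotFromScanConfig: iterate
	// over all snapshots and keep the one completed most recently.
	func lastCompleted(snaps []snapshot) *snapshot {
		var last *snapshot
		for i := range snaps {
			if last == nil || snaps[i].completedAt.After(last.completedAt) {
				last = &snaps[i]
			}
		}
		return last
	}

	func main() {
		now := time.Now()
		snaps := []snapshot{
			{reportID: "r-old", completedAt: now.Add(-time.Hour)},
			{reportID: "r-new", completedAt: now},
		}
		fmt.Println(lastCompleted(snaps).reportID) // prints "r-new"
	}

The real implementation appears to skip the explicit nil guard by relying on nil-safe protobuf getters inside protocompat.CompareTimestamps; the sketch spells the guard out instead.
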
return expectedCalls @@ -748,6 +765,15 @@ func handleWaitGroup(t *testing.T, wg *concurrency.WaitGroup, timeout time.Durat } } +func getScanToProfileMap(numProfiles int) map[string]string { + ret := make(map[string]string) + for i := 0; i < numProfiles; i++ { + name := fmt.Sprintf("profile-%d", i) + ret[name] = name + } + return ret +} + func newGetScanConfigClusterStatusMatcher(sc *storage.ComplianceOperatorScanConfigurationV2) *getScanConfigClusterStatusMatcher { return &getScanConfigClusterStatusMatcher{ scanConfigID: sc.GetId(), diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator.go b/central/complianceoperator/v2/report/manager/results/results_aggregator.go index 374a6105ef6ea..9c2673a3af695 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator.go @@ -5,7 +5,6 @@ import ( "fmt" "strings" - "github.com/pkg/errors" benchmarksDS "github.com/stackrox/rox/central/complianceoperator/v2/benchmarks/datastore" checkResults "github.com/stackrox/rox/central/complianceoperator/v2/checkresults/datastore" "github.com/stackrox/rox/central/complianceoperator/v2/checkresults/utils" @@ -69,9 +68,9 @@ func (g *Aggregator) GetReportData(req *report.Request) *report.Results { reportResults := &report.Results{} reportResults.ClustersData = make(map[string]*report.ClusterData) for _, clusterID := range req.ClusterIDs { - clusterData, err := g.getClusterData(req.Ctx, req.ScanConfigID, clusterID, req.ClusterData[clusterID]) - if err != nil { - log.Errorf("Data unable to enhance cluster data for cluster %q: %v", clusterID, err) + clusterData, ok := req.ClusterData[clusterID] + if !ok { + log.Errorf("empty cluster data for cluster %q", clusterID) continue } clusterResults, clusterStatus, err := g.getReportDataForCluster(req.Ctx, req.ScanConfigID, clusterID, clusterData) @@ -92,51 +91,6 @@ func (g *Aggregator) GetReportData(req *report.Request) *report.Results { return reportResults } -func (g *Aggregator) getClusterData(ctx context.Context, scanConfigID, clusterID string, clusterData *report.ClusterData) (*report.ClusterData, error) { - if clusterData == nil { - return nil, errors.Errorf("cluster data for cluster %q is nil", clusterID) - } - - scanNamesToProfileNames, err := g.scanDS.GetProfilesScanNamesByScanConfigAndCluster(ctx, scanConfigID, clusterID) - if err != nil { - return nil, errors.Wrapf(err, "unable to retrieve profiles associated with the ScanConfiguration %q in the cluster %q", scanConfigID, clusterID) - } - var profileNames []string - var scanNames []string - for scanName, profileName := range scanNamesToProfileNames { - profileNames = append(profileNames, profileName) - scanNames = append(scanNames, scanName) - } - clusterData.Profiles = profileNames - clusterData.Scans = scanNames - if clusterData.FailedInfo != nil { - clusterData.FailedInfo, err = g.getFailedClusterData(ctx, clusterData.FailedInfo, scanConfigID, clusterID) - if err != nil { - return nil, errors.Wrapf(err, "unable to retrieve profiles associated the cluster %q", clusterData.ClusterId) - } - } - return clusterData, nil -} - -func (g *Aggregator) getFailedClusterData(ctx context.Context, failedCluster *report.FailedCluster, scanConfigID, clusterID string) (*report.FailedCluster, error) { - var profileRefIDs []string - for _, scan := range failedCluster.Scans { - profileRefIDs = append(profileRefIDs, scan.GetProfile().GetProfileRefId()) - } - scanNameToProfileName, err := 
g.scanDS.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfigID, clusterID, profileRefIDs) - if err != nil { - return nil, err - } - var profileNames []string - for _, scan := range failedCluster.Scans { - if profileName, ok := scanNameToProfileName[scan.GetScanName()]; ok { - profileNames = append(profileNames, profileName) - } - } - failedCluster.Profiles = profileNames - return failedCluster, nil -} - func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, clusterID string, clusterData *report.ClusterData) ([]*report.ResultRow, *checkStatus, error) { var ret []*report.ResultRow statuses := &checkStatus{ diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go index 2dae6140f9d88..7036247dd5ff2 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go @@ -82,24 +82,6 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataResultsGeneration() s.Run(tname, func() { ctx := context.Background() req := getRequest(ctx, tcase.numClusters, tcase.numProfiles, tcase.numFailedClusters) - s.scanDS.EXPECT().GetProfilesScanNamesByScanConfigAndCluster(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { - for _, cluster := range req.ClusterData { - if target == cluster.ClusterId { - return true - } - } - return false - })).Times(tcase.numClusters + tcase.numFailedClusters) - s.scanDS.EXPECT().GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { - for _, cluster := range req.ClusterData { - if cluster.FailedInfo != nil { - if target == cluster.ClusterId { - return true - } - } - } - return false - }), gomock.Any()).Times(tcase.numFailedClusters) s.checkResultsDS.EXPECT().WalkByQuery(gomock.Eq(ctx), gomock.Any(), gomock.Any()). Times(tcase.numClusters + tcase.numFailedClusters). 
DoAndReturn(fakeWalkByResponse( @@ -115,198 +97,6 @@ func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataResultsGeneration() } } -func (s *ComplianceResultsAggregatorSuite) Test_GetReportDataGetClusterDataErrors() { - cases := map[string]struct { - expectedGetScanToProfileErr []error - expectedGetScanToProfileWithRef []error - emptyClusterData bool - numClusters int - numProfiles int - numFailedClusters int - numPassedChecksPerCluster int - numFailedChecksPerCluster int - numMixedChecksPerCluster int - expectClusters []string - expectFailedClusters map[string]struct{} - }{ - "empty cluster data should generate to empty results": { - emptyClusterData: true, - numClusters: 2, - numProfiles: 2, - numFailedClusters: 1, - }, - "get scan to profile data fails in the first cluster": { - expectedGetScanToProfileErr: []error{errors.New("some error"), nil}, - numClusters: 2, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-1"}, - }, - "get scan to profile data fails always": { - expectedGetScanToProfileErr: []error{errors.New("some error"), errors.New("some error")}, - numClusters: 2, - numProfiles: 2, - }, - "get scan to profile data fails always after the first call": { - expectedGetScanToProfileErr: []error{nil, errors.New("some error"), errors.New("some error")}, - numClusters: 3, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-0"}, - }, - "get scan to profile data fails on the failed cluster": { - expectedGetScanToProfileErr: []error{nil, nil, errors.New("some error")}, - numClusters: 2, - numFailedClusters: 1, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-0", "cluster-1"}, - }, - "get scan to profile data with scan ref fails on the first failed cluster": { - expectedGetScanToProfileErr: []error{nil, nil, nil, nil}, - expectedGetScanToProfileWithRef: []error{errors.New("some error"), nil}, - numClusters: 2, - numFailedClusters: 2, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-0", "cluster-1", "cluster-3"}, - expectFailedClusters: map[string]struct{}{ - "cluster-3": {}, - }, - }, - "get scan to profile data with scan ref fails always": { - expectedGetScanToProfileErr: []error{nil, nil, nil, nil}, - expectedGetScanToProfileWithRef: []error{errors.New("some error"), errors.New("some error")}, - numClusters: 2, - numFailedClusters: 2, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-0", "cluster-1"}, - }, - "get scan to profile data with scan ref fails after the first call": { - expectedGetScanToProfileErr: []error{nil, nil, nil, nil, nil}, - expectedGetScanToProfileWithRef: []error{nil, errors.New("some error"), errors.New("some error")}, - numClusters: 2, - numFailedClusters: 3, - numProfiles: 2, - numPassedChecksPerCluster: 2, - numFailedChecksPerCluster: 2, - numMixedChecksPerCluster: 2, - expectClusters: []string{"cluster-0", "cluster-1", "cluster-2"}, - expectFailedClusters: map[string]struct{}{ - "cluster-2": {}, - }, - }, - } - ctx := context.Background() - for tName, tCase := range cases { - s.Run(tName, func() { - expectWalkByQueryCallsWithFailedCluster := 
len(tCase.expectFailedClusters) - expectClustersWithResults := len(tCase.expectClusters) - expectWalkByQueryCallsWithFailedCluster - req := getRequest(ctx, tCase.numClusters, tCase.numProfiles, tCase.numFailedClusters) - if tCase.emptyClusterData { - req.ClusterData = make(map[string]*report.ClusterData) - } - scanToProfilesMap := getScanToProfileMap(tCase.numProfiles) - var getScanToProfileCalls []any - for _, err := range tCase.expectedGetScanToProfileErr { - call := s.scanDS.EXPECT().GetProfilesScanNamesByScanConfigAndCluster(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { - for _, cluster := range req.ClusterData { - if target == cluster.ClusterId { - return true - } - } - return false - })).Times(1). - Return(scanToProfilesMap, err) - - getScanToProfileCalls = append(getScanToProfileCalls, call) - } - if len(getScanToProfileCalls) > 0 { - gomock.InOrder(getScanToProfileCalls...) - } - var getScanToProfileWithRefCalls []any - for _, err := range tCase.expectedGetScanToProfileWithRef { - call := s.scanDS.EXPECT().GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Eq(ctx), scanConfigID, gomock.Cond[string](func(target string) bool { - for _, cluster := range req.ClusterData { - if target == cluster.ClusterId { - return true - } - } - return false - }), gomock.Any()).Times(1). - Return(scanToProfilesMap, err) - getScanToProfileWithRefCalls = append(getScanToProfileWithRefCalls, call) - } - if len(getScanToProfileWithRefCalls) > 0 { - gomock.InOrder(getScanToProfileWithRefCalls...) - } - - s.checkResultsDS.EXPECT().WalkByQuery(gomock.Any(), gomock.Any(), gomock.Any()). - Times(expectClustersWithResults + expectWalkByQueryCallsWithFailedCluster). - DoAndReturn(fakeWalkByResponse( - req.ClusterData, - nil, - tCase.numPassedChecksPerCluster, - tCase.numFailedChecksPerCluster, - tCase.numMixedChecksPerCluster)) - - s.aggregator.aggreateResults = mockWalkByQueryWrapper - res := s.aggregator.GetReportData(req) - s.Require().NotNil(res) - s.Assert().Equal(tCase.numClusters+tCase.numFailedClusters, res.Clusters) - s.Assert().Equal(req.Profiles, res.Profiles) - s.Assert().Equal(expectClustersWithResults*tCase.numPassedChecksPerCluster, res.TotalPass) - s.Assert().Equal(expectClustersWithResults*tCase.numFailedChecksPerCluster, res.TotalFail) - s.Assert().Equal(expectClustersWithResults*tCase.numMixedChecksPerCluster, res.TotalMixed) - if tCase.emptyClusterData { - s.Assert().Len(res.ClustersData, 0) - s.Assert().Len(res.ResultCSVs, 0) - return - } - s.Assert().Len(res.ClustersData, expectClustersWithResults+expectWalkByQueryCallsWithFailedCluster) - s.Assert().Len(res.ResultCSVs, expectClustersWithResults+expectWalkByQueryCallsWithFailedCluster) - for _, clusterID := range tCase.expectClusters { - data := res.ClustersData[clusterID] - s.Require().NotNil(data) - for scan, profile := range scanToProfilesMap { - s.Assert().Contains(data.Profiles, profile) - s.Assert().Contains(data.Scans, scan) - } - s.Assert().Equal(clusterID, data.ClusterId) - s.Assert().Equal(clusterID, data.ClusterName) - if _, ok := tCase.expectFailedClusters[clusterID]; ok { - s.Require().NotNil(data.FailedInfo) - s.Assert().Equal(clusterID, data.FailedInfo.ClusterId) - s.Assert().Equal(clusterID, data.FailedInfo.ClusterName) - s.Assert().Equal([]string{"timeout"}, data.FailedInfo.Reasons) - s.Assert().Equal("v1.6.0", data.FailedInfo.OperatorVersion) - for scan, profile := range scanToProfilesMap { - s.Assert().Contains(data.FailedInfo.Profiles, profile) - scanFound := false - for _, 
storageScan := range data.FailedInfo.Scans { - if storageScan.GetScanName() == scan { - scanFound = true - } - } - s.Assert().Truef(scanFound, "expected scan %q not found", scan) - } - } - } - }) - } -} - func fakeWalkByResponse( clusterData map[string]*report.ClusterData, expectedErr error, @@ -678,7 +468,7 @@ func getRequest(ctx context.Context, numClusters, numProfiles, numFailedClusters } clusterData[id].FailedInfo = failedInfo } - ret.FailedClusters = numFailedClusters + ret.NumFailedClusters = numFailedClusters } ret.ClusterData = clusterData return ret @@ -833,12 +623,3 @@ func assertResult(t *testing.T, tcase walkByQueryTestCase, row *report.ResultRow } assert.Equal(t, strings.Join(expControlInfos, ","), row.ControlRef) } - -func getScanToProfileMap(numProfiles int) map[string]string { - ret := make(map[string]string) - for i := 0; i < numProfiles; i++ { - name := fmt.Sprintf("profile-%d", i) - ret[name] = name - } - return ret -} diff --git a/central/complianceoperator/v2/report/request.go b/central/complianceoperator/v2/report/request.go index 153abc5786db6..c0253715eae65 100644 --- a/central/complianceoperator/v2/report/request.go +++ b/central/complianceoperator/v2/report/request.go @@ -17,7 +17,7 @@ type Request struct { SnapshotID string NotificationMethod storage.ComplianceOperatorReportStatus_NotificationMethod ClusterData map[string]*ClusterData - FailedClusters int + NumFailedClusters int } // ClusterData holds the metadata for the clusters diff --git a/generated/storage/compliance_operator_v2.pb.go b/generated/storage/compliance_operator_v2.pb.go index 1f0cbe2f672e1..db2ad58b19f2b 100644 --- a/generated/storage/compliance_operator_v2.pb.go +++ b/generated/storage/compliance_operator_v2.pb.go @@ -2619,6 +2619,8 @@ type ComplianceOperatorReportSnapshotV2_FailedCluster struct { ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` Reasons []string `protobuf:"bytes,3,rep,name=reasons,proto3" json:"reasons,omitempty"` OperatorVersion string `protobuf:"bytes,4,opt,name=operator_version,json=operatorVersion,proto3" json:"operator_version,omitempty"` + Scans []string `protobuf:"bytes,5,rep,name=scans,proto3" json:"scans,omitempty"` + Profiles []string `protobuf:"bytes,6,rep,name=profiles,proto3" json:"profiles,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2681,6 +2683,20 @@ func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetOperatorVersion() return "" } +func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetScans() []string { + if x != nil { + return x.Scans + } + return nil +} + +func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetProfiles() []string { + if x != nil { + return x.Profiles + } + return nil +} + // Next available tag: 5 type ComplianceOperatorReportData_SuiteStatus struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -3060,7 +3076,7 @@ const file_storage_compliance_operator_v2_proto_rawDesc = "" + "\x0foutdated_object\x18\x06 \x01(\tR\x0eoutdatedObject\x12)\n" + "\x10enforcement_type\x18\a \x01(\tR\x0fenforcementType\x12\x1d\n" + "\n" + - "cluster_id\x18\b \x01(\tR\tclusterId\"\x9d\x06\n" + + "cluster_id\x18\b \x01(\tR\tclusterId\"\xcf\x06\n" + "\"ComplianceOperatorReportSnapshotV2\x12\x1b\n" + "\treport_id\x18\x01 \x01(\tR\breportId\x122\n" + "\x15scan_configuration_id\x18\x02 \x01(\tR\x13scanConfigurationId\x12\x12\n" + @@ -3074,13 +3090,15 @@ const file_storage_compliance_operator_v2_proto_rawDesc = "" + 
"\x0ffailed_clusters\x18\t \x03(\v29.storage.ComplianceOperatorReportSnapshotV2.FailedClusterR\x0efailedClusters\x1an\n" + "\x04Scan\x12\x1e\n" + "\vscan_ref_id\x18\x01 \x01(\tR\tscanRefId\x12F\n" + - "\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\x96\x01\n" + + "\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\xc8\x01\n" + "\rFailedCluster\x12\x1d\n" + "\n" + "cluster_id\x18\x01 \x01(\tR\tclusterId\x12!\n" + "\fcluster_name\x18\x02 \x01(\tR\vclusterName\x12\x18\n" + "\areasons\x18\x03 \x03(\tR\areasons\x12)\n" + - "\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\"\x96\x05\n" + + "\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\x12\x14\n" + + "\x05scans\x18\x05 \x03(\tR\x05scans\x12\x1a\n" + + "\bprofiles\x18\x06 \x03(\tR\bprofiles\"\x96\x05\n" + "\x1cComplianceOperatorReportData\x12]\n" + "\x12scan_configuration\x18\x01 \x01(\v2..storage.ComplianceOperatorScanConfigurationV2R\x11scanConfiguration\x12Z\n" + "\x0ecluster_status\x18\x02 \x03(\v23.storage.ComplianceOperatorReportData.ClusterStatusR\rclusterStatus\x12H\n" + diff --git a/generated/storage/compliance_operator_v2_vtproto.pb.go b/generated/storage/compliance_operator_v2_vtproto.pb.go index b92db7270787c..52cb17b86f6bb 100644 --- a/generated/storage/compliance_operator_v2_vtproto.pb.go +++ b/generated/storage/compliance_operator_v2_vtproto.pb.go @@ -664,6 +664,16 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) CloneVT() *Compliance copy(tmpContainer, rhs) r.Reasons = tmpContainer } + if rhs := m.Scans; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Scans = tmpContainer + } + if rhs := m.Profiles; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Profiles = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1860,6 +1870,24 @@ func (this *ComplianceOperatorReportSnapshotV2_FailedCluster) EqualVT(that *Comp if this.OperatorVersion != that.OperatorVersion { return false } + if len(this.Scans) != len(that.Scans) { + return false + } + for i, vx := range this.Scans { + vy := that.Scans[i] + if vx != vy { + return false + } + } + if len(this.Profiles) != len(that.Profiles) { + return false + } + for i, vx := range this.Profiles { + vy := that.Profiles[i] + if vx != vy { + return false + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -4099,6 +4127,24 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) MarshalToSizedBufferV i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Profiles) > 0 { + for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Profiles[iNdEx]) + copy(dAtA[i:], m.Profiles[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Profiles[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Scans) > 0 { + for iNdEx := len(m.Scans) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scans[iNdEx]) + copy(dAtA[i:], m.Scans[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scans[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if len(m.OperatorVersion) > 0 { i -= len(m.OperatorVersion) copy(dAtA[i:], m.OperatorVersion) @@ -5400,6 +5446,18 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) SizeVT() (n int) { if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if len(m.Scans) > 0 { + for _, s := range m.Scans { + l = len(s) + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Profiles) > 0 { + for _, s := range m.Profiles { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -12563,6 +12621,70 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVT(dAtA []by } m.OperatorVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scans", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scans = append(m.Scans, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profiles = append(m.Profiles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -21179,6 +21301,78 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVTUnsafe(dAt } m.OperatorVersion = stringValue iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scans", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Scans = append(m.Scans, stringValue) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if 
intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Profiles = append(m.Profiles, stringValue) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/proto/storage/compliance_operator_v2.proto b/proto/storage/compliance_operator_v2.proto index afaf527de0256..51ab1010f42ed 100644 --- a/proto/storage/compliance_operator_v2.proto +++ b/proto/storage/compliance_operator_v2.proto @@ -310,6 +310,8 @@ message ComplianceOperatorReportSnapshotV2 { string cluster_name = 2; repeated string reasons = 3; string operator_version = 4; + repeated string scans = 5; + repeated string profiles = 6; } repeated FailedCluster failed_clusters = 9; From a1e2cba0bbbcd6958eda48710bda9945e9577d24 Mon Sep 17 00:00:00 2001 From: lvalerom Date: Fri, 30 May 2025 18:12:11 +0200 Subject: [PATCH 4/7] fix unit after rebase --- .../v2/report/manager/generator/report_gen_impl_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go index 2f24ad58d90bf..617cbc773a8ca 100644 --- a/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go +++ b/central/complianceoperator/v2/report/manager/generator/report_gen_impl_test.go @@ -198,9 +198,12 @@ func (s *ComplainceReportingTestSuite) TestProcessReportRequest() { return storage.ComplianceOperatorReportStatus_PARTIAL_SCAN_ERROR_DOWNLOAD == target.GetReportStatus().GetRunState() })).Times(1).Return(errors.New("some error")) req := newFakeDownloadRequest() - req.FailedClusters = map[string]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster{ - "cluster-2": {}, + req.ClusterData = map[string]*report.ClusterData{ + "cluster-2": { + FailedInfo: &report.FailedCluster{}, + }, } + req.NumFailedClusters = 1 err := s.reportGen.ProcessReportRequest(req) s.Require().Error(err) s.Assert().Contains(err.Error(), errUnableToUpdateSnapshotOnGenerationSuccessStr) From 7849aee2552aeab13183bcd317f0f8d8cdbadcfb Mon Sep 17 00:00:00 2001 From: lvalerom Date: Tue, 3 Jun 2025 16:49:41 +0200 Subject: [PATCH 5/7] review plus removing profile names from file name --- .../v2/report/manager/format/formatter.go | 25 +-- .../report/manager/format/formatter_test.go | 33 ++- .../v2/report/manager/helpers/clusterdata.go | 52 ++--- .../manager/helpers/clusterdata_test.go | 122 ++++------ .../v2/report/manager/manager_impl.go | 40 ++-- .../v2/report/manager/manager_test.go | 30 ++- .../manager/results/results_aggregator.go | 10 +- .../results/results_aggregator_test.go | 8 +- .../v2/report/manager/watcher/validator.go | 4 +- .../report/manager/watcher/validator_test.go | 10 +- .../complianceoperator/v2/report/request.go | 6 +- .../v2/scans/datastore/datastore.go | 9 +- .../v2/scans/datastore/datastore_impl.go | 49 +---- .../v2/scans/datastore/datastore_impl_test.go | 208 +----------------- .../v2/scans/datastore/mocks/datastore.go | 30 --- .../v2/scans/datastore/singleton.go | 2 +- .../storage/compliance_operator_v2.pb.go | 23 +- .../compliance_operator_v2_vtproto.pb.go | 129 ++--------- proto/storage/compliance_operator_v2.proto | 3 +- 19 files changed, 166 insertions(+), 627 deletions(-) diff --git a/central/complianceoperator/v2/report/manager/format/formatter.go b/central/complianceoperator/v2/report/manager/format/formatter.go index fb79c0feb2dcc..3ee399dab14fb 100644 --- 
a/central/complianceoperator/v2/report/manager/format/formatter.go +++ b/central/complianceoperator/v2/report/manager/format/formatter.go @@ -5,12 +5,10 @@ import ( "bytes" "fmt" "io" - "strings" "github.com/pkg/errors" "github.com/stackrox/rox/central/complianceoperator/v2/report" "github.com/stackrox/rox/pkg/csv" - "github.com/stackrox/rox/pkg/set" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -80,18 +78,18 @@ func (f *FormatterImpl) FormatCSVReport(results map[string][]*report.ResultRow, timestamp := timestamppb.Now() for clusterID, cluster := range clusters { if cluster.FailedInfo != nil { - fileName := getFileName(failedClusterFmt, cluster.ClusterName, cluster.FailedInfo.Profiles, timestamp) + fileName := getFileName(failedClusterFmt, cluster.ClusterName, timestamp) if err := f.createFailedClusterFileInZip(zipWriter, fileName, cluster.FailedInfo); err != nil { return nil, errors.Wrap(err, "error creating failed cluster report") } - if len(cluster.FailedInfo.Profiles) == len(cluster.Profiles) { - continue - } } - fileName := getFileName(successfulClusterFmt, cluster.ClusterName, getProfileDiff(cluster.Profiles, cluster.FailedInfo), timestamp) + if len(results[clusterID]) == 0 && cluster.FailedInfo != nil { + continue + } if _, ok := results[clusterID]; !ok { return nil, errors.Errorf("found no results for cluster %q", clusterID) } + fileName := getFileName(successfulClusterFmt, cluster.ClusterName, timestamp) if err := f.createCSVInZip(zipWriter, fileName, results[clusterID]); err != nil { return nil, errors.Wrap(err, "error creating csv report") } @@ -152,18 +150,9 @@ func generateFailRecord(clusterID, clusterName, reason, coVersion string) []stri } } -func getProfileDiff(allProfiles []string, failedCluster *report.FailedCluster) []string { - if failedCluster == nil { - return allProfiles - } - allProfilesSet := set.NewStringSet(allProfiles...) - failedProfilesSet := set.NewStringSet(failedCluster.Profiles...) 
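
For reference while reading this deletion: getProfileDiff computed "all profiles minus failed profiles" with the internal pkg/set package, and it becomes unnecessary once profile names are dropped from the generated file names. The equivalent set difference using only the standard library would look roughly like the sketch below; this is our illustration, not code from the repository.

	package main

	import "fmt"

	// profileDiff mirrors the removed helper: return the profiles in all that
	// do not appear in failed. Unlike the set-based original, this version
	// preserves the input order of all.
	func profileDiff(all, failed []string) []string {
		failedSet := make(map[string]struct{}, len(failed))
		for _, p := range failed {
			failedSet[p] = struct{}{}
		}
		var diff []string
		for _, p := range all {
			if _, ok := failedSet[p]; !ok {
				diff = append(diff, p)
			}
		}
		return diff
	}

	func main() {
		fmt.Println(profileDiff([]string{"profile-1", "profile-2"}, []string{"profile-2"})) // [profile-1]
	}
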
- return allProfilesSet.Difference(failedProfilesSet).AsSlice() -} - -func getFileName(format string, clusterName string, profiles []string, timestamp *timestamppb.Timestamp) string { +func getFileName(format string, clusterName string, timestamp *timestamppb.Timestamp) string { year, month, day := timestamp.AsTime().Date() - return fmt.Sprintf(format, fmt.Sprintf("%s_%s_%d-%d-%d", clusterName, strings.Join(profiles, "_"), year, month, day)) + return fmt.Sprintf(format, fmt.Sprintf("%s_%d-%d-%d", clusterName, year, month, day)) } func createNewZipWriter(buf *bytes.Buffer) ZipWriter { diff --git a/central/complianceoperator/v2/report/manager/format/formatter_test.go b/central/complianceoperator/v2/report/manager/format/formatter_test.go index 8897930dc6e0e..ac965be3f5592 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter_test.go +++ b/central/complianceoperator/v2/report/manager/format/formatter_test.go @@ -37,7 +37,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportNoError() { s.Run("with empty failed clusters", func() { timestamp := timestamppb.Now() clusterData := getFakeClusterData() - fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) + fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil) gomock.InOrder( s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { @@ -68,8 +68,8 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste timestamp := timestamppb.Now() results, clusterData := getFakeReportDataWithFailedCluster() - successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) - failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].Profiles, timestamp) + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { return target == successfulFileName || target == failedFileName @@ -94,8 +94,8 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste // Add empty results to the failed cluster results[clusterID2] = []*report.ResultRow{} - successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp) - failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].Profiles, timestamp) + successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) + failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { return target == successfulFileName || target == failedFileName @@ -117,8 +117,6 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithPartialFailedCluster() { timestamp := timestamppb.Now() results, clusterData := getFakeReportDataWithFailedCluster() - // Remove profile - clusterData[clusterID2].FailedInfo.Profiles = clusterData[clusterID2].FailedInfo.Profiles[:1] // Add 
 	results[clusterID2] = []*report.ResultRow{
 		{
@@ -132,9 +130,9 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithPartialFaile
 		},
 	}
 
-	partialSuccessFileName := getFileName(successfulClusterFmt, clusterData[clusterID2].ClusterName, getProfileDiff(clusterData[clusterID2].Profiles, clusterData[clusterID2].FailedInfo), timestamp)
-	successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp)
-	failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp)
+	partialSuccessFileName := getFileName(successfulClusterFmt, clusterData[clusterID2].ClusterName, timestamp)
+	successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp)
+	failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp)
 
 	s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool {
 		return target == successfulFileName || target == failedFileName || target == partialSuccessFileName
@@ -158,7 +156,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCreateError() {
 	s.Run("zip writer failing to create a file (with no failed clusters) should yield an error", func() {
 		timestamp := timestamppb.Now()
 		clusterData := getFakeClusterData()
-		successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp)
+		successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp)
 		s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, errors.New("error"))
 		s.zipWriter.EXPECT().Close().Times(1).Return(nil)
 
@@ -170,7 +168,7 @@
 		timestamp := timestamppb.Now()
 		results, clusterData := getFakeReportDataWithFailedCluster()
 		delete(clusterData, clusterID1)
-		failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp)
+		failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp)
 		s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, errors.New("error"))
 		s.zipWriter.EXPECT().Close().Times(1).Return(nil)
 
@@ -184,7 +182,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() {
 	s.Run("csv writer failing to create a file (with no failed clusters) should yield an error", func() {
 		timestamp := timestamppb.Now()
 		clusterData := getFakeClusterData()
-		successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp)
+		successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp)
 		s.zipWriter.EXPECT().Create(successfulFileName).Times(1).Return(nil, nil)
 		s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2)
 		s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error"))
@@ -198,7 +196,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() {
 		timestamp := timestamppb.Now()
 		results, clusterData := getFakeReportDataWithFailedCluster()
 		delete(clusterData, clusterID1)
-		failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, clusterData[clusterID2].FailedInfo.Profiles, timestamp)
+		failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp)
 		s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, nil)
 		s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool {
 			failedInfo := clusterData[clusterID2].FailedInfo
@@ -216,7 +214,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportCloseError() {
 	timestamp := timestamppb.Now()
 	clusterData := getFakeClusterData()
-	fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp)
+	fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp)
 	s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil)
 	s.csvWriter.EXPECT().AddValue(gomock.Any()).Times(2)
 	s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(nil)
@@ -230,7 +228,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportEmptyReportNoError() {
 	timestamp := timestamppb.Now()
 	clusterData := getFakeClusterData()
-	fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, clusterData[clusterID1].Profiles, timestamp)
+	fileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp)
 	s.zipWriter.EXPECT().Create(fileName).Times(1).Return(nil, nil)
 	s.csvWriter.EXPECT().AddValue(&emptyValueMatcher{
 		t: s.T(),
@@ -304,12 +302,10 @@ func getFakeReportDataWithFailedCluster() (map[string][]*report.ResultRow, map[s
 	clusterData[clusterID2] = &report.ClusterData{
 		ClusterName: "test_cluster-2",
 		ClusterId:   "test_cluster-2-id",
-		Profiles:    []string{"test_profile-1", "test_profile-2"},
 	}
 	clusterData[clusterID2].FailedInfo = &report.FailedCluster{
 		Reasons:         []string{"timeout"},
 		OperatorVersion: "v1.6.0",
-		Profiles:        []string{"test_profile-1", "test_profile-2"},
 	}
 	results := getFakeReportData()
 	return results, clusterData
@@ -320,7 +316,6 @@ func getFakeClusterData() map[string]*report.ClusterData {
 	ret[clusterID1] = &report.ClusterData{
 		ClusterId:   clusterID1,
 		ClusterName: "test_cluster-1",
-		Profiles:    []string{"test_profile-1", "test_profile-2"},
 	}
 	return ret
 }
diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go
index 88b1f4a2be326..4d53fb0b9f41b 100644
--- a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go
+++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go
@@ -9,7 +9,6 @@ import (
 	scanDS "github.com/stackrox/rox/central/complianceoperator/v2/scans/datastore"
 	"github.com/stackrox/rox/generated/storage"
 	"github.com/stackrox/rox/pkg/search"
-	"golang.org/x/exp/maps"
 )
 
 // GetFailedClusters returns the failed clusters metadata associated with a ScanConfiguration
@@ -20,7 +19,7 @@ func GetFailedClusters(ctx context.Context, scanConfigID string, snapshotStore s
 		return nil, err
 	}
 	for _, failedCluster := range prevSnapshot.GetFailedClusters() {
-		scans, err := populateFailedScans(ctx, failedCluster.GetScans(), prevSnapshot.GetScans(), scanStore)
+		scans, err := populateFailedScans(ctx, failedCluster.GetScanNames(), prevSnapshot.GetScans(), scanStore)
 		if err != nil {
 			return nil, err
 		}
@@ -29,8 +28,7 @@
 			ClusterName:     failedCluster.GetClusterName(),
 			Reasons:         failedCluster.GetReasons(),
 			OperatorVersion: failedCluster.GetOperatorVersion(),
-			Scans:           scans,
-			Profiles:        failedCluster.GetProfiles(),
+			FailedScans:     scans,
 		}
 	}
 	return failedClusters, nil
@@ -44,7 +42,7 @@ func GetClusterData(ctx context.Context, reportData *storage.ComplianceOperatorR
 			ClusterId:   cluster.GetClusterId(),
 			ClusterName: cluster.GetClusterName(),
 		}
-		data, err := populateScansAndProfiles(ctx, data, reportData, cluster.GetClusterId(), scanStore)
+		data, err := populateScanNames(ctx, data, reportData, cluster.GetClusterId(), scanStore)
 		if err != nil {
 			return nil, err
 		}
@@ -55,48 +53,27 @@
 		if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok {
 			failedCluster.ClusterName = cluster.GetClusterName()
 			data.FailedInfo = failedCluster
-			data, err = populateFailedScansAndProfiles(ctx, data, reportData, cluster.GetClusterId(), scanStore)
-			if err != nil {
-				return nil, err
-			}
 		}
 		clusterData[cluster.GetClusterId()] = data
 	}
 	return clusterData, nil
 }
 
-func populateScansAndProfiles(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) {
+func populateScanNames(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) {
 	if data == nil {
 		return nil, errors.New("cannot populate scans and profiles of a nil ClusterData")
 	}
-	scanNamesToProfileNames, err := scanStore.GetProfilesScanNamesByScanConfigAndCluster(ctx, reportData.GetScanConfiguration().GetId(), clusterID)
-	if err != nil {
-		return nil, errors.Wrapf(err, "unable to retrieve profiles associated with the ScanConfiguration %q in the cluster %q", reportData.GetScanConfiguration().GetId(), clusterID)
-	}
-	data.Profiles = maps.Values(scanNamesToProfileNames)
-	data.Scans = maps.Keys(scanNamesToProfileNames)
-	return data, nil
-}
-
-func populateFailedScansAndProfiles(ctx context.Context, data *report.ClusterData, reportData *storage.ComplianceOperatorReportData, clusterID string, scanStore scanDS.DataStore) (*report.ClusterData, error) {
-	if data.FailedInfo == nil {
-		return nil, errors.New("cannot populate scans and profiles of a nil FailedInfo")
-	}
-	var profileRefIDs []string
-	for _, scan := range data.FailedInfo.Scans {
-		profileRefIDs = append(profileRefIDs, scan.GetProfile().GetProfileRefId())
-	}
-	scanNameToProfileName, err := scanStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, reportData.GetScanConfiguration().GetId(), clusterID, profileRefIDs)
+	query := search.NewQueryBuilder().
+		AddExactMatches(search.ClusterID, clusterID).
+		AddExactMatches(search.ComplianceOperatorScanConfigName, reportData.GetScanConfiguration().GetScanConfigName()).
+		ProtoQuery()
+	scans, err := scanStore.SearchScans(ctx, query)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "unable to retrieve scans associated with the ScanConfiguration %q in the cluster %q", reportData.GetScanConfiguration().GetId(), clusterID)
 	}
-	var profileNames []string
-	for _, scan := range data.FailedInfo.Scans {
-		if profileName, ok := scanNameToProfileName[scan.GetScanName()]; ok {
-			profileNames = append(profileNames, profileName)
-		}
+	for _, scan := range scans {
+		data.ScanNames = append(data.ScanNames, scan.GetScanName())
 	}
-	data.FailedInfo.Profiles = profileNames
 	return data, nil
 }
 
@@ -105,6 +82,11 @@ func populateFailedScans(ctx context.Context, failedScanNames []string, snapshot
 	for _, scan := range snapshotScans {
 		scanRefIDs = append(scanRefIDs, scan.GetScanRefId())
 	}
+	// We need to query by ScanName and ScanRefIDs
+	// because ScanNames are not unique across clusters.
+	// scanRefIDs holds all the scan references (failed and successful)
+	// associated with the ScanConfiguration.
+	// failedScanNames holds the scan names of the failed scans.
 	query := search.NewQueryBuilder().
 		AddExactMatches(search.ComplianceOperatorScanName, failedScanNames...).
 		AddExactMatches(search.ComplianceOperatorScanResult, scanRefIDs...).ProtoQuery()
diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go
index 7597abfbaf70c..70a5957adf7b2 100644
--- a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go
+++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go
@@ -28,8 +28,7 @@ func TestGetFailedClusters(t *testing.T) {
 				ClusterName:     "cluster-name",
 				Reasons:         []string{"some reason"},
 				OperatorVersion: "v1.6.0",
-				Scans:           []string{"scan-2"},
-				Profiles:        []string{"profile-2"},
+				ScanNames:       []string{"scan-2"},
 			},
 		},
 		Scans: []*storage.ComplianceOperatorReportSnapshotV2_Scan{
@@ -77,8 +76,7 @@
 			ClusterName:     "cluster-name",
 			Reasons:         []string{"some reason"},
 			OperatorVersion: "v1.6.0",
-			Scans:           scans,
-			Profiles:        []string{"profile-2"},
+			FailedScans:     scans,
 		},
 	}
 	failedClusters, err := GetFailedClusters(ctx, scanConfigID, snapshotStore, scanStore)
@@ -91,11 +89,7 @@
 			assert.Equal(tt, expectedCluster.ClusterName, actualCluster.ClusterName)
 			assert.Equal(tt, expectedCluster.Reasons, actualCluster.Reasons)
 			assert.Equal(tt, expectedCluster.OperatorVersion, actualCluster.OperatorVersion)
-			protoassert.SlicesEqual(t, expectedCluster.Scans, actualCluster.Scans)
-			assert.Len(tt, actualCluster.Profiles, len(expectedCluster.Profiles))
-			for _, profile := range expectedCluster.Profiles {
-				assert.Contains(tt, actualCluster.Profiles, profile)
-			}
+			protoassert.SlicesEqual(t, expectedCluster.FailedScans, actualCluster.FailedScans)
 		}
 	})
 }
@@ -123,7 +117,7 @@ func TestGetClusterData(t *testing.T) {
 			ClusterName:     "cluster-2",
 			Reasons:         []string{"some reason"},
 			OperatorVersion: "v1.6.0",
-			Scans: []*storage.ComplianceOperatorScanV2{
+			FailedScans: []*storage.ComplianceOperatorScanV2{
 				{
 					ScanName: "scan-2",
 					Profile: &storage.ProfileShim{
@@ -142,7 +136,7 @@
 	})
 	t.Run("failure querying the scan store", func(tt *testing.T) {
 		scanStore.EXPECT().
-			GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")).
+			SearchScans(gomock.Any(), gomock.Any()).
 			Times(1).Return(nil, errors.New("some error"))
 		clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore)
 		assert.Error(tt, err)
 		assert.Nil(tt, clusterData)
 	})
@@ -151,96 +145,81 @@ func TestGetClusterData(t *testing.T) {
 	t.Run("no failed clusters", func(tt *testing.T) {
 		gomock.InOrder(
 			scanStore.EXPECT().
-				GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")).
-				Times(1).Return(map[string]string{
-					"scan-1": "profile-1",
-					"scan-2": "profile-2",
+				SearchScans(gomock.Any(), gomock.Any()).
+				Times(1).Return([]*storage.ComplianceOperatorScanV2{
+					{
+						ScanName: "scan-1",
+					},
+					{
+						ScanName: "scan-2",
+					},
 				}, nil),
 			scanStore.EXPECT().
-				GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2")).
-				Times(1).Return(map[string]string{
-					"scan-1": "profile-1",
-					"scan-2": "profile-2",
+				SearchScans(gomock.Any(), gomock.Any()).
+				Times(1).Return([]*storage.ComplianceOperatorScanV2{
+					{
+						ScanName: "scan-1",
+					},
+					{
+						ScanName: "scan-2",
+					},
 				}, nil),
 		)
 		expectedClusterData := map[string]*report.ClusterData{
 			"cluster-1": {
 				ClusterId:   "cluster-1",
 				ClusterName: "cluster-1",
-				Scans:       []string{"scan-1", "scan-2"},
-				Profiles:    []string{"profile-1", "profile-2"},
+				ScanNames:   []string{"scan-1", "scan-2"},
 			},
 			"cluster-2": {
 				ClusterId:   "cluster-2",
 				ClusterName: "cluster-2",
-				Scans:       []string{"scan-1", "scan-2"},
-				Profiles:    []string{"profile-1", "profile-2"},
+				ScanNames:   []string{"scan-1", "scan-2"},
 			},
 		}
 		clusterData, err := GetClusterData(ctx, reportData, nil, scanStore)
 		assert.NoError(tt, err)
 		assertClusterData(tt, expectedClusterData, clusterData)
 	})
-	t.Run("failure querying for failed profiles", func(tt *testing.T) {
-		gomock.InOrder(
-			scanStore.EXPECT().
-				GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")).
-				Times(1).Return(map[string]string{
-					"scan-1": "profile-1",
-					"scan-2": "profile-2",
-				}, nil),
-			scanStore.EXPECT().
-				GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2")).
-				Times(1).Return(map[string]string{
-					"scan-1": "profile-1",
-					"scan-2": "profile-2",
-				}, nil),
-			scanStore.EXPECT().
-				GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2"), gomock.Eq([]string{"profile-ref-id"})).
-				Times(1).Return(nil, errors.New("some error")),
-		)
-		clusterData, err := GetClusterData(ctx, reportData, failedClusters, scanStore)
-		assert.Error(tt, err)
-		assert.Nil(tt, clusterData)
-	})
 	t.Run("with failed clusters", func(tt *testing.T) {
 		gomock.InOrder(
 			scanStore.EXPECT().
-				GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-1")).
-				Times(1).Return(map[string]string{
-					"scan-1": "profile-1",
-					"scan-2": "profile-2",
+				SearchScans(gomock.Any(), gomock.Any()).
+				Times(1).Return([]*storage.ComplianceOperatorScanV2{
+					{
+						ScanName: "scan-1",
+					},
+					{
+						ScanName: "scan-2",
+					},
 				}, nil),
 			scanStore.EXPECT().
-				GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(reportData.GetScanConfiguration().GetId()), gomock.Eq("cluster-2"), gomock.Eq([]string{"profile-ref-id"})).
-				Times(1).Return(map[string]string{
-					"scan-2": "profile-2",
+				SearchScans(gomock.Any(), gomock.Any()).
+				Times(1).Return([]*storage.ComplianceOperatorScanV2{
+					{
+						ScanName: "scan-1",
+					},
+					{
+						ScanName: "scan-2",
+					},
 				}, nil),
 		)
 		expectedClusterData := map[string]*report.ClusterData{
 			"cluster-1": {
 				ClusterId:   "cluster-1",
 				ClusterName: "cluster-1",
-				Scans:       []string{"scan-1", "scan-2"},
-				Profiles:    []string{"profile-1", "profile-2"},
+				ScanNames:   []string{"scan-1", "scan-2"},
 			},
 			"cluster-2": {
 				ClusterId:   "cluster-2",
 				ClusterName: "cluster-2",
-				Scans:       []string{"scan-1", "scan-2"},
-				Profiles:    []string{"profile-1", "profile-2"},
+				ScanNames:   []string{"scan-1", "scan-2"},
 				FailedInfo: &report.FailedCluster{
 					ClusterId:       "cluster-2",
 					ClusterName:     "cluster-2",
 					OperatorVersion: "v1.6.0",
 					Reasons:         []string{"some reason"},
-					Scans: []*storage.ComplianceOperatorScanV2{
+					FailedScans: []*storage.ComplianceOperatorScanV2{
 						{
 							ScanName: "scan-2",
 							Profile: &storage.ProfileShim{
@@ -248,7 +227,6 @@
 							},
 						},
 					},
-					Profiles: []string{"profile-2"},
 				},
 			},
 		}
@@ -265,13 +243,9 @@ func assertClusterData(t *testing.T, expected map[string]*report.ClusterData, ac
 		require.True(t, ok)
 		assert.Equal(t, expectedCluster.ClusterId, actualCluster.ClusterId)
 		assert.Equal(t, expectedCluster.ClusterName, actualCluster.ClusterName)
-		assert.Len(t, actualCluster.Scans, len(expectedCluster.Scans))
-		for _, scan := range expectedCluster.Scans {
-			assert.Contains(t, actualCluster.Scans, scan)
-		}
-		assert.Len(t, actualCluster.Profiles, len(expectedCluster.Profiles))
-		for _, profile := range expectedCluster.Profiles {
-			assert.Contains(t, actualCluster.Profiles, profile)
+		assert.Len(t, actualCluster.ScanNames, len(expectedCluster.ScanNames))
+		for _, scan := range expectedCluster.ScanNames {
+			assert.Contains(t, actualCluster.ScanNames, scan)
 		}
 		if expectedCluster.FailedInfo != nil {
 			require.NotNil(t, actualCluster.FailedInfo)
@@ -279,11 +253,7 @@ func assertClusterData(t *testing.T, expected map[string]*report.ClusterData, ac
 			assert.Equal(t, expectedCluster.FailedInfo.ClusterName, actualCluster.FailedInfo.ClusterName)
 			assert.Equal(t, expectedCluster.FailedInfo.Reasons, actualCluster.FailedInfo.Reasons)
 			assert.Equal(t, expectedCluster.FailedInfo.OperatorVersion, actualCluster.FailedInfo.OperatorVersion)
-			protoassert.SlicesEqual(t, expectedCluster.FailedInfo.Scans, actualCluster.FailedInfo.Scans)
-			assert.Len(t, actualCluster.FailedInfo.Profiles, len(expectedCluster.FailedInfo.Profiles))
-			for _, profile := range expectedCluster.FailedInfo.Profiles {
-				assert.Contains(t, actualCluster.FailedInfo.Profiles, profile)
-			}
+			protoassert.SlicesEqual(t, expectedCluster.FailedInfo.FailedScans, actualCluster.FailedInfo.FailedScans)
 		} else {
 			assert.Nil(t, actualCluster.FailedInfo)
 		}
diff --git a/central/complianceoperator/v2/report/manager/manager_impl.go b/central/complianceoperator/v2/report/manager/manager_impl.go
index 5676edb9aea4e..a67970fa8dd0c 100644
--- a/central/complianceoperator/v2/report/manager/manager_impl.go
+++ b/central/complianceoperator/v2/report/manager/manager_impl.go
@@ -311,6 +311,7 @@ func (m *managerImpl) handleReportRequest(request *reportRequest) (bool, error)
 		if dbErr := helpers.UpdateSnapshotOnError(request.ctx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil {
 			return false, errors.Wrap(dbErr, "unable to upsert snapshot on generation failure")
 		}
+		return false, errors.Wrap(err, "unable to get cluster information")
 	}
 	// Add failed clusters to the report snapshot
 	if _, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot); err != nil {
@@ -587,6 +588,7 @@ func (m *managerImpl) generateSingleReportFromWatcherResults(result *watcher.Sca
 		if dbErr := helpers.UpdateSnapshotOnError(m.automaticReportingCtx, snapshot, report.ErrReportGeneration, m.snapshotDataStore); dbErr != nil {
 			return errors.Wrap(dbErr, "unable to update snapshot on populate cluster data error")
 		}
+		return errors.Wrap(err, "unable to populate cluster data")
 	}
 	// Add failed clusters to the report snapshot
 	snapshot, err = m.addFailedClustersToTheSnapshot(failedClusters, snapshot)
@@ -609,26 +611,26 @@
 }
 
 func (m *managerImpl) addFailedClustersToTheSnapshot(failedClusters map[string]*report.FailedCluster, snapshot *storage.ComplianceOperatorReportSnapshotV2) (*storage.ComplianceOperatorReportSnapshotV2, error) {
-	if len(failedClusters) > 0 {
-		failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters))
-		for _, failedCluster := range failedClusters {
-			scans := make([]string, 0, len(failedCluster.Scans))
-			for _, scan := range failedCluster.Scans {
-				scans = append(scans, scan.GetScanName())
-			}
-			failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{
-				ClusterId:       failedCluster.ClusterId,
-				ClusterName:     failedCluster.ClusterName,
-				OperatorVersion: failedCluster.OperatorVersion,
-				Reasons:         failedCluster.Reasons,
-				Scans:           scans,
-				Profiles:        failedCluster.Profiles,
-			})
-		}
-		snapshot.FailedClusters = failedClustersSlice
-		if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil {
-			return snapshot, errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId())
+	if len(failedClusters) == 0 {
+		return snapshot, nil
+	}
+	failedClustersSlice := make([]*storage.ComplianceOperatorReportSnapshotV2_FailedCluster, 0, len(failedClusters))
+	for _, failedCluster := range failedClusters {
+		scans := make([]string, 0, len(failedCluster.FailedScans))
+		for _, scan := range failedCluster.FailedScans {
+			scans = append(scans, scan.GetScanName())
 		}
+		failedClustersSlice = append(failedClustersSlice, &storage.ComplianceOperatorReportSnapshotV2_FailedCluster{
+			ClusterId:       failedCluster.ClusterId,
+			ClusterName:     failedCluster.ClusterName,
+			OperatorVersion: failedCluster.OperatorVersion,
+			Reasons:         failedCluster.Reasons,
+			ScanNames:       scans,
+		})
+	}
+	snapshot.FailedClusters = failedClustersSlice
+	if err := m.snapshotDataStore.UpsertSnapshot(m.automaticReportingCtx, snapshot); err != nil {
+		return snapshot, errors.Wrapf(err, "unable to upsert the snapshot %s", snapshot.GetReportId())
 	}
 	return snapshot, nil
 }
diff --git a/central/complianceoperator/v2/report/manager/manager_test.go b/central/complianceoperator/v2/report/manager/manager_test.go
index a6659a5e78b2b..e4e9e277d9cd5 100644
--- a/central/complianceoperator/v2/report/manager/manager_test.go
+++ b/central/complianceoperator/v2/report/manager/manager_test.go
@@ -117,9 +117,9 @@ func (m *ManagerTestSuite) TestHandleReportRequest() {
 		GetLastSnapshotFromScanConfig(gomock.Any(), gomock.Eq(scanConfig.GetId())).
 		Times(1).Return(nil, nil)
 	m.scanDataStore.EXPECT().
-		GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(scanConfig.GetId()), gomock.Cond[string](func(id string) bool {
-			return id == "cluster-1" || id == "cluster-2"
-		})).Times(len(scanConfig.GetClusters())).Return(getScanToProfileMap(len(scanConfig.GetProfiles())), nil)
+		SearchScans(gomock.Any(), gomock.Any()).
+		Times(len(scanConfig.GetClusters())).
+		Return(getScans(len(scanConfig.GetProfiles())), nil)
 	err := manager.SubmitReportRequest(ctx, getTestScanConfig(), storage.ComplianceOperatorReportStatus_EMAIL)
 	m.Require().NoError(err)
 	handleWaitGroup(m.T(), &wg, 10*time.Millisecond, "report generation")
@@ -619,9 +619,9 @@ func (m *ManagerTestSuite) setupExpectCallsFromFinishAllScans(sc *storage.Compli
 			SearchScans(gomock.Any(), gomock.Any()).
 			Times(1).Return(allScans, nil),
 		m.scanDataStore.EXPECT().
-			GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool {
-				return id == "cluster-1" || id == "cluster-2"
-			})).Times(len(sc.GetClusters())*numSnapshots).Return(getScanToProfileMap(len(sc.GetProfiles())), nil),
+			SearchScans(gomock.Any(), gomock.Any()).
+			Times(len(sc.GetClusters())*numSnapshots).
+			Return(scans, nil),
 	}
 	expectedCalls = append(expectedCalls, calls...)
 	return expectedCalls
@@ -666,13 +666,9 @@ func (m *ManagerTestSuite) setupExpectCallsFromFailAllScans(sc *storage.Complian
 			Times(numSnapshots).Return(nil),
 		// GetClusterData
 		m.scanDataStore.EXPECT().
-			GetProfilesScanNamesByScanConfigAndCluster(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool {
-				return id == "cluster-1" || id == "cluster-2"
-			})).Times(len(sc.GetClusters())*numSnapshots).Return(getScanToProfileMap(len(sc.GetProfiles())), nil),
-		m.scanDataStore.EXPECT().
-			GetProfileScanNamesByScanConfigClusterAndProfileRef(gomock.Any(), gomock.Eq(sc.GetId()), gomock.Cond[string](func(id string) bool {
-				return id == "cluster-1" || id == "cluster-2"
-			}), gomock.Any()).Times(2).Return(getScanToProfileMap(len(sc.GetProfiles())), nil),
+			SearchScans(gomock.Any(), gomock.Any()).
+			Times(len(sc.GetClusters())*numSnapshots).
+			Return(scans, nil),
 	}
 	expectedCalls = append(expectedCalls, calls...)
 	return expectedCalls
@@ -765,11 +761,13 @@ func handleWaitGroup(t *testing.T, wg *concurrency.WaitGroup, timeout time.Durat
 	}
 }
 
-func getScanToProfileMap(numProfiles int) map[string]string {
-	ret := make(map[string]string)
+func getScans(numProfiles int) []*storage.ComplianceOperatorScanV2 {
+	var ret []*storage.ComplianceOperatorScanV2
 	for i := 0; i < numProfiles; i++ {
 		name := fmt.Sprintf("profile-%d", i)
-		ret[name] = name
+		ret = append(ret, &storage.ComplianceOperatorScanV2{
+			ScanName: name,
+		})
 	}
 	return ret
 }
diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator.go b/central/complianceoperator/v2/report/manager/results/results_aggregator.go
index 9c2673a3af695..218af697abd3d 100644
--- a/central/complianceoperator/v2/report/manager/results/results_aggregator.go
+++ b/central/complianceoperator/v2/report/manager/results/results_aggregator.go
@@ -98,19 +98,19 @@ func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID,
 		totalFail:  0,
 		totalMixed: 0,
 	}
-	scans := clusterData.Scans
+	scanNames := clusterData.ScanNames
 	if clusterData.FailedInfo != nil {
-		allScansSet := set.NewStringSet(scans...)
+		allScansSet := set.NewStringSet(scanNames...)
 		failedScansSet := set.NewStringSet()
-		for _, scan := range clusterData.FailedInfo.Scans {
+		for _, scan := range clusterData.FailedInfo.FailedScans {
 			failedScansSet.Add(scan.GetScanName())
 		}
-		scans = allScansSet.Difference(failedScansSet).AsSlice()
+		scanNames = allScansSet.Difference(failedScansSet).AsSlice()
 	}
 	scanConfigQuery := search.NewQueryBuilder().
 		AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).
 		AddExactMatches(search.ClusterID, clusterID).
-		AddExactMatches(search.ComplianceOperatorScanName, scans...).
+		AddExactMatches(search.ComplianceOperatorScanName, scanNames...).
 		ProtoQuery()
 	err := g.checkResultsDS.WalkByQuery(ctx, scanConfigQuery, g.aggreateResults(ctx, clusterID, &ret, statuses))
 	return ret, statuses, err
diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go
index 7036247dd5ff2..fa662da009a9f 100644
--- a/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go
+++ b/central/complianceoperator/v2/report/manager/results/results_aggregator_test.go
@@ -442,8 +442,7 @@ func getRequest(ctx context.Context, numClusters, numProfiles, numFailedClusters
 		clusterData[id] = &report.ClusterData{
 			ClusterId:   id,
 			ClusterName: id,
-			Profiles:    profileNames,
-			Scans:       profileNames,
+			ScanNames:   profileNames,
 		}
 	}
 	if numFailedClusters > 0 {
@@ -455,10 +454,9 @@
 				ClusterName:     id,
 				Reasons:         []string{"timeout"},
 				OperatorVersion: "v1.6.0",
-				Profiles:        clusterData[id].Profiles,
-				Scans: func() []*storage.ComplianceOperatorScanV2 {
+				FailedScans: func() []*storage.ComplianceOperatorScanV2 {
 					var scans []*storage.ComplianceOperatorScanV2
-					for _, scanName := range clusterData[id].Scans {
+					for _, scanName := range clusterData[id].ScanNames {
 						scans = append(scans, &storage.ComplianceOperatorScanV2{
 							ScanName: scanName,
 						})
diff --git a/central/complianceoperator/v2/report/manager/watcher/validator.go b/central/complianceoperator/v2/report/manager/watcher/validator.go
index 86df7cd0871b3..6c8db050a7ac5 100644
--- a/central/complianceoperator/v2/report/manager/watcher/validator.go
+++ b/central/complianceoperator/v2/report/manager/watcher/validator.go
@@ -26,7 +26,7 @@ func ValidateScanConfigResults(ctx context.Context, results *ScanConfigWatcherRe
 			errList.AddError(errors.New(fmt.Sprintf("scan %s failed in cluster %s", scanResult.Scan.GetScanName(), failedClusterInfo.ClusterId)))
 			if previousFailedInfo, ok := failedClusters[failedClusterInfo.ClusterId]; ok && !isInstallationError {
 				previousFailedInfo.Reasons = append(previousFailedInfo.Reasons, failedClusterInfo.Reasons...)
-				previousFailedInfo.Scans = append(previousFailedInfo.Scans, failedClusterInfo.Scans...)
+				previousFailedInfo.FailedScans = append(previousFailedInfo.FailedScans, failedClusterInfo.FailedScans...)
 				continue
 			}
 			failedClusters[failedClusterInfo.ClusterId] = failedClusterInfo
@@ -68,7 +68,7 @@ func ValidateScanResults(ctx context.Context, results *ScanWatcherResults, integ
 	if len(ret.Reasons) > 0 {
 		return ret, true
 	}
-	ret.Scans = []*storage.ComplianceOperatorScanV2{results.Scan}
+	ret.FailedScans = []*storage.ComplianceOperatorScanV2{results.Scan}
 	if errors.Is(results.Error, ErrScanRemoved) {
 		ret.Reasons = []string{fmt.Sprintf(report.SCAN_REMOVED_FMT, results.Scan.GetScanName())}
 		return ret, false
diff --git a/central/complianceoperator/v2/report/manager/watcher/validator_test.go b/central/complianceoperator/v2/report/manager/watcher/validator_test.go
index ec570f51b5cfc..7ff8787cd1728 100644
--- a/central/complianceoperator/v2/report/manager/watcher/validator_test.go
+++ b/central/complianceoperator/v2/report/manager/watcher/validator_test.go
@@ -458,7 +458,7 @@ func getFailedClusters(idx, numFailedClusters, numMissingClusters, numScans int)
 		var reasons []string
 		for j := 0; j < numScans; j++ {
 			reasons = append(reasons, report.INTERNAL_ERROR)
-			failedCluster.Scans = append(failedCluster.Scans, &storage.ComplianceOperatorScanV2{
+			failedCluster.FailedScans = append(failedCluster.FailedScans, &storage.ComplianceOperatorScanV2{
 				ClusterId: id,
 			})
 		}
@@ -483,7 +483,7 @@ func newFailedCluster(clusterID, coVersion string, reasons []string, expectScan
 		Reasons:         reasons,
 	}
 	if expectScan {
-		ret.Scans = []*storage.ComplianceOperatorScanV2{
+		ret.FailedScans = []*storage.ComplianceOperatorScanV2{
 			{
 				ClusterId: clusterID,
 				ScanName:  scanName,
@@ -501,10 +501,10 @@ func assertFailedCluster(t *testing.T, expected, actual *report.FailedCluster) {
 	assert.Equal(t, expected.ClusterName, actual.ClusterName)
 	assert.Equal(t, expected.OperatorVersion, actual.OperatorVersion)
 	assert.Equal(t, expected.Reasons, actual.Reasons)
-	assert.Equal(t, len(expected.Scans), len(actual.Scans))
-	for _, expectedScan := range expected.Scans {
+	assert.Equal(t, len(expected.FailedScans), len(actual.FailedScans))
+	for _, expectedScan := range expected.FailedScans {
 		found := false
-		for _, actualScan := range actual.Scans {
+		for _, actualScan := range actual.FailedScans {
 			if proto.Equal(expectedScan, actualScan) {
 				found = true
 				break
diff --git a/central/complianceoperator/v2/report/request.go b/central/complianceoperator/v2/report/request.go
index c0253715eae65..4c92a8d028536 100644
--- a/central/complianceoperator/v2/report/request.go
+++ b/central/complianceoperator/v2/report/request.go
@@ -24,8 +24,7 @@ type Request struct {
 
 type ClusterData struct {
 	ClusterId   string
 	ClusterName string
-	Profiles    []string
-	Scans       []string
+	ScanNames   []string
 	FailedInfo  *FailedCluster
 }
 
@@ -35,6 +34,5 @@ type FailedCluster struct {
 	ClusterName     string
 	Reasons         []string
 	OperatorVersion string
-	Scans           []*storage.ComplianceOperatorScanV2
-	Profiles        []string
+	FailedScans     []*storage.ComplianceOperatorScanV2
 }
diff --git a/central/complianceoperator/v2/scans/datastore/datastore.go b/central/complianceoperator/v2/scans/datastore/datastore.go
index 56a55af40c964..ef96ee5da4a3a 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore.go
@@ -31,16 +31,11 @@ type DataStore interface {
 
 	// SearchScans returns the scans for the given query
 	SearchScans(ctx context.Context, query *v1.Query) ([]*storage.ComplianceOperatorScanV2, error)
-
-	GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error)
-
-	GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfig, clusterID string, profileRefs []string) (map[string]string, error)
 }
 
 // New returns an instance of DataStore.
-func New(complianceScanStorage pgStore.Store, db postgres.DB) DataStore {
+func New(complianceScanStorage pgStore.Store) DataStore {
 	return &datastoreImpl{
-		db:    db,
 		store: complianceScanStorage,
 	}
 }
@@ -48,5 +43,5 @@
 // GetTestPostgresDataStore provides a datastore connected to postgres for testing purposes.
 func GetTestPostgresDataStore(_ testing.TB, pool postgres.DB) DataStore {
 	store := pgStore.New(pool)
-	return New(store, pool)
+	return New(store)
 }
diff --git a/central/complianceoperator/v2/scans/datastore/datastore_impl.go b/central/complianceoperator/v2/scans/datastore/datastore_impl.go
index 2d2a2e5f6feac..cc821c245b263 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore_impl.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore_impl.go
@@ -2,21 +2,15 @@ package datastore
 
 import (
 	"context"
-	"fmt"
-	"strings"
 
-	pgStore "github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
+	"github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
 	v1 "github.com/stackrox/rox/generated/api/v1"
 	"github.com/stackrox/rox/generated/storage"
-	"github.com/stackrox/rox/pkg/postgres"
-	"github.com/stackrox/rox/pkg/postgres/schema"
 	"github.com/stackrox/rox/pkg/search"
-	pgSearch "github.com/stackrox/rox/pkg/search/postgres"
 )
 
 type datastoreImpl struct {
-	db    postgres.DB
-	store pgStore.Store
+	store postgres.Store
 }
 
 // GetScan retrieves the scan object from the database
@@ -56,42 +50,3 @@ func (d *datastoreImpl) DeleteScanByCluster(ctx context.Context, clusterID strin
 func (d *datastoreImpl) SearchScans(ctx context.Context, query *v1.Query) ([]*storage.ComplianceOperatorScanV2, error) {
 	return d.store.GetByQuery(ctx, query)
 }
-
-func (d *datastoreImpl) GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error) {
-	return d.GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfigID, clusterID, []string{})
-}
-
-func (d *datastoreImpl) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfigID, clusterID string, profileRefs []string) (map[string]string, error) {
-	query := search.NewQueryBuilder().
-		AddExactMatches(search.ClusterID, clusterID).
-		AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID).
-		ProtoQuery()
-	if len(profileRefs) > 0 {
-		query = search.ConjunctionQuery(
-			search.NewQueryBuilder().
-				AddExactMatches(search.ComplianceOperatorProfileRef, profileRefs...).
-				ProtoQuery(),
-			query)
-	}
-	type ScanProfiles struct {
-		ProfileName string `db:"compliance_profile_name"`
-		ScanName    string `db:"compliance_scan_name"`
-	}
-	query.Selects = []*v1.QuerySelect{
-		search.NewQuerySelect(search.ComplianceOperatorProfileName).Proto(),
-		search.NewQuerySelect(search.ComplianceOperatorScanName).Proto(),
-	}
-	results, err := pgSearch.RunSelectRequestForSchema[ScanProfiles](ctx, d.db, schema.ComplianceOperatorScanV2Schema, query)
-	if err != nil {
-		return nil, err
-	}
-	expandedProfileNames := make(map[string]string)
-	for _, result := range results {
-		name := result.ProfileName
-		if result.ProfileName != result.ScanName {
-			name = fmt.Sprintf("%s%s", result.ProfileName, strings.TrimPrefix(result.ScanName, result.ProfileName))
-		}
-		expandedProfileNames[result.ScanName] = name
-	}
-	return expandedProfileNames, nil
-}
diff --git a/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go b/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
index b2392634970c7..65979409d3f73 100644
--- a/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
+++ b/central/complianceoperator/v2/scans/datastore/datastore_impl_test.go
@@ -4,13 +4,8 @@ package datastore
 
 import (
 	"context"
-	"fmt"
 	"testing"
 
-	profileStorage "github.com/stackrox/rox/central/complianceoperator/v2/profiles/datastore"
-	profileSearch "github.com/stackrox/rox/central/complianceoperator/v2/profiles/datastore/search"
-	profilePostgresStorage "github.com/stackrox/rox/central/complianceoperator/v2/profiles/store/postgres"
-	scanConfigStorage "github.com/stackrox/rox/central/complianceoperator/v2/scanconfigurations/datastore"
 	scanStorage "github.com/stackrox/rox/central/complianceoperator/v2/scans/store/postgres"
 	"github.com/stackrox/rox/generated/storage"
 	"github.com/stackrox/rox/pkg/features"
@@ -280,177 +275,10 @@ func (s *complianceScanDataStoreTestSuite) TestDeleteScan() {
 	protoassert.Equal(s.T(), testScan2, retrievedObject)
 }
 
-func (s *complianceScanDataStoreTestSuite) TestGetProfileScanNames() {
-	// Create Test DataStores for ScanConfigurations and Profiles
-	scanConfigDS := scanConfigStorage.GetTestPostgresDataStore(s.T(), s.db)
-	profilePostgres := profilePostgresStorage.New(s.db)
-	searcher := profileSearch.New(profilePostgres)
-	profileDS := profileStorage.GetTestPostgresDataStore(s.T(), s.db, searcher)
-
-	profileName1 := "ocp4-cis"
-	profileName2 := "ocp4-cis-node"
-
-	// Define Profiles 1 and 2 in Cluster 1
-	profile1 := getTestProfile(profileName1, fixtureconsts.ComplianceProfileID1, testconsts.Cluster1)
-	profile2 := getTestProfile(profileName2, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1)
-
-	// Define Profiles 1 and 2 in Cluster 2
-	profile3 := getTestProfile(profileName1, fixtureconsts.ComplianceProfileID1, testconsts.Cluster2)
-	profile4 := getTestProfile(profileName2, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2)
-
-	// Define ScanConfiguration
-	scanConfigName := "scanConfig1"
-	scanConfig := getTestScanConfig(scanConfigName,
-		fixtureconsts.ComplianceScanConfigID1,
-		[]string{testconsts.Cluster1, testconsts.Cluster2},
-		[]string{profileName1, profileName2})
-
-	s.T().Cleanup(func() {
-		_, _ = scanConfigDS.DeleteScanConfiguration(s.hasWriteCtx, fixtureconsts.ComplianceScanConfigID1)
-		_ = profileDS.DeleteProfilesByCluster(s.hasWriteCtx, fixtureconsts.Cluster1)
-		_ = profileDS.DeleteProfilesByCluster(s.hasWriteCtx, fixtureconsts.Cluster2)
-	})
-
-	// Create Profiles 1 and 2 in Cluster 1
-	s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile1))
-	s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile2))
-
-	// Create Profiles 1 and 2 in Cluster 2
-	s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile3))
-	s.Require().NoError(profileDS.UpsertProfile(s.hasWriteCtx, profile4))
-
-	// Create ScanConfiguration
-	s.Require().NoError(scanConfigDS.UpsertScanConfiguration(s.hasWriteCtx, scanConfig))
-
-	// make sure we have nothing
-	ScanIDs, err := s.storage.GetIDs(s.hasReadCtx)
-	s.Require().NoError(err)
-	s.Require().Empty(ScanIDs)
-
-	scanName1 := "ocp4-cis"
-	scanName2 := "ocp4-cis-node-worker"
-	scanName3 := "ocp4-cis-node-master"
-
-	// Define Scan 1, 2, and 3 for Cluster 1
-	testScan1 := getTestScanWithScanConfig(scanName1, scanConfigName, fixtureconsts.ComplianceProfileID1, testconsts.Cluster1)
-	testScan2 := getTestScanWithScanConfig(scanName2, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1)
-	testScan3 := getTestScanWithScanConfig(scanName3, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster1)
-
-	// Define Scan 1, 2, and 3 for Cluster 2
-	testScan4 := getTestScanWithScanConfig(scanName1, scanConfigName, fixtureconsts.ComplianceProfileID1, testconsts.Cluster2)
-	testScan5 := getTestScanWithScanConfig(scanName2, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2)
-	testScan6 := getTestScanWithScanConfig(scanName3, scanConfigName, fixtureconsts.ComplianceProfileID2, testconsts.Cluster2)
-
-	s.T().Cleanup(func() {
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan1.GetId())
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan2.GetId())
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan3.GetId())
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan4.GetId())
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan5.GetId())
-		_ = s.dataStore.DeleteScan(s.hasWriteCtx, testScan6.GetId())
-	})
-
-	// Create Scans
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan1))
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan2))
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan3))
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan4))
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan5))
-	s.Require().NoError(s.dataStore.UpsertScan(s.hasWriteCtx, testScan6))
-
-	s.Run("get scan to profiles from cluster 1", func() {
-		scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1)
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-
-		s.Assert().Contains(scanToProfileMap, scanName1)
-		s.Assert().Contains(scanToProfileMap, scanName2)
-		s.Assert().Contains(scanToProfileMap, scanName3)
-
-		s.Assert().Equal(profileName1, scanToProfileMap[scanName1])
-		s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2])
-		s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3])
-	})
-
-	s.Run("get scan to profiles from cluster 2", func() {
-		scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster2)
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-
-		s.Assert().Contains(scanToProfileMap, scanName1)
-		s.Assert().Contains(scanToProfileMap, scanName2)
-		s.Assert().Contains(scanToProfileMap, scanName3)
-
-		s.Assert().Equal(profileName1, scanToProfileMap[scanName1])
-		s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2])
-		s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3])
-	})
-
-	s.Run("get empty scan to profiles (wrong cluster)", func() {
-		scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.WrongCluster)
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-		s.Assert().Len(scanToProfileMap, 0)
-	})
-
-	s.Run("get empty scan to profiles (wrong scan config)", func() {
-		scanToProfileMap, err := s.dataStore.GetProfilesScanNamesByScanConfigAndCluster(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID2, testconsts.Cluster1)
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-		s.Assert().Len(scanToProfileMap, 0)
-	})
-
-	s.Run("get scan to profiles with profile ref 1", func() {
-		scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID1})
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-
-		s.Assert().Contains(scanToProfileMap, scanName1)
-		s.Assert().NotContains(scanToProfileMap, scanName2)
-		s.Assert().NotContains(scanToProfileMap, scanName3)
-
-		s.Assert().Equal(profileName1, scanToProfileMap[scanName1])
-		s.Assert().Len(scanToProfileMap[scanName2], 0)
-		s.Assert().Len(scanToProfileMap[scanName3], 0)
-	})
-
-	s.Run("get scan to profiles with profile ref 2", func() {
-		scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID2})
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-
-		s.Assert().NotContains(scanToProfileMap, scanName1)
-		s.Assert().Contains(scanToProfileMap, scanName2)
-		s.Assert().Contains(scanToProfileMap, scanName3)
-
-		s.Assert().Len(scanToProfileMap[scanName1], 0)
-		s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2])
-		s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3])
-	})
-
-	s.Run("get scan to profiles with profile ref 1 and 2", func() {
-		scanToProfileMap, err := s.dataStore.GetProfileScanNamesByScanConfigClusterAndProfileRef(s.hasReadCtx, fixtureconsts.ComplianceScanConfigID1, testconsts.Cluster1, []string{fixtureconsts.ComplianceProfileID1, fixtureconsts.ComplianceProfileID2})
-		s.Require().NoError(err)
-		s.Require().NotNil(scanToProfileMap)
-
-		s.Assert().Contains(scanToProfileMap, scanName1)
-		s.Assert().Contains(scanToProfileMap, scanName2)
-		s.Assert().Contains(scanToProfileMap, scanName3)
-
-		s.Assert().Equal(profileName1, scanToProfileMap[scanName1])
-		s.Assert().Equal(fmt.Sprintf("%s-worker", profileName2), scanToProfileMap[scanName2])
-		s.Assert().Equal(fmt.Sprintf("%s-master", profileName2), scanToProfileMap[scanName3])
-	})
-}
-
 func getTestScan(scanName string, profileID string, clusterID string) *storage.ComplianceOperatorScanV2 {
-	return getTestScanWithScanConfig(scanName, scanName, profileID, clusterID)
-}
-
-func getTestScanWithScanConfig(scanName string, scanConfig string, profileID string, clusterID string) *storage.ComplianceOperatorScanV2 {
 	return &storage.ComplianceOperatorScanV2{
 		Id:             uuid.NewV4().String(),
-		ScanConfigName: scanConfig,
+		ScanConfigName: scanName,
 		ScanName:       scanName,
 		ClusterId:      clusterID,
 		Errors:         "",
@@ -467,37 +295,3 @@ func getTestScanWithScanConfig(scanName string, scanConfig string, profileID str
 		LastExecutedTime: protocompat.TimestampNow(),
 	}
 }
-
-func getTestProfile(profileName string, profileRefID string, clusterID string) *storage.ComplianceOperatorProfileV2 {
-	return &storage.ComplianceOperatorProfileV2{
-		Id:           uuid.NewV4().String(),
-		Name:         profileName,
-		ProfileRefId: profileRefID,
-		ClusterId:    clusterID,
-	}
-}
-
-func getTestScanConfig(scanConfigName string, scanConfigID string, clusterID []string, profiles []string) *storage.ComplianceOperatorScanConfigurationV2 {
-	return &storage.ComplianceOperatorScanConfigurationV2{
-		Id:             scanConfigID,
-		ScanConfigName: scanConfigName,
-		Profiles: func() []*storage.ComplianceOperatorScanConfigurationV2_ProfileName {
-			var ret []*storage.ComplianceOperatorScanConfigurationV2_ProfileName
-			for _, profile := range profiles {
-				ret = append(ret, &storage.ComplianceOperatorScanConfigurationV2_ProfileName{
-					ProfileName: profile,
-				})
-			}
-			return ret
-		}(),
-		Clusters: func() []*storage.ComplianceOperatorScanConfigurationV2_Cluster {
-			var ret []*storage.ComplianceOperatorScanConfigurationV2_Cluster
-			for _, cluster := range clusterID {
-				ret = append(ret, &storage.ComplianceOperatorScanConfigurationV2_Cluster{
-					ClusterId: cluster,
-				})
-			}
-			return ret
-		}(),
-	}
-}
diff --git a/central/complianceoperator/v2/scans/datastore/mocks/datastore.go b/central/complianceoperator/v2/scans/datastore/mocks/datastore.go
index 9b4fabcb4567d..fab52feb0f8e7 100644
--- a/central/complianceoperator/v2/scans/datastore/mocks/datastore.go
+++ b/central/complianceoperator/v2/scans/datastore/mocks/datastore.go
@@ -70,36 +70,6 @@ func (mr *MockDataStoreMockRecorder) DeleteScanByCluster(ctx, clusterID any) *go
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScanByCluster", reflect.TypeOf((*MockDataStore)(nil).DeleteScanByCluster), ctx, clusterID)
 }
 
-// GetProfileScanNamesByScanConfigClusterAndProfileRef mocks base method.
-func (m *MockDataStore) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx context.Context, scanConfig, clusterID string, profileRefs []string) (map[string]string, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetProfileScanNamesByScanConfigClusterAndProfileRef", ctx, scanConfig, clusterID, profileRefs)
-	ret0, _ := ret[0].(map[string]string)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetProfileScanNamesByScanConfigClusterAndProfileRef indicates an expected call of GetProfileScanNamesByScanConfigClusterAndProfileRef.
-func (mr *MockDataStoreMockRecorder) GetProfileScanNamesByScanConfigClusterAndProfileRef(ctx, scanConfig, clusterID, profileRefs any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProfileScanNamesByScanConfigClusterAndProfileRef", reflect.TypeOf((*MockDataStore)(nil).GetProfileScanNamesByScanConfigClusterAndProfileRef), ctx, scanConfig, clusterID, profileRefs)
-}
-
-// GetProfilesScanNamesByScanConfigAndCluster mocks base method.
-func (m *MockDataStore) GetProfilesScanNamesByScanConfigAndCluster(ctx context.Context, scanConfigID, clusterID string) (map[string]string, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetProfilesScanNamesByScanConfigAndCluster", ctx, scanConfigID, clusterID)
-	ret0, _ := ret[0].(map[string]string)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetProfilesScanNamesByScanConfigAndCluster indicates an expected call of GetProfilesScanNamesByScanConfigAndCluster.
-func (mr *MockDataStoreMockRecorder) GetProfilesScanNamesByScanConfigAndCluster(ctx, scanConfigID, clusterID any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProfilesScanNamesByScanConfigAndCluster", reflect.TypeOf((*MockDataStore)(nil).GetProfilesScanNamesByScanConfigAndCluster), ctx, scanConfigID, clusterID)
-}
-
 // GetScan mocks base method.
 func (m *MockDataStore) GetScan(ctx context.Context, id string) (*storage.ComplianceOperatorScanV2, bool, error) {
 	m.ctrl.T.Helper()
diff --git a/central/complianceoperator/v2/scans/datastore/singleton.go b/central/complianceoperator/v2/scans/datastore/singleton.go
index 115c800036ab0..0766482003777 100644
--- a/central/complianceoperator/v2/scans/datastore/singleton.go
+++ b/central/complianceoperator/v2/scans/datastore/singleton.go
@@ -14,7 +14,7 @@ var (
 )
 
 func initialize() {
-	dataStore = New(pgStore.New(globaldb.GetPostgres()), globaldb.GetPostgres())
+	dataStore = New(pgStore.New(globaldb.GetPostgres()))
 }
 
 // Singleton provides the interface for non-service external interaction.
diff --git a/generated/storage/compliance_operator_v2.pb.go b/generated/storage/compliance_operator_v2.pb.go
index db2ad58b19f2b..71ab962354648 100644
--- a/generated/storage/compliance_operator_v2.pb.go
+++ b/generated/storage/compliance_operator_v2.pb.go
@@ -2619,8 +2619,7 @@ type ComplianceOperatorReportSnapshotV2_FailedCluster struct {
 	ClusterName     string   `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
 	Reasons         []string `protobuf:"bytes,3,rep,name=reasons,proto3" json:"reasons,omitempty"`
 	OperatorVersion string   `protobuf:"bytes,4,opt,name=operator_version,json=operatorVersion,proto3" json:"operator_version,omitempty"`
-	Scans           []string `protobuf:"bytes,5,rep,name=scans,proto3" json:"scans,omitempty"`
-	Profiles        []string `protobuf:"bytes,6,rep,name=profiles,proto3" json:"profiles,omitempty"`
+	ScanNames       []string `protobuf:"bytes,5,rep,name=scanNames,proto3" json:"scanNames,omitempty"`
 	unknownFields   protoimpl.UnknownFields
 	sizeCache       protoimpl.SizeCache
 }
@@ -2683,16 +2682,9 @@ func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetOperatorVersion()
 	return ""
 }
 
-func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetScans() []string {
+func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetScanNames() []string {
 	if x != nil {
-		return x.Scans
-	}
-	return nil
-}
-
-func (x *ComplianceOperatorReportSnapshotV2_FailedCluster) GetProfiles() []string {
-	if x != nil {
-		return x.Profiles
+		return x.ScanNames
 	}
 	return nil
 }
@@ -3076,7 +3068,7 @@ const file_storage_compliance_operator_v2_proto_rawDesc = "" +
 	"\x0foutdated_object\x18\x06 \x01(\tR\x0eoutdatedObject\x12)\n" +
 	"\x10enforcement_type\x18\a \x01(\tR\x0fenforcementType\x12\x1d\n" +
 	"\n" +
-	"cluster_id\x18\b \x01(\tR\tclusterId\"\xcf\x06\n" +
+	"cluster_id\x18\b \x01(\tR\tclusterId\"\xbb\x06\n" +
 	"\"ComplianceOperatorReportSnapshotV2\x12\x1b\n" +
 	"\treport_id\x18\x01 \x01(\tR\breportId\x122\n" +
 	"\x15scan_configuration_id\x18\x02 \x01(\tR\x13scanConfigurationId\x12\x12\n" +
@@ -3090,15 +3082,14 @@
 	"\x0ffailed_clusters\x18\t \x03(\v29.storage.ComplianceOperatorReportSnapshotV2.FailedClusterR\x0efailedClusters\x1an\n" +
 	"\x04Scan\x12\x1e\n" +
 	"\vscan_ref_id\x18\x01 \x01(\tR\tscanRefId\x12F\n" +
-	"\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\xc8\x01\n" +
+	"\x11last_started_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x1a\xb4\x01\n" +
 	"\rFailedCluster\x12\x1d\n" +
 	"\n" +
 	"cluster_id\x18\x01 \x01(\tR\tclusterId\x12!\n" +
 	"\fcluster_name\x18\x02 \x01(\tR\vclusterName\x12\x18\n" +
 	"\areasons\x18\x03 \x03(\tR\areasons\x12)\n" +
-	"\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\x12\x14\n" +
-	"\x05scans\x18\x05 \x03(\tR\x05scans\x12\x1a\n" +
-	"\bprofiles\x18\x06 \x03(\tR\bprofiles\"\x96\x05\n" +
+	"\x10operator_version\x18\x04 \x01(\tR\x0foperatorVersion\x12\x1c\n" +
+	"\tscanNames\x18\x05 \x03(\tR\tscanNames\"\x96\x05\n" +
 	"\x1cComplianceOperatorReportData\x12]\n" +
 	"\x12scan_configuration\x18\x01 \x01(\v2..storage.ComplianceOperatorScanConfigurationV2R\x11scanConfiguration\x12Z\n" +
 	"\x0ecluster_status\x18\x02 \x03(\v23.storage.ComplianceOperatorReportData.ClusterStatusR\rclusterStatus\x12H\n" +
diff --git a/generated/storage/compliance_operator_v2_vtproto.pb.go b/generated/storage/compliance_operator_v2_vtproto.pb.go
index 52cb17b86f6bb..fb39ebeda5e2c 100644
--- a/generated/storage/compliance_operator_v2_vtproto.pb.go
+++ b/generated/storage/compliance_operator_v2_vtproto.pb.go
@@ -664,15 +664,10 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) CloneVT() *Compliance
 		copy(tmpContainer, rhs)
 		r.Reasons = tmpContainer
 	}
-	if rhs := m.Scans; rhs != nil {
+	if rhs := m.ScanNames; rhs != nil {
 		tmpContainer := make([]string, len(rhs))
 		copy(tmpContainer, rhs)
-		r.Scans = tmpContainer
-	}
-	if rhs := m.Profiles; rhs != nil {
-		tmpContainer := make([]string, len(rhs))
-		copy(tmpContainer, rhs)
-		r.Profiles = tmpContainer
+		r.ScanNames = tmpContainer
 	}
 	if len(m.unknownFields) > 0 {
 		r.unknownFields = make([]byte, len(m.unknownFields))
@@ -1870,20 +1865,11 @@ func (this *ComplianceOperatorReportSnapshotV2_FailedCluster) EqualVT(that *Comp
 	if this.OperatorVersion != that.OperatorVersion {
 		return false
 	}
-	if len(this.Scans) != len(that.Scans) {
-		return false
-	}
-	for i, vx := range this.Scans {
-		vy := that.Scans[i]
-		if vx != vy {
-			return false
-		}
-	}
-	if len(this.Profiles) != len(that.Profiles) {
+	if len(this.ScanNames) != len(that.ScanNames) {
 		return false
 	}
-	for i, vx := range this.Profiles {
-		vy := that.Profiles[i]
+	for i, vx := range this.ScanNames {
+		vy := that.ScanNames[i]
 		if vx != vy {
 			return false
 		}
@@ -4127,20 +4113,11 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) MarshalToSizedBufferV
 		i -= len(m.unknownFields)
 		copy(dAtA[i:], m.unknownFields)
 	}
-	if len(m.Profiles) > 0 {
-		for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Profiles[iNdEx])
-			copy(dAtA[i:], m.Profiles[iNdEx])
-			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Profiles[iNdEx])))
-			i--
-			dAtA[i] = 0x32
-		}
-	}
-	if len(m.Scans) > 0 {
-		for iNdEx := len(m.Scans) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Scans[iNdEx])
-			copy(dAtA[i:], m.Scans[iNdEx])
-			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Scans[iNdEx])))
+	if len(m.ScanNames) > 0 {
+		for iNdEx := len(m.ScanNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ScanNames[iNdEx])
+			copy(dAtA[i:], m.ScanNames[iNdEx])
+			i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ScanNames[iNdEx])))
 			i--
 			dAtA[i] = 0x2a
 		}
@@ -5446,14 +5423,8 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) SizeVT() (n int) {
 	if l > 0 {
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-	if len(m.Scans) > 0 {
-		for _, s := range m.Scans {
-			l = len(s)
-			n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
-		}
-	}
-	if len(m.Profiles) > 0 {
-		for _, s := range m.Profiles {
+	if len(m.ScanNames) > 0 {
+		for _, s := range m.ScanNames {
 			l = len(s)
 			n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 		}
@@ -12623,7 +12594,7 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVT(dAtA []by
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Scans", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ScanNames", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -12651,39 +12622,7 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVT(dAtA []by
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Scans = append(m.Scans, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return protohelpers.ErrIntOverflow
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Profiles = append(m.Profiles, string(dAtA[iNdEx:postIndex]))
+			m.ScanNames = append(m.ScanNames, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -21303,43 +21242,7 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVTUnsafe(dAt
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Scans", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return protohelpers.ErrIntOverflow
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return protohelpers.ErrInvalidLength
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			var stringValue string
-			if intStringLen > 0 {
-				stringValue = unsafe.String(&dAtA[iNdEx], intStringLen)
-			}
-			m.Scans = append(m.Scans, stringValue)
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ScanNames", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -21371,7 +21274,7 @@ func (m *ComplianceOperatorReportSnapshotV2_FailedCluster) UnmarshalVTUnsafe(dAt
 			if intStringLen > 0 {
 				stringValue = unsafe.String(&dAtA[iNdEx], intStringLen)
 			}
-			m.Profiles = append(m.Profiles, stringValue)
+			m.ScanNames = append(m.ScanNames, stringValue)
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
diff --git a/proto/storage/compliance_operator_v2.proto b/proto/storage/compliance_operator_v2.proto
index 51ab1010f42ed..c93217c496a20 100644
--- a/proto/storage/compliance_operator_v2.proto
+++ b/proto/storage/compliance_operator_v2.proto
@@ -310,8 +310,7 @@ message ComplianceOperatorReportSnapshotV2 {
     string cluster_name = 2;
    repeated string reasons = 3;
     string operator_version = 4;
-    repeated string scans = 5;
-    repeated string profiles = 6;
+    repeated string scanNames = 5;
   }
 
   repeated FailedCluster
failed_clusters = 9; From ee99884c06d2ed7d1733b51bb96847e203f3ead1 Mon Sep 17 00:00:00 2001 From: lvalerom Date: Tue, 3 Jun 2025 17:26:02 +0200 Subject: [PATCH 6/7] update protolock --- proto/storage/proto.lock | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 2ff9d9f207bea..ad1258ae5777f 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -4863,6 +4863,12 @@ "id": 4, "name": "operator_version", "type": "string" + }, + { + "id": 5, + "name": "scanNames", + "type": "string", + "is_repeated": true } ] } From a7435162568b1ebb406b3988e26378dc8be913a6 Mon Sep 17 00:00:00 2001 From: lvalerom Date: Tue, 3 Jun 2025 19:01:10 +0200 Subject: [PATCH 7/7] review --- .../v2/report/manager/format/formatter.go | 13 +-- .../report/manager/format/formatter_test.go | 109 ++++++++++++------ .../v2/report/manager/helpers/clusterdata.go | 15 +-- .../manager/helpers/clusterdata_test.go | 5 +- .../manager/results/results_aggregator.go | 8 +- 5 files changed, 88 insertions(+), 62 deletions(-) diff --git a/central/complianceoperator/v2/report/manager/format/formatter.go b/central/complianceoperator/v2/report/manager/format/formatter.go index 3ee399dab14fb..72477ebc5e39a 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter.go +++ b/central/complianceoperator/v2/report/manager/format/formatter.go @@ -135,21 +135,12 @@ func (f *FormatterImpl) createFailedClusterFileInZip(zipWriter ZipWriter, filena } csvWriter := f.newCSVWriter(failedClusterCSVHeader, true) for _, reason := range failedCluster.Reasons { - csvWriter.AddValue(generateFailRecord(failedCluster.ClusterId, failedCluster.ClusterName, reason, failedCluster.OperatorVersion)) + // The order in the slice needs to match the order defined in `failedClusterCSVHeader` + csvWriter.AddValue([]string{failedCluster.ClusterId, failedCluster.ClusterName, reason, failedCluster.OperatorVersion}) } return csvWriter.WriteCSV(w) } -func generateFailRecord(clusterID, clusterName, reason, coVersion string) []string { - // The order in the slice needs to match the order defined in `failedClusterCSVHeader` - return []string{ - clusterID, - clusterName, - reason, - coVersion, - } -} - func getFileName(format string, clusterName string, timestamp *timestamppb.Timestamp) string { year, month, day := timestamp.AsTime().Date() return fmt.Sprintf(format, fmt.Sprintf("%s_%d-%d-%d", clusterName, year, month, day)) diff --git a/central/complianceoperator/v2/report/manager/format/formatter_test.go b/central/complianceoperator/v2/report/manager/format/formatter_test.go index ac965be3f5592..aead83f4beab9 100644 --- a/central/complianceoperator/v2/report/manager/format/formatter_test.go +++ b/central/complianceoperator/v2/report/manager/format/formatter_test.go @@ -71,17 +71,27 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) - s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { - return target == successfulFileName || target == failedFileName - })).Times(2).Return(nil, nil) - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], 
failedInfo.OperatorVersion)) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) - })).Times(3) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) - s.zipWriter.EXPECT().Close().Times(1).Return(nil) + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) + })).Times(1), + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) buf, err := s.formatter.FormatCSVReport(getFakeReportDataWithFailedCluster()) s.Require().NoError(err) @@ -97,17 +107,27 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithFailedCluste successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) - s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { - return target == successfulFileName || target == failedFileName - })).Times(2).Return(nil, nil) - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) - })).Times(3) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil) - s.zipWriter.EXPECT().Close().Times(1).Return(nil) + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, 
generateRecord(results[clusterID1][1])) + })).Times(1), + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(2).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().NoError(err) @@ -134,18 +154,33 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWithPartialFaile successfulFileName := getFileName(successfulClusterFmt, clusterData[clusterID1].ClusterName, timestamp) failedFileName := getFileName(failedClusterFmt, clusterData[clusterID2].ClusterName, timestamp) - s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { - return target == successfulFileName || target == failedFileName || target == partialSuccessFileName - })).Times(3).Return(nil, nil) - s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { - failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) || - compareStringSlice(s.T(), target, generateRecord(results[clusterID2][0])) - })).Times(4) - s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(3).Return(nil) - s.zipWriter.EXPECT().Close().Times(1).Return(nil) + calls := []any{ + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == successfulFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == failedFileName + })).Times(1).Return(nil, nil), + s.zipWriter.EXPECT().Create(gomock.Cond[string](func(target string) bool { + return target == partialSuccessFileName + })).Times(1).Return(nil, nil), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + failedInfo := clusterData[clusterID2].FailedInfo + return compareStringSlice(s.T(), target, []string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][0])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID1][1])) + })).Times(1), + s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { + return compareStringSlice(s.T(), target, generateRecord(results[clusterID2][0])) + })).Times(1), + s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(3).Return(nil), + s.zipWriter.EXPECT().Close().Times(1).Return(nil), + } + gomock.InAnyOrder(calls) buf, err := s.formatter.FormatCSVReport(results, clusterData) s.Require().NoError(err) @@ -200,7 +235,7 @@ func (s *ComplianceReportingFormatterSuite) Test_FormatCSVReportWriteError() { s.zipWriter.EXPECT().Create(failedFileName).Times(1).Return(nil, nil) s.csvWriter.EXPECT().AddValue(gomock.Cond[csv.Value](func(target csv.Value) bool { failedInfo := clusterData[clusterID2].FailedInfo - return compareStringSlice(s.T(), target, generateFailRecord(failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion)) + return compareStringSlice(s.T(), target, 
[]string{failedInfo.ClusterId, failedInfo.ClusterName, failedInfo.Reasons[0], failedInfo.OperatorVersion}) })).Times(1) s.csvWriter.EXPECT().WriteCSV(gomock.Any()).Times(1).Return(errors.New("error")) s.zipWriter.EXPECT().Close().Times(1).Return(nil) @@ -344,12 +379,14 @@ func (m *emptyValueMatcher) String() string { return m.error } -func compareStringSlice(_ *testing.T, actual []string, expected []string) bool { +func compareStringSlice(t *testing.T, actual []string, expected []string) bool { if len(actual) != len(expected) { + t.Logf("Expected slice %v but got %v", expected, actual) return false } for i := 0; i < len(actual); i++ { if strings.Compare(actual[i], expected[i]) != 0 { + t.Logf("Expected slice %v but got %v", expected, actual) return false } } diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go index 4d53fb0b9f41b..097a87114c8a0 100644 --- a/central/complianceoperator/v2/report/manager/helpers/clusterdata.go +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata.go @@ -46,15 +46,16 @@ func GetClusterData(ctx context.Context, reportData *storage.ComplianceOperatorR if err != nil { return nil, err } - if failedClusters == nil { - clusterData[cluster.GetClusterId()] = data + clusterData[cluster.GetClusterId()] = data + } + for failedClusterId, failedCluster := range failedClusters { + cluster, found := clusterData[failedClusterId] + if !found { continue } - if failedCluster, ok := failedClusters[cluster.GetClusterId()]; ok { - failedCluster.ClusterName = cluster.GetClusterName() - data.FailedInfo = failedCluster - } - clusterData[cluster.GetClusterId()] = data + + failedCluster.ClusterName = cluster.ClusterName + cluster.FailedInfo = failedCluster } return clusterData, nil } diff --git a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go index 70a5957adf7b2..d284c400bffe0 100644 --- a/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go +++ b/central/complianceoperator/v2/report/manager/helpers/clusterdata_test.go @@ -243,10 +243,7 @@ func assertClusterData(t *testing.T, expected map[string]*report.ClusterData, ac require.True(t, ok) assert.Equal(t, expectedCluster.ClusterId, actualCluster.ClusterId) assert.Equal(t, expectedCluster.ClusterName, actualCluster.ClusterName) - assert.Len(t, actualCluster.ScanNames, len(expectedCluster.ScanNames)) - for _, scan := range expectedCluster.ScanNames { - assert.Contains(t, actualCluster.ScanNames, scan) - } + assert.ElementsMatch(t, expectedCluster.ScanNames, actualCluster.ScanNames) if expectedCluster.FailedInfo != nil { require.NotNil(t, actualCluster.FailedInfo) assert.Equal(t, expectedCluster.FailedInfo.ClusterId, actualCluster.FailedInfo.ClusterId) diff --git a/central/complianceoperator/v2/report/manager/results/results_aggregator.go b/central/complianceoperator/v2/report/manager/results/results_aggregator.go index 218af697abd3d..5fab32b0be997 100644 --- a/central/complianceoperator/v2/report/manager/results/results_aggregator.go +++ b/central/complianceoperator/v2/report/manager/results/results_aggregator.go @@ -98,19 +98,19 @@ func (g *Aggregator) getReportDataForCluster(ctx context.Context, scanConfigID, totalFail: 0, totalMixed: 0, } - scanNames := clusterData.ScanNames + successfulScanNames := clusterData.ScanNames if clusterData.FailedInfo != nil { - allScansSet := set.NewStringSet(scanNames...) 
+ allScansSet := set.NewStringSet(successfulScanNames...) failedScansSet := set.NewStringSet() for _, scan := range clusterData.FailedInfo.FailedScans { failedScansSet.Add(scan.GetScanName()) } - scanNames = allScansSet.Difference(failedScansSet).AsSlice() + successfulScanNames = allScansSet.Difference(failedScansSet).AsSlice() } scanConfigQuery := search.NewQueryBuilder(). AddExactMatches(search.ComplianceOperatorScanConfig, scanConfigID). AddExactMatches(search.ClusterID, clusterID). - AddExactMatches(search.ComplianceOperatorScanName, scanNames...). + AddExactMatches(search.ComplianceOperatorScanName, successfulScanNames...). ProtoQuery() err := g.checkResultsDS.WalkByQuery(ctx, scanConfigQuery, g.aggreateResults(ctx, clusterID, &ret, statuses)) return ret, statuses, err
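
Note on the final results_aggregator.go hunk above: renaming scanNames to successfulScanNames spells out that check results are queried only for scans that did not fail on the cluster. Below is a minimal standalone sketch of that set-difference step, using the same pkg/set API the hunk itself calls (NewStringSet, Difference, AsSlice); the sample scan names are invented for illustration.

package main

import (
	"fmt"

	"github.com/stackrox/rox/pkg/set"
)

// successfulScanNames mirrors the filtering added in getReportDataForCluster:
// start from every scan name attached to the cluster and subtract the ones
// reported in FailedInfo.FailedScans.
func successfulScanNames(allScans, failedScans []string) []string {
	allScansSet := set.NewStringSet(allScans...)
	failedScansSet := set.NewStringSet(failedScans...)
	return allScansSet.Difference(failedScansSet).AsSlice()
}

func main() {
	all := []string{"ocp4-cis", "ocp4-cis-node", "rhcos4-moderate"}
	failed := []string{"rhcos4-moderate"}
	// Prints the two scans that completed; AsSlice gives no ordering guarantee.
	fmt.Println(successfulScanNames(all, failed))
}

Because AsSlice returns the set contents in unspecified order, the exact-match query built from it matches the same results either way, which is why the hunk can feed the slice straight into AddExactMatches.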
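Patch 7/7 also inlines the per-cluster file naming into getFileName (formatter.go). A hedged usage sketch follows: the real successfulClusterFmt and failedClusterFmt constants are not visible in this diff, so the "report_%s.csv" format below is a stand-in, not the repository's actual value.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// getFileName is copied from the patch; it stamps the cluster name with the
// report date before substituting it into the caller-provided format string.
func getFileName(format string, clusterName string, timestamp *timestamppb.Timestamp) string {
	year, month, day := timestamp.AsTime().Date()
	return fmt.Sprintf(format, fmt.Sprintf("%s_%d-%d-%d", clusterName, year, month, day))
}

func main() {
	// "report_%s.csv" is a hypothetical format; the patch passes
	// successfulClusterFmt or failedClusterFmt here.
	fmt.Println(getFileName("report_%s.csv", "prod-cluster", timestamppb.Now()))
	// e.g. report_prod-cluster_2025-6-3.csv
}

One behavior worth noting for reviewers: %d does not zero-pad, so June 3 renders as 6-3 rather than 06-03, which makes the generated file names non-sortable lexicographically by date.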