diff --git a/central/pod/datastore/datastore_impl_real_test.go b/central/pod/datastore/datastore_impl_real_test.go index 63f444079c326..101424dbd2ece 100644 --- a/central/pod/datastore/datastore_impl_real_test.go +++ b/central/pod/datastore/datastore_impl_real_test.go @@ -60,7 +60,7 @@ func (s *PodDatastoreSuite) SetupTest() { indicatorStorage := processIndicatorStorage.New(s.postgres.DB) - s.indicatorDataStore = processIndicatorDataStore.New( + s.indicatorDataStore = processIndicatorDataStore.New(s.postgres.DB, indicatorStorage, plopStorage, nil) s.plopDS = plopDataStore.New(plopStorage, s.indicatorDataStore, s.postgres.DB) diff --git a/central/processbaseline/baselineutils.go b/central/processbaseline/baselineutils.go index a291fcad50ded..3939d1813124a 100644 --- a/central/processbaseline/baselineutils.go +++ b/central/processbaseline/baselineutils.go @@ -4,6 +4,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/protoutils" @@ -78,6 +79,26 @@ func IsStartupProcess(process *storage.ProcessIndicator) bool { if process.ContainerStartTime == nil { return false } + // TODO(ROX-31107): Determine if nil SignalTime should be considered startup task. By this logic it is. durationBetweenProcessAndContainerStart := protoutils.Sub(process.GetSignal().GetTime(), process.GetContainerStartTime()) return durationBetweenProcessAndContainerStart < ContainerStartupDuration } + +// IsStartupProcessView determines if the process is a startup process +// A process is considered a startup process if it happens within the first ContainerStartupDuration and was not scraped +// but instead pulled from exec +func IsStartupProcessView(process *views.ProcessIndicatorRiskView) bool { + if process.ContainerStartTime == nil { + return false + } + // TODO(ROX-31107): Determine if nil SignalTime should be considered startup task. 
By this logic it is. + durationBetweenProcessAndContainerStart := protoutils.Sub(protocompat.ConvertTimeToTimestampOrNil(process.SignalTime), + protocompat.ConvertTimeToTimestampOrNil(process.ContainerStartTime)) + return durationBetweenProcessAndContainerStart < ContainerStartupDuration +} + +// BaselineItemFromProcessView returns what we baseline for a given process. +// It exists to make sure that we're using the same thing in every place (name vs execfilepath). +func BaselineItemFromProcessView(process *views.ProcessIndicatorRiskView) string { + return process.ExecFilePath +} diff --git a/central/processbaseline/evaluator/evaluator.go b/central/processbaseline/evaluator/evaluator.go index b0d75cccd8a97..e5b6537158894 100644 --- a/central/processbaseline/evaluator/evaluator.go +++ b/central/processbaseline/evaluator/evaluator.go @@ -4,6 +4,7 @@ import ( baselinesStore "github.com/stackrox/rox/central/processbaseline/datastore" baselineResultsStore "github.com/stackrox/rox/central/processbaselineresults/datastore" indicatorsStore "github.com/stackrox/rox/central/processindicator/datastore" + "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/generated/storage" ) @@ -11,7 +12,7 @@ import ( // //go:generate mockgen-wrapper type Evaluator interface { - EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) (violatingProcesses []*storage.ProcessIndicator, err error) + EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) (violatingProcesses []*views.ProcessIndicatorRiskView, err error) } // New returns a new evaluator. 
diff --git a/central/processbaseline/evaluator/evaluator_impl.go b/central/processbaseline/evaluator/evaluator_impl.go index a0f65188c2dcf..4230ed62a4f20 100644 --- a/central/processbaseline/evaluator/evaluator_impl.go +++ b/central/processbaseline/evaluator/evaluator_impl.go @@ -8,8 +8,8 @@ import ( baselinesStore "github.com/stackrox/rox/central/processbaseline/datastore" baselineResultsStore "github.com/stackrox/rox/central/processbaselineresults/datastore" indicatorsStore "github.com/stackrox/rox/central/processindicator/datastore" + "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/generated/storage" - processBaselinePkg "github.com/stackrox/rox/pkg/processbaseline" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" @@ -54,7 +54,7 @@ func (e *evaluator) persistResults(ctx context.Context, deployment *storage.Depl return e.baselineResults.UpsertBaselineResults(ctx, results) } -func (e *evaluator) EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) (violatingProcesses []*storage.ProcessIndicator, err error) { +func (e *evaluator) EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) (violatingProcesses []*views.ProcessIndicatorRiskView, err error) { containerNameToBaselinedProcesses := make(map[string]*set.StringSet) containerNameToBaselineResults := make(map[string]*storage.ContainerNameAndBaselineStatus) @@ -86,29 +86,29 @@ func (e *evaluator) EvaluateBaselinesAndPersistResult(deployment *storage.Deploy } } - var processes []*storage.ProcessIndicator + var processes []*views.ProcessIndicatorRiskView if hasAtLeastOneLockedBaseline { - processes, err = e.indicators.SearchRawProcessIndicators(evaluatorCtx, search.NewQueryBuilder().AddExactMatches(search.DeploymentID, deployment.GetId()).ProtoQuery()) + processes, err = e.indicators.GetProcessIndicatorsRiskView(evaluatorCtx, search.NewQueryBuilder().AddExactMatches(search.DeploymentID, 
deployment.GetId()).ProtoQuery()) if err != nil { return nil, errors.Wrapf(err, "searching process indicators for deployment %s/%s/%s", deployment.GetClusterName(), deployment.GetNamespace(), deployment.GetName()) } } for _, process := range processes { - processSet, exists := containerNameToBaselinedProcesses[process.GetContainerName()] + processSet, exists := containerNameToBaselinedProcesses[process.ContainerName] // If no explicit baseline, then all processes are valid. if !exists { continue } - baselineItem := processBaselinePkg.BaselineItemFromProcess(process) + baselineItem := processbaseline.BaselineItemFromProcessView(process) if baselineItem == "" { continue } - if processbaseline.IsStartupProcess(process) { + if processbaseline.IsStartupProcessView(process) { continue } - if !processSet.Contains(processBaselinePkg.BaselineItemFromProcess(process)) { + if !processSet.Contains(processbaseline.BaselineItemFromProcessView(process)) { violatingProcesses = append(violatingProcesses, process) - containerNameToBaselineResults[process.GetContainerName()].AnomalousProcessesExecuted = true + containerNameToBaselineResults[process.ContainerName].AnomalousProcessesExecuted = true } } diff --git a/central/processbaseline/evaluator/evaluator_integration_test.go b/central/processbaseline/evaluator/evaluator_integration_test.go new file mode 100644 index 0000000000000..adae50aa6399d --- /dev/null +++ b/central/processbaseline/evaluator/evaluator_integration_test.go @@ -0,0 +1,910 @@ +//go:build sql_integration + +package evaluator + +import ( + "context" + "testing" + "time" + + baselineDatastore "github.com/stackrox/rox/central/processbaseline/datastore" + resultDatastore "github.com/stackrox/rox/central/processbaselineresults/datastore" + indicatorDatastore "github.com/stackrox/rox/central/processindicator/datastore" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/fixtures" + "github.com/stackrox/rox/pkg/logging" + 
"github.com/stackrox/rox/pkg/postgres" + "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/protoconv" + "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/sac/resources" + "github.com/stackrox/rox/pkg/search" + "github.com/stackrox/rox/pkg/uuid" + "github.com/stretchr/testify/suite" +) + +var log = logging.LoggerForModule() + +func TestProcessBaselineEvaluatorIntegration(t *testing.T) { + suite.Run(t, new(ProcessBaselineEvaluatorIntegrationTestSuite)) +} + +type ProcessBaselineEvaluatorIntegrationTestSuite struct { + suite.Suite + + pool postgres.DB + + baselinesDatastore baselineDatastore.DataStore + resultsDatastore resultDatastore.DataStore + indicatorsDatastore indicatorDatastore.DataStore + + evaluator Evaluator + + ctx context.Context +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) SetupSuite() { + pgtestbase := pgtest.ForT(suite.T()) + suite.Require().NotNil(pgtestbase) + suite.pool = pgtestbase.DB + + // Create real datastores + suite.baselinesDatastore = baselineDatastore.GetTestPostgresDataStore(suite.T(), suite.pool) + suite.resultsDatastore = resultDatastore.GetTestPostgresDataStore(suite.T(), suite.pool) + suite.indicatorsDatastore = indicatorDatastore.GetTestPostgresDataStore(suite.T(), suite.pool) + + suite.evaluator = New(suite.resultsDatastore, suite.baselinesDatastore, suite.indicatorsDatastore) + + suite.ctx = sac.WithGlobalAccessScopeChecker(context.Background(), + sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), + sac.ResourceScopeKeys(resources.DeploymentExtension), + ), + ) +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TearDownSuite() { + if suite.indicatorsDatastore != nil { + suite.indicatorsDatastore.Stop() + } + if suite.pool != nil { + suite.pool.Close() + } +} + +// addLockedBaseline adds a baseline and locks it using UserLock +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) 
addLockedBaseline(baseline *storage.ProcessBaseline) { + _, err := suite.baselinesDatastore.AddProcessBaseline(suite.ctx, baseline) + suite.NoError(err) + + // If we want it locked, use UserLockProcessBaseline which locks it immediately + if baseline.UserLockedTimestamp != nil { + _, err = suite.baselinesDatastore.UserLockProcessBaseline(suite.ctx, baseline.Key, true) + suite.NoError(err) + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestNoProcessBaseline() { + deployment := fixtures.GetDeployment() + + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Empty(results) + + // Verify the result was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Equal(deployment.GetId(), persistedResult.GetDeploymentId()) + suite.Equal(deployment.GetClusterId(), persistedResult.GetClusterId()) + suite.Equal(deployment.GetNamespace(), persistedResult.GetNamespace()) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + for _, status := range persistedResult.GetBaselineStatuses() { + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, status.GetBaselineStatus()) + suite.False(status.GetAnomalousProcessesExecuted()) + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestProcessBaselineExistsButNotLocked() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName := deployment.GetContainers()[0].GetName() + + // Create an unlocked baseline + baseline := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + Elements: []*storage.BaselineElement{}, + } + _, err := suite.baselinesDatastore.AddProcessBaseline(suite.ctx, baseline) + suite.NoError(err) + + 
results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Empty(results) + + // Verify the result was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + for _, status := range persistedResult.GetBaselineStatuses() { + if status.GetContainerName() == containerName { + suite.Equal(storage.ContainerNameAndBaselineStatus_UNLOCKED, status.GetBaselineStatus()) + } else { + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, status.GetBaselineStatus()) + } + suite.False(status.GetAnomalousProcessesExecuted()) + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestLockedProcessBaselineAllProcessesInBaseline() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName := deployment.GetContainers()[0].GetName() + + // Create a locked baseline with processes + baseline := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: fixtures.MakeBaselineElements("/bin/apt-get", "/unrelated"), + } + suite.addLockedBaseline(baseline) + + // Add a process indicator that is in the baseline + var err error + processIndicator := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "apt-get", + Args: "install nmap", + ExecFilePath: "/bin/apt-get", + Time: 
protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator) + suite.NoError(err) + + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Empty(results) + + // Verify the result was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + log.Infof("persisted baseline results: %v", persistedResult) + + for _, status := range persistedResult.GetBaselineStatuses() { + // We only locked the first container + if status.GetContainerName() == containerName { + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, status.GetBaselineStatus()) + suite.False(status.GetAnomalousProcessesExecuted()) + } else { + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, status.GetBaselineStatus()) + suite.False(status.GetAnomalousProcessesExecuted()) + } + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestLockedProcessBaselineOneNotInBaselineProcess() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName := deployment.GetContainers()[0].GetName() + + // Create a locked baseline without the process we'll add + baseline := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + suite.addLockedBaseline(baseline) + + // Add a process indicator that is NOT in the baseline + var err error + 
processIndicator := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "apt-get", + Args: "install nmap", + ExecFilePath: "apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator) + suite.NoError(err) + + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 1) + suite.Equal("apt-get", results[0].ExecFilePath) + suite.Equal("install nmap", results[0].SignalArgs) + suite.Equal(containerName, results[0].ContainerName) + + // Verify the result was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + // Check first container has anomalous process, second doesn't + for _, status := range persistedResult.GetBaselineStatuses() { + if status.GetContainerName() == containerName { + suite.True(status.GetAnomalousProcessesExecuted()) + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, status.GetBaselineStatus()) + } else { + suite.False(status.GetAnomalousProcessesExecuted()) + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, status.GetBaselineStatus()) + } + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestLockedProcessBaselineTwoNotInBaselineProcesses() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName := deployment.GetContainers()[1].GetName() + + // Create a 
locked baseline without the processes we'll add + baseline := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + suite.addLockedBaseline(baseline) + + // Add two process indicators that are NOT in the baseline + var err error + processIndicator1 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "apt-get", + Args: "install nmap", + ExecFilePath: "apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + processIndicator2 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "curl", + Args: "badssl.com", + ExecFilePath: "curl", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator1, processIndicator2) + suite.NoError(err) + + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 2) + + // Verify the result 
was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + // Check second container has anomalous processes, first doesn't + for _, status := range persistedResult.GetBaselineStatuses() { + if status.GetContainerName() == containerName { + suite.True(status.GetAnomalousProcessesExecuted()) + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, status.GetBaselineStatus()) + } else { + suite.False(status.GetAnomalousProcessesExecuted()) + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, status.GetBaselineStatus()) + } + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestLockedProcessBaselineTwoContainersDifferentProcesses() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName1 := deployment.GetContainers()[0].GetName() + containerName2 := deployment.GetContainers()[1].GetName() + + // Create locked baselines for both containers + baseline1 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: fixtures.MakeBaselineElements("/bin/apt-get"), + } + suite.addLockedBaseline(baseline1) + + baseline2 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + suite.addLockedBaseline(baseline2) + + // Add process indicators: one not in baseline for container 0, one 
in baseline, one not in baseline for container 1 + var err error + processIndicator1 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "not-apt-get", + Args: "install nmap", + ExecFilePath: "/bin/not-apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + processIndicator2 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "apt-get", + Args: "install nmap", + ExecFilePath: "/bin/apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + processIndicator3 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "curl", + Args: "badssl.com", + ExecFilePath: "/bin/curl", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator1, processIndicator2, 
processIndicator3) + suite.NoError(err) + + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 2) + + // Check that we got the right violations + paths := []string{results[0].ExecFilePath, results[1].ExecFilePath} + suite.Contains(paths, "/bin/not-apt-get") + suite.Contains(paths, "/bin/curl") + + // Verify the result was persisted + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + // Both containers have anomalous processes + for _, status := range persistedResult.GetBaselineStatuses() { + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, status.GetBaselineStatus()) + suite.True(status.GetAnomalousProcessesExecuted()) + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestResultAlreadyExistsNoUpdate() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName1 := deployment.GetContainers()[0].GetName() + containerName2 := deployment.GetContainers()[1].GetName() + + // Create locked baselines + baseline1 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: fixtures.MakeBaselineElements("/bin/apt-get"), + } + suite.addLockedBaseline(baseline1) + + baseline2 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + 
suite.addLockedBaseline(baseline2) + + // Add process indicators + var err error + processIndicator1 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "not-apt-get", + Args: "install nmap", + ExecFilePath: "/bin/not-apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + processIndicator2 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "curl", + Args: "badssl.com", + ExecFilePath: "/bin/curl", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator1, processIndicator2) + suite.NoError(err) + + // Create the existing result that matches what we expect + existingResult := &storage.ProcessBaselineResults{ + DeploymentId: deployment.GetId(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + BaselineStatuses: []*storage.ContainerNameAndBaselineStatus{ + { + ContainerName: containerName2, + BaselineStatus: storage.ContainerNameAndBaselineStatus_LOCKED, + AnomalousProcessesExecuted: true, + }, + { + ContainerName: containerName1, + BaselineStatus: storage.ContainerNameAndBaselineStatus_LOCKED, + AnomalousProcessesExecuted: true, + }, + 
}, + } + err = suite.resultsDatastore.UpsertBaselineResults(suite.ctx, existingResult) + suite.NoError(err) + + // Get initial timestamp + initialResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(initialResult) + + // Run the evaluator + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 2) + + // The result should still be the same (no update needed) + // Note: we can't easily test that UpsertBaselineResults was NOT called since we're using real datastores + // but we can verify the result is still correct + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestResultAlreadyExistsNeedsUpdate() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName1 := deployment.GetContainers()[0].GetName() + containerName2 := deployment.GetContainers()[1].GetName() + + // Create locked baselines + baseline1 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: fixtures.MakeBaselineElements("/bin/apt-get"), + } + suite.addLockedBaseline(baseline1) + + baseline2 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + 
suite.addLockedBaseline(baseline2) + + // Add process indicators + var err error + processIndicator1 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "not-apt-get", + Args: "install nmap", + ExecFilePath: "/bin/not-apt-get", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + processIndicator2 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "curl", + Args: "badssl.com", + ExecFilePath: "/bin/curl", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator1, processIndicator2) + suite.NoError(err) + + // Create an existing result that needs update (UNLOCKED instead of LOCKED for container 1) + existingResult := &storage.ProcessBaselineResults{ + DeploymentId: deployment.GetId(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + BaselineStatuses: []*storage.ContainerNameAndBaselineStatus{ + { + ContainerName: containerName1, + BaselineStatus: storage.ContainerNameAndBaselineStatus_UNLOCKED, // Different from expected + AnomalousProcessesExecuted: true, + }, + { + ContainerName: containerName2, + BaselineStatus: 
storage.ContainerNameAndBaselineStatus_LOCKED, + AnomalousProcessesExecuted: true, + }, + }, + } + err = suite.resultsDatastore.UpsertBaselineResults(suite.ctx, existingResult) + suite.NoError(err) + + // Run the evaluator + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 2) + + // Verify the result was updated + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.NotNil(persistedResult) + suite.Len(persistedResult.GetBaselineStatuses(), 2) + + // Check that all statuses are now LOCKED + for _, status := range persistedResult.GetBaselineStatuses() { + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, status.GetBaselineStatus()) + suite.True(status.GetAnomalousProcessesExecuted()) + } +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestComplexWorkflow() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName := deployment.GetContainers()[0].GetName() + + // Start with no baseline - should get NOT_GENERATED + results, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Empty(results) + + persistedResult, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.Equal(storage.ContainerNameAndBaselineStatus_NOT_GENERATED, persistedResult.GetBaselineStatuses()[0].GetBaselineStatus()) + + // Create a locked baseline + baseline := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment.GetId(), + ContainerName: deployment.GetContainers()[0].GetName(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: fixtures.MakeBaselineElements("/bin/allowed"), + } + suite.addLockedBaseline(baseline) + + // Add 
an anomalous process + processIndicator := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "malicious", + Args: "badargs", + ExecFilePath: "/bin/malicious", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator) + suite.NoError(err) + + // Should now detect the anomalous process + results, err = suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Len(results, 1) + suite.Equal("/bin/malicious", results[0].ExecFilePath) + + persistedResult, err = suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, persistedResult.GetBaselineStatuses()[0].GetBaselineStatus()) + suite.True(persistedResult.GetBaselineStatuses()[0].GetAnomalousProcessesExecuted()) + + // Remove the anomalous process + err = suite.indicatorsDatastore.RemoveProcessIndicators(suite.ctx, []string{processIndicator.GetId()}) + suite.NoError(err) + + // Should now show no anomalous processes + results, err = suite.evaluator.EvaluateBaselinesAndPersistResult(deployment) + suite.NoError(err) + suite.Empty(results) + + persistedResult, err = suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment.GetId()) + suite.NoError(err) + suite.Equal(storage.ContainerNameAndBaselineStatus_LOCKED, persistedResult.GetBaselineStatuses()[0].GetBaselineStatus()) + suite.False(persistedResult.GetBaselineStatuses()[0].GetAnomalousProcessesExecuted()) +} + +func (suite 
*ProcessBaselineEvaluatorIntegrationTestSuite) TestQueryProcessIndicators() { + deployment := fixtures.GetDeployment() + deployment.Id = uuid.NewV4().String() + containerName1 := deployment.GetContainers()[0].GetName() + containerName2 := deployment.GetContainers()[1].GetName() + + // Add multiple process indicators for the deployment + indicators := []*storage.ProcessIndicator{ + { + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName1, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "process1", + Args: "args1", + ExecFilePath: "/bin/process1", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + }, + { + Id: uuid.NewV4().String(), + DeploymentId: deployment.GetId(), + ContainerName: containerName2, + PodId: uuid.NewV4().String(), + PodUid: uuid.NewV4().String(), + ClusterId: deployment.GetClusterId(), + Namespace: deployment.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "process2", + Args: "args2", + ExecFilePath: "/bin/process2", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + }, + } + + err := suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, indicators...) 
+ suite.NoError(err) + + // Query for indicators by deployment ID + query := search.NewQueryBuilder().AddExactMatches(search.DeploymentID, deployment.GetId()).ProtoQuery() + riskViews, err := suite.indicatorsDatastore.GetProcessIndicatorsRiskView(suite.ctx, query) + suite.NoError(err) + suite.Len(riskViews, 2) + + // Verify the risk views have the expected data + paths := []string{riskViews[0].ExecFilePath, riskViews[1].ExecFilePath} + suite.Contains(paths, "/bin/process1") + suite.Contains(paths, "/bin/process2") +} + +func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestMultipleDeploymentsIsolation() { + deployment1 := fixtures.GetDeployment() + deployment1.Id = uuid.NewV4().String() + deployment2 := fixtures.GetDeployment() + deployment2.Id = uuid.NewV4().String() + + // Create baselines for both deployments + baseline1 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment1.GetId(), + ContainerName: deployment1.GetContainers()[0].GetName(), + ClusterId: deployment1.GetClusterId(), + Namespace: deployment1.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + suite.addLockedBaseline(baseline1) + + baseline2 := &storage.ProcessBaseline{ + Key: &storage.ProcessBaselineKey{ + DeploymentId: deployment2.GetId(), + ContainerName: deployment2.GetContainers()[0].GetName(), + ClusterId: deployment2.GetClusterId(), + Namespace: deployment2.GetNamespace(), + }, + UserLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), + Elements: []*storage.BaselineElement{}, + } + suite.addLockedBaseline(baseline2) + + // Add process indicators for deployment1 only + var err error + processIndicator1 := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deployment1.GetId(), + ContainerName: deployment1.GetContainers()[0].GetName(), + PodId: uuid.NewV4().String(), + PodUid: 
uuid.NewV4().String(), + ClusterId: deployment1.GetClusterId(), + Namespace: deployment1.GetNamespace(), + Signal: &storage.ProcessSignal{ + Name: "malicious", + Args: "badargs", + ExecFilePath: "/bin/malicious", + Time: protoconv.ConvertTimeToTimestamp(time.Now()), + ContainerId: uuid.NewV4().String(), + Uid: 1000, + }, + ContainerStartTime: protoconv.ConvertTimeToTimestamp(time.Now().Add(-2 * time.Hour)), + } + err = suite.indicatorsDatastore.AddProcessIndicators(suite.ctx, processIndicator1) + suite.NoError(err) + + // Evaluate deployment1 - should have violations + results1, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment1) + suite.NoError(err) + suite.Len(results1, 1) + + // Evaluate deployment2 - should have no violations + results2, err := suite.evaluator.EvaluateBaselinesAndPersistResult(deployment2) + suite.NoError(err) + suite.Empty(results2) + + // Verify results are isolated + persistedResult1, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment1.GetId()) + suite.NoError(err) + suite.True(persistedResult1.GetBaselineStatuses()[0].GetAnomalousProcessesExecuted()) + + persistedResult2, err := suite.resultsDatastore.GetBaselineResults(suite.ctx, deployment2.GetId()) + suite.NoError(err) + suite.False(persistedResult2.GetBaselineStatuses()[0].GetAnomalousProcessesExecuted()) +} diff --git a/central/processbaseline/evaluator/evaluator_test.go b/central/processbaseline/evaluator/evaluator_test.go index eec29c8f9e9ba..2df415dc8b7a5 100644 --- a/central/processbaseline/evaluator/evaluator_test.go +++ b/central/processbaseline/evaluator/evaluator_test.go @@ -8,9 +8,9 @@ import ( processBaselineMocks "github.com/stackrox/rox/central/processbaseline/datastore/mocks" processBaselineResultMocks "github.com/stackrox/rox/central/processbaselineresults/datastore/mocks" processIndicatorMocks "github.com/stackrox/rox/central/processindicator/datastore/mocks" + "github.com/stackrox/rox/central/processindicator/views" 
"github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/fixtures" - "github.com/stackrox/rox/pkg/protoassert" "github.com/stackrox/rox/pkg/protoconv" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -32,7 +32,7 @@ func TestProcessBaselineEvaluator(t *testing.T) { name string baseline *storage.ProcessBaseline baselineErr error - indicators []*storage.ProcessIndicator + indicators []*views.ProcessIndicatorRiskView indicatorErr error // Specify expectedIndicators as indices into the indicators slice above. expectedIndicatorIndices []int @@ -64,12 +64,10 @@ func TestProcessBaselineEvaluator(t *testing.T) { StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), Elements: fixtures.MakeBaselineElements("/bin/apt-get", "/unrelated"), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, }, @@ -83,12 +81,10 @@ func TestProcessBaselineEvaluator(t *testing.T) { baseline: &storage.ProcessBaseline{ StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "apt-get", - Args: "install nmap", - }, + ExecFilePath: "apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, }, @@ -103,19 +99,15 @@ func TestProcessBaselineEvaluator(t *testing.T) { baseline: &storage.ProcessBaseline{ StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "apt-get", - 
Args: "install nmap", - }, + ExecFilePath: "apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[1].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "curl", - Args: "badssl.com", - }, + ExecFilePath: "curl", + SignalArgs: "badssl.com", ContainerName: deployment.GetContainers()[1].GetName(), }, }, @@ -131,26 +123,20 @@ func TestProcessBaselineEvaluator(t *testing.T) { StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), Elements: fixtures.MakeBaselineElements("/bin/apt-get"), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/not-apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/not-apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/curl", - Args: "badssl.com", - }, + ExecFilePath: "/bin/curl", + SignalArgs: "badssl.com", ContainerName: deployment.GetContainers()[1].GetName(), }, }, @@ -166,26 +152,20 @@ func TestProcessBaselineEvaluator(t *testing.T) { StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), Elements: fixtures.MakeBaselineElements("/bin/apt-get"), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/not-apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/not-apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/apt-get", + 
SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/curl", - Args: "badssl.com", - }, + ExecFilePath: "/bin/curl", + SignalArgs: "badssl.com", ContainerName: deployment.GetContainers()[1].GetName(), }, }, @@ -214,26 +194,20 @@ func TestProcessBaselineEvaluator(t *testing.T) { StackRoxLockedTimestamp: protoconv.MustConvertTimeToTimestamp(time.Now().Add(-1 * time.Hour)), Elements: fixtures.MakeBaselineElements("/bin/apt-get"), }, - indicators: []*storage.ProcessIndicator{ + indicators: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/not-apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/not-apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/apt-get", - Args: "install nmap", - }, + ExecFilePath: "/bin/apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - ExecFilePath: "/bin/curl", - Args: "badssl.com", - }, + ExecFilePath: "/bin/curl", + SignalArgs: "badssl.com", ContainerName: deployment.GetContainers()[1].GetName(), }, }, @@ -269,7 +243,7 @@ func TestProcessBaselineEvaluator(t *testing.T) { mockBaselines.EXPECT().GetProcessBaseline(gomock.Any(), gomock.Any()).MaxTimes(len(deployment.GetContainers())).Return(c.baseline, c.baseline != nil, c.baselineErr) if c.indicators != nil { - mockIndicators.EXPECT().SearchRawProcessIndicators(gomock.Any(), gomock.Any()).Return(c.indicators, c.indicatorErr) + mockIndicators.EXPECT().GetProcessIndicatorsRiskView(gomock.Any(), gomock.Any()).Return(c.indicators, c.indicatorErr) } expectedBaselineResult := &storage.ProcessBaselineResults{ @@ -292,11 +266,11 @@ func TestProcessBaselineEvaluator(t *testing.T) { results, err := New(mockResults, mockBaselines, 
mockIndicators).EvaluateBaselinesAndPersistResult(deployment) require.NoError(t, err) - expectedIndicators := make([]*storage.ProcessIndicator, 0, len(c.expectedIndicatorIndices)) + expectedIndicators := make([]*views.ProcessIndicatorRiskView, 0, len(c.expectedIndicatorIndices)) for _, idx := range c.expectedIndicatorIndices { expectedIndicators = append(expectedIndicators, c.indicators[idx]) } - protoassert.ElementsMatch(t, results, expectedIndicators) + require.ElementsMatch(t, expectedIndicators, results) }) } } diff --git a/central/processbaseline/evaluator/mocks/evaluator.go b/central/processbaseline/evaluator/mocks/evaluator.go index 7a533afbd281a..698cf558c0108 100644 --- a/central/processbaseline/evaluator/mocks/evaluator.go +++ b/central/processbaseline/evaluator/mocks/evaluator.go @@ -12,6 +12,7 @@ package mocks import ( reflect "reflect" + views "github.com/stackrox/rox/central/processindicator/views" storage "github.com/stackrox/rox/generated/storage" gomock "go.uber.org/mock/gomock" ) @@ -41,10 +42,10 @@ func (m *MockEvaluator) EXPECT() *MockEvaluatorMockRecorder { } // EvaluateBaselinesAndPersistResult mocks base method. 
-func (m *MockEvaluator) EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) ([]*storage.ProcessIndicator, error) { +func (m *MockEvaluator) EvaluateBaselinesAndPersistResult(deployment *storage.Deployment) ([]*views.ProcessIndicatorRiskView, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EvaluateBaselinesAndPersistResult", deployment) - ret0, _ := ret[0].([]*storage.ProcessIndicator) + ret0, _ := ret[0].([]*views.ProcessIndicatorRiskView) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/central/processindicator/datastore/bench_test.go b/central/processindicator/datastore/bench_test.go index ea94765e9ac17..331327d6a36ed 100644 --- a/central/processindicator/datastore/bench_test.go +++ b/central/processindicator/datastore/bench_test.go @@ -28,7 +28,7 @@ func BenchmarkAddIndicator(b *testing.B) { store := postgresStore.New(db) plopStore := plopStore.New(db) - datastore := New(store, plopStore, nil) + datastore := New(db, store, plopStore, nil) ctx := sac.WithAllAccess(context.Background()) b.ResetTimer() @@ -60,7 +60,7 @@ func BenchmarkSearchIndicator(b *testing.B) { store := postgresStore.New(db) plopStore := plopStore.New(db) - datastore := New(store, plopStore, nil) + datastore := New(db, store, plopStore, nil) ctx := sac.WithAllAccess(context.Background()) // Add the data first. 
diff --git a/central/processindicator/datastore/datastore.go b/central/processindicator/datastore/datastore.go index 31e24cf305257..771103b2ddbb7 100644 --- a/central/processindicator/datastore/datastore.go +++ b/central/processindicator/datastore/datastore.go @@ -8,6 +8,7 @@ import ( "github.com/stackrox/rox/central/processindicator/pruner" "github.com/stackrox/rox/central/processindicator/store" pgStore "github.com/stackrox/rox/central/processindicator/store/postgres" + "github.com/stackrox/rox/central/processindicator/views" plopStore "github.com/stackrox/rox/central/processlisteningonport/store/postgres" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" @@ -36,6 +37,9 @@ type DataStore interface { WalkAll(ctx context.Context, fn func(pi *storage.ProcessIndicator) error) error + // GetProcessIndicatorsRiskView retrieves minimal fields from process indicator for risk evaluation + GetProcessIndicatorsRiskView(ctx context.Context, q *v1.Query) ([]*views.ProcessIndicatorRiskView, error) + // Stop signals all goroutines associated with this object to terminate. Stop() // Wait waits until all goroutines associated with this object have terminated, or cancelWhen gets triggered. @@ -44,8 +48,9 @@ type DataStore interface { } // New returns a new instance of DataStore using the input store, and searcher. 
-func New(store store.Store, plopStorage plopStore.Store, prunerFactory pruner.Factory) DataStore { +func New(db postgres.DB, store store.Store, plopStorage plopStore.Store, prunerFactory pruner.Factory) DataStore { d := &datastoreImpl{ + db: db, storage: store, plopStorage: plopStorage, prunerFactory: prunerFactory, @@ -64,5 +69,5 @@ func New(store store.Store, plopStorage plopStore.Store, prunerFactory pruner.Fa func GetTestPostgresDataStore(_ testing.TB, pool postgres.DB) DataStore { dbstore := pgStore.New(pool) plopDBstore := plopStore.New(pool) - return New(dbstore, plopDBstore, nil) + return New(pool, dbstore, plopDBstore, nil) } diff --git a/central/processindicator/datastore/datastore_impl.go b/central/processindicator/datastore/datastore_impl.go index e8e11fbf39d1b..ce84d6606a8af 100644 --- a/central/processindicator/datastore/datastore_impl.go +++ b/central/processindicator/datastore/datastore_impl.go @@ -8,15 +8,19 @@ import ( "github.com/stackrox/rox/central/processindicator" "github.com/stackrox/rox/central/processindicator/pruner" "github.com/stackrox/rox/central/processindicator/store" + "github.com/stackrox/rox/central/processindicator/views" plopStore "github.com/stackrox/rox/central/processlisteningonport/store/postgres" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/env" ops "github.com/stackrox/rox/pkg/metrics" + "github.com/stackrox/rox/pkg/postgres" + pkgSchema "github.com/stackrox/rox/pkg/postgres/schema" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" pkgSearch "github.com/stackrox/rox/pkg/search" + pgSearch "github.com/stackrox/rox/pkg/search/postgres" ) const ( @@ -30,6 +34,8 @@ var ( ) type datastoreImpl struct { + db postgres.DB + storage store.Store // ProcessListeningOnPort storage is needed for correct pruning. 
It // logically belongs to the datastore implementation of PLOP, but this way @@ -219,6 +225,36 @@ func (ds *datastoreImpl) RemoveProcessIndicatorsByPod(ctx context.Context, id st return ds.storage.DeleteByQuery(ctx, q) } +// GetProcessIndicatorsRiskView retrieves minimal fields from process indicator for risk evaluation +func (ds *datastoreImpl) GetProcessIndicatorsRiskView(ctx context.Context, q *v1.Query) ([]*views.ProcessIndicatorRiskView, error) { + if ok, err := deploymentExtensionSAC.WriteAllowed(ctx); err != nil { + return nil, err + } else if !ok { + return nil, sac.ErrResourceAccessDenied + } + + cloned := q.CloneVT() + // Add the select fields of the view to the query. + cloned.Selects = []*v1.QuerySelect{ + pkgSearch.NewQuerySelect(pkgSearch.ProcessID).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ContainerName).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ProcessExecPath).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ProcessContainerStartTime).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ProcessCreationTime).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ProcessName).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ProcessArguments).Proto(), + } + + // We do not need the entire process indicator to process risk. That object is large. 
Use a view instead + var results []*views.ProcessIndicatorRiskView + results, err := pgSearch.RunSelectRequestForSchema[views.ProcessIndicatorRiskView](ctx, ds.db, pkgSchema.ProcessIndicatorsSchema, cloned) + if err != nil { + log.Errorf("unable to retrieve indicators for risk processing: %v", err) + } + + return results, err +} + func (ds *datastoreImpl) prunePeriodically(ctx context.Context) { defer ds.stopper.Flow().ReportStopped() diff --git a/central/processindicator/datastore/datastore_impl_test.go b/central/processindicator/datastore/datastore_impl_test.go index dd74690b9cab4..a1703c4b6b467 100644 --- a/central/processindicator/datastore/datastore_impl_test.go +++ b/central/processindicator/datastore/datastore_impl_test.go @@ -96,12 +96,12 @@ func (suite *IndicatorDataStoreTestSuite) initPodToIndicatorsMap() { } func (suite *IndicatorDataStoreTestSuite) setupDataStoreNoPruning() { - suite.datastore = New(suite.storage, suite.plopStorage, nil) + suite.datastore = New(suite.postgres.DB, suite.storage, suite.plopStorage, nil) } func (suite *IndicatorDataStoreTestSuite) setupDataStoreWithMocks() *storeMocks.MockStore { mockStorage := storeMocks.NewMockStore(suite.mockCtrl) - suite.datastore = New(mockStorage, nil, nil) + suite.datastore = New(suite.postgres.DB, mockStorage, nil, nil) return mockStorage } @@ -312,7 +312,7 @@ func (suite *IndicatorDataStoreTestSuite) TestPruning() { return true }) } - suite.datastore = New(suite.storage, suite.plopStorage, mockPrunerFactory) + suite.datastore = New(suite.postgres.DB, suite.storage, suite.plopStorage, mockPrunerFactory) suite.NoError(suite.datastore.AddProcessIndicators(suite.hasWriteCtx, indicators...)) suite.verifyIndicatorsAre(indicators...) 
diff --git a/central/processindicator/datastore/mocks/datastore.go b/central/processindicator/datastore/mocks/datastore.go index b0526a7557122..806f3d7c36f7d 100644 --- a/central/processindicator/datastore/mocks/datastore.go +++ b/central/processindicator/datastore/mocks/datastore.go @@ -13,6 +13,7 @@ import ( context "context" reflect "reflect" + views "github.com/stackrox/rox/central/processindicator/views" v1 "github.com/stackrox/rox/generated/api/v1" storage "github.com/stackrox/rox/generated/storage" concurrency "github.com/stackrox/rox/pkg/concurrency" @@ -110,6 +111,21 @@ func (mr *MockDataStoreMockRecorder) GetProcessIndicators(ctx, ids any) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcessIndicators", reflect.TypeOf((*MockDataStore)(nil).GetProcessIndicators), ctx, ids) } +// GetProcessIndicatorsRiskView mocks base method. +func (m *MockDataStore) GetProcessIndicatorsRiskView(ctx context.Context, q *v1.Query) ([]*views.ProcessIndicatorRiskView, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProcessIndicatorsRiskView", ctx, q) + ret0, _ := ret[0].([]*views.ProcessIndicatorRiskView) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProcessIndicatorsRiskView indicates an expected call of GetProcessIndicatorsRiskView. +func (mr *MockDataStoreMockRecorder) GetProcessIndicatorsRiskView(ctx, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcessIndicatorsRiskView", reflect.TypeOf((*MockDataStore)(nil).GetProcessIndicatorsRiskView), ctx, q) +} + // PruneProcessIndicators mocks base method. 
func (m *MockDataStore) PruneProcessIndicators(ctx context.Context, ids []string) (int, error) { m.ctrl.T.Helper() diff --git a/central/processindicator/datastore/singleton.go b/central/processindicator/datastore/singleton.go index b3549b9a305f2..0203bb22a6b75 100644 --- a/central/processindicator/datastore/singleton.go +++ b/central/processindicator/datastore/singleton.go @@ -25,12 +25,13 @@ var ( ) func initialize() { - storage := pgStore.New(globaldb.GetPostgres()) - plopStorage := plopStore.New(globaldb.GetPostgres()) + db := globaldb.GetPostgres() + storage := pgStore.New(db) + plopStorage := plopStore.New(db) p := pruner.NewFactory(minArgsPerProcess, pruneInterval) - ad = New(storage, plopStorage, p) + ad = New(db, storage, plopStorage, p) } // Singleton provides the interface for non-service external interaction. diff --git a/central/processindicator/views/risk_evaluation_view.go b/central/processindicator/views/risk_evaluation_view.go new file mode 100644 index 0000000000000..3afd2e3881fdb --- /dev/null +++ b/central/processindicator/views/risk_evaluation_view.go @@ -0,0 +1,16 @@ +package views + +import ( + "time" +) + +type ProcessIndicatorRiskView struct { + ID string `db:"process_id"` + ContainerName string `db:"container_name"` + ExecFilePath string `db:"process_path"` + ContainerStartTime *time.Time `db:"process_container_start_time"` + // These are only needed for violations + SignalTime *time.Time `db:"process_creation_time"` + SignalName string `db:"process_name"` + SignalArgs string `db:"process_arguments"` +} diff --git a/central/processlisteningonport/datastore/datastore_impl_test.go b/central/processlisteningonport/datastore/datastore_impl_test.go index ce3f7fa7a354a..460b7b5c17050 100644 --- a/central/processlisteningonport/datastore/datastore_impl_test.go +++ b/central/processlisteningonport/datastore/datastore_impl_test.go @@ -70,7 +70,7 @@ func (suite *PLOPDataStoreTestSuite) SetupTest() { indicatorStorage := 
processIndicatorStorage.New(suite.postgres.DB) - suite.indicatorDataStore = processIndicatorDataStore.New( + suite.indicatorDataStore = processIndicatorDataStore.New(suite.postgres.DB, indicatorStorage, suite.store, nil) suite.datastore = New(suite.store, suite.indicatorDataStore, suite.postgres) } diff --git a/central/processlisteningonport/service/service_impl_test.go b/central/processlisteningonport/service/service_impl_test.go index 1f48c7fe5d678..9bc98edd05795 100644 --- a/central/processlisteningonport/service/service_impl_test.go +++ b/central/processlisteningonport/service/service_impl_test.go @@ -61,7 +61,7 @@ func (suite *PLOPServiceTestSuite) SetupTest() { indicatorStorage := processIndicatorStorage.New(suite.postgres.DB) - suite.indicatorDataStore = processIndicatorDataStore.New( + suite.indicatorDataStore = processIndicatorDataStore.New(suite.postgres.DB, indicatorStorage, suite.store, nil) suite.datastore = plopDataStore.New(suite.store, suite.indicatorDataStore, suite.postgres) suite.service = &serviceImpl{ diff --git a/central/risk/multipliers/deployment/process_baseline_violations.go b/central/risk/multipliers/deployment/process_baseline_violations.go index ab7775d695598..2ad12013de856 100644 --- a/central/risk/multipliers/deployment/process_baseline_violations.go +++ b/central/risk/multipliers/deployment/process_baseline_violations.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/stackrox/rox/central/processbaseline/evaluator" + "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/central/risk/multipliers" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/env" @@ -55,13 +56,13 @@ func NewProcessBaselines(evaluator evaluator.Evaluator) Multiplier { } } -func formatProcess(process *storage.ProcessIndicator) string { +func formatProcess(process *views.ProcessIndicatorRiskView) string { sb := strings.Builder{} - stringutils.WriteStringf(&sb, "Detected execution of suspicious process %q", 
process.GetSignal().GetName()) - if len(process.GetSignal().GetArgs()) > 0 { - stringutils.WriteStringf(&sb, " with args %q", process.GetSignal().GetArgs()) + stringutils.WriteStringf(&sb, "Detected execution of suspicious process %q", process.SignalName) + if len(process.SignalArgs) > 0 { + stringutils.WriteStringf(&sb, " with args %q", process.SignalArgs) } - stringutils.WriteStrings(&sb, " in container ", process.GetContainerName()) + stringutils.WriteStrings(&sb, " in container ", process.ContainerName) return sb.String() } diff --git a/central/risk/multipliers/deployment/process_baseline_violations_test.go b/central/risk/multipliers/deployment/process_baseline_violations_test.go index fe13b59b11525..1d47bdec6db7d 100644 --- a/central/risk/multipliers/deployment/process_baseline_violations_test.go +++ b/central/risk/multipliers/deployment/process_baseline_violations_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stackrox/rox/central/processbaseline/evaluator/mocks" + "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/central/risk/multipliers" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/protoassert" @@ -17,7 +18,7 @@ func TestProcessBaselines(t *testing.T) { deployment := multipliers.GetMockDeployment() cases := []struct { name string - violatingProcesses []*storage.ProcessIndicator + violatingProcesses []*views.ProcessIndicatorRiskView evaluatorErr error expected *storage.Risk_Result }{ @@ -26,21 +27,19 @@ func TestProcessBaselines(t *testing.T) { }, { name: "Evaluator error", - violatingProcesses: []*storage.ProcessIndicator{ + violatingProcesses: []*views.ProcessIndicatorRiskView{ { - Id: "SHOULD BE IGNORED", + ID: "SHOULD BE IGNORED", }, }, evaluatorErr: errors.New("here's an error"), }, { name: "One violating process", - violatingProcesses: []*storage.ProcessIndicator{ + violatingProcesses: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - Name: "apt-get", - Args: 
"install nmap", - }, + SignalName: "apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, }, @@ -54,19 +53,15 @@ func TestProcessBaselines(t *testing.T) { }, { name: "Two violating processes", - violatingProcesses: []*storage.ProcessIndicator{ + violatingProcesses: []*views.ProcessIndicatorRiskView{ { - Signal: &storage.ProcessSignal{ - Name: "apt-get", - Args: "install nmap", - }, + SignalName: "apt-get", + SignalArgs: "install nmap", ContainerName: deployment.GetContainers()[0].GetName(), }, { - Signal: &storage.ProcessSignal{ - Name: "curl", - Args: "badssl.com", - }, + SignalName: "curl", + SignalArgs: "badssl.com", ContainerName: deployment.GetContainers()[0].GetName(), }, }, diff --git a/pkg/processbaseline/processbaseline.go b/pkg/processbaseline/processbaseline.go index 51fdb9715dcdc..38d5e427350ad 100644 --- a/pkg/processbaseline/processbaseline.go +++ b/pkg/processbaseline/processbaseline.go @@ -1,6 +1,8 @@ package processbaseline -import "github.com/stackrox/rox/generated/storage" +import ( + "github.com/stackrox/rox/generated/storage" +) // BaselineItemFromProcess returns what we baseline for a given process. // It exists to make sure that we're using the same thing in every place (name vs execfilepath).