From 6e6dd269186c33a0933834bd4897bfc12d42eb2e Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 8 Feb 2022 15:22:43 +0100 Subject: [PATCH 01/46] new SensorComponent to keep local scanner secrets up to date --- pkg/centralsensor/caps_list.go | 3 + .../localscanner/certificate_requester.go | 10 - sensor/kubernetes/localscanner/tls_issuer.go | 220 ++++++++++++++++++ sensor/kubernetes/sensor/sensor.go | 5 + 4 files changed, 228 insertions(+), 10 deletions(-) create mode 100644 sensor/kubernetes/localscanner/tls_issuer.go diff --git a/pkg/centralsensor/caps_list.go b/pkg/centralsensor/caps_list.go index e31cd51e8a556..e0b85406eee2b 100644 --- a/pkg/centralsensor/caps_list.go +++ b/pkg/centralsensor/caps_list.go @@ -24,4 +24,7 @@ const ( // AuditLogEventsCap identifies the capability to handle audit log event detection. AuditLogEventsCap SensorCapability = "AuditLogEvents" + + // LocalScannerCredentialsRefresh identifies the capability to maintain the Local scanner TLS credentials refreshed. + LocalScannerCredentialsRefresh SensorCapability = "LocalScannerCredentialsRefresh" ) diff --git a/sensor/kubernetes/localscanner/certificate_requester.go b/sensor/kubernetes/localscanner/certificate_requester.go index 4de43ad69a69e..001d929c902bf 100644 --- a/sensor/kubernetes/localscanner/certificate_requester.go +++ b/sensor/kubernetes/localscanner/certificate_requester.go @@ -6,7 +6,6 @@ import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/pkg/concurrency" - "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/pkg/uuid" ) @@ -15,17 +14,8 @@ var ( // ErrCertificateRequesterStopped is returned by RequestCertificates when the certificate // requester is not initialized. 
ErrCertificateRequesterStopped = errors.New("stopped") - log = logging.LoggerForModule() - _ CertificateRequester = (*certificateRequesterImpl)(nil) ) -// CertificateRequester requests a new set of local scanner certificates from central. -type CertificateRequester interface { - Start() - Stop() - RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) -} - // NewCertificateRequester creates a new certificate requester that communicates through // the specified channels and initializes a new request ID for reach request. // To use it call Start, and then make requests with RequestCertificates, concurrent requests are supported. diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go new file mode 100644 index 0000000000000..2af73cca98f4b --- /dev/null +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -0,0 +1,220 @@ +package localscanner + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/stackrox/rox/generated/internalapi/central" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/centralsensor" + "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/sync" + "github.com/stackrox/rox/sensor/common" + appsApiv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +var ( + log = logging.LoggerForModule() + + scannerSpec = ServiceCertSecretSpec{ + secretName: "scanner-slim-tls", + caCertFileName: "ca.pem", // FIXME review this and for db + serviceCertFileName: "cert.pem", + serviceKeyFileName: "key.pem", + } + scannerDBSpec = ServiceCertSecretSpec{ + secretName: "scanner-slim-db-tls", + caCertFileName: "ca.pem", + serviceCertFileName: "cert.pem", + serviceKeyFileName: "key.pem", + } + + startTimeout = 5 * time.Minute + processMessageTimeout = time.Second + certRefreshTimeout 
= 5 * time.Minute + certRefreshBackoff = wait.Backoff{ + Duration: 5 * time.Second, + Factor: 3.0, + Jitter: 0.1, + Steps: 5, + Cap: 10 * time.Minute, + } + _ common.SensorComponent = (*localScannerTLSIssuerImpl)(nil) +) + +// NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates +// up to date, using the specified retry parameters. +func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string) common.SensorComponent { + msgToCentralC := make(chan *central.MsgFromSensor) + msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) + return &localScannerTLSIssuerImpl{ + secretsClient: k8sClient.CoreV1().Secrets(sensorNamespace), + deploymentsClient: k8sClient.AppsV1().Deployments(sensorNamespace), + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), + } +} + +type localScannerTLSIssuerImpl struct { + secretsClient corev1.SecretInterface + deploymentsClient appsv1.DeploymentInterface + msgToCentralC chan *central.MsgFromSensor + msgFromCentralC chan *central.IssueLocalScannerCertsResponse + requester CertificateRequester + refresher CertificateRefresher +} + +// CertificateRequester requests a new set of local scanner certificates from central. 
+type CertificateRequester interface { + Start() + Stop() + RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) +} + +type CertificateRefresher interface { + Start() error + Stop() +} + +// ServiceCertificatesRepo TODO replace by ROX-9148 +type ServiceCertificatesRepo interface {} +// requestCertificatesFunc TODO replace by ROX-9148 +type requestCertificatesFunc func(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) +// newCertRefresher TODO replace by ROX-9148 +func newCertRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, + backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher { + return nil +} +// ServiceCertSecretSpec TODO replace by ROX-9128 +type ServiceCertSecretSpec struct { + secretName string + caCertFileName string + serviceCertFileName string + serviceKeyFileName string +} +// NewServiceCertificatesRepo TODO replace by ROX-9128 +func NewServiceCertificatesRepo(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, + sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), + secretsClient corev1.SecretInterface) (ServiceCertificatesRepo, error) { + return nil, nil +} + +func (i *localScannerTLSIssuerImpl) Start() error { + log.Info("starting local scanner TLS issuer.") + ctx, cancel := context.WithTimeout(context.Background(), startTimeout) + defer cancel() + + if i.refresher != nil { + return errors.New("already started") + } + + sensorDeployment, getSensorDeploymentErr := i.getSensorDeployment(ctx) + if getSensorDeploymentErr != nil { + return errors.Wrap(getSensorDeploymentErr, "fetching sensor deployment") + } + + i.requester.Start() + + certsRepo, createCertsRepoErr := NewServiceCertificatesRepo(ctx, scannerSpec, scannerDBSpec, sensorDeployment, + i.initialCertsSupplier(), i.secretsClient) + if createCertsRepoErr != nil { + return 
errors.Wrap(createCertsRepoErr, "creating service certificates repository") + } + i.refresher = newCertRefresher(i.requester.RequestCertificates, certRefreshTimeout, certRefreshBackoff, certsRepo) + if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { + return errors.Wrap(refreshStartErr, "starting certificate refresher") + } + + log.Info("local scanner TLS issuer started.") + + return nil +} + +func (i *localScannerTLSIssuerImpl) Stop(err error) { + if i.refresher != nil { + i.refresher.Stop() + i.refresher = nil + } + + i.requester.Stop() + log.Info("local scanner TLS issuer stopped.") +} + +func (i *localScannerTLSIssuerImpl) Capabilities() []centralsensor.SensorCapability { + return []centralsensor.SensorCapability{centralsensor.LocalScannerCredentialsRefresh} +} + +// ResponsesC is called "responses" because for other SensorComponent it is central that +// initiates the interaction. However, here it is sensor which sends a request to central. +func (i *localScannerTLSIssuerImpl) ResponsesC() <-chan *central.MsgFromSensor { + return i.msgToCentralC +} + +// ProcessMessage is how the central receiver delivers messages from central to SensorComponents. +// This method must not block as it would prevent centralReceiverImpl from sending messages +// to other SensorComponents. +func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) error { + switch m := msg.GetMsg().(type) { + case *central.MsgToSensor_IssueLocalScannerCertsResponse: + response := m.IssueLocalScannerCertsResponse + go func() { + ctx, cancel := context.WithTimeout(context.Background(), processMessageTimeout) + defer cancel() + select { + case <-ctx.Done(): + // refresher will retry. 
+ log.Errorf("timeout forwarding response %s from central: %s", response, ctx.Err()) + case i.msgFromCentralC <- response: + } + }() + return nil + default: + // silently ignore other messages broadcasted by centralReceiverImpl, as centralReceiverImpl logs + // all returned errors with error level. + return nil + } +} + +// initialCertsSupplier request the certificates at most once, and returns the memoized response to that single request. +func (i *localScannerTLSIssuerImpl) initialCertsSupplier() func(context.Context) (*storage.TypedServiceCertificateSet, error) { + var ( + requestOnce sync.Once + certificates *storage.TypedServiceCertificateSet + requestErr error + ) + requestCertificates := i.requester.RequestCertificates + return func(ctx context.Context) (*storage.TypedServiceCertificateSet, error) { + requestOnce.Do(func() { + response, err := requestCertificates(ctx) + if err != nil { + requestErr = err + return + } + if response.GetError() != nil { + requestErr = errors.Errorf("central refused to issue certificates: %s", response.GetError().GetMessage()) + return + } + certificates = response.GetCertificates() + }) + return certificates, requestErr + } +} + +func (i *localScannerTLSIssuerImpl) getSensorDeployment(ctx context.Context) (*appsApiv1.Deployment, error) { + /* + FIXME + - Either using labels + - "app.kubernetes.io/name": "stackrox" + - "app.kubernetes.io/component": "sensor", + - "app.kubernetes.io/instance": "stackrox-secured-cluster-services", + - Or adding deployment UUID as an env var for sensor pod + - Or ?? 
+ */ + return nil, nil +} diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index c54609364aee3..a7236ae730b6d 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -45,6 +45,7 @@ import ( "github.com/stackrox/rox/sensor/kubernetes/fake" "github.com/stackrox/rox/sensor/kubernetes/listener" "github.com/stackrox/rox/sensor/kubernetes/listener/resources" + "github.com/stackrox/rox/sensor/kubernetes/localscanner" "github.com/stackrox/rox/sensor/kubernetes/networkpolicies" "github.com/stackrox/rox/sensor/kubernetes/orchestrator" "github.com/stackrox/rox/sensor/kubernetes/telemetry" @@ -162,6 +163,10 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager return nil, errors.Wrap(err, "creating central client") } + if !features.LocalImageScanning.Enabled() { + components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace)) + } + s := sensor.NewSensor( configHandler, policyDetector, From 84816d9c6966b0d155e733707c9b2459add88918 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 8 Feb 2022 15:30:14 +0100 Subject: [PATCH 02/46] fix style --- .../localscanner/certificate_requester.go | 2 +- sensor/kubernetes/localscanner/tls_issuer.go | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/sensor/kubernetes/localscanner/certificate_requester.go b/sensor/kubernetes/localscanner/certificate_requester.go index 001d929c902bf..7e1bb943c7e02 100644 --- a/sensor/kubernetes/localscanner/certificate_requester.go +++ b/sensor/kubernetes/localscanner/certificate_requester.go @@ -13,7 +13,7 @@ import ( var ( // ErrCertificateRequesterStopped is returned by RequestCertificates when the certificate // requester is not initialized. 
- ErrCertificateRequesterStopped = errors.New("stopped") + ErrCertificateRequesterStopped = errors.New("stopped") ) // NewCertificateRequester creates a new certificate requester that communicates through diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 2af73cca98f4b..cbc7f689ceeed 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -22,13 +22,13 @@ var ( log = logging.LoggerForModule() scannerSpec = ServiceCertSecretSpec{ - secretName: "scanner-slim-tls", + secretName: "scanner-slim-tls", caCertFileName: "ca.pem", // FIXME review this and for db serviceCertFileName: "cert.pem", serviceKeyFileName: "key.pem", } scannerDBSpec = ServiceCertSecretSpec{ - secretName: "scanner-slim-db-tls", + secretName: "scanner-slim-db-tls", caCertFileName: "ca.pem", serviceCertFileName: "cert.pem", serviceKeyFileName: "key.pem", @@ -77,20 +77,25 @@ type CertificateRequester interface { RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) } +// CertificateRefresher periodically checks the expiration of a set of certificates, and if needed +// requests new certificates to central and updates those certificates. 
type CertificateRefresher interface { Start() error Stop() } // ServiceCertificatesRepo TODO replace by ROX-9148 -type ServiceCertificatesRepo interface {} +type ServiceCertificatesRepo interface{} + // requestCertificatesFunc TODO replace by ROX-9148 type requestCertificatesFunc func(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) + // newCertRefresher TODO replace by ROX-9148 func newCertRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher { return nil } + // ServiceCertSecretSpec TODO replace by ROX-9128 type ServiceCertSecretSpec struct { secretName string @@ -98,6 +103,7 @@ type ServiceCertSecretSpec struct { serviceCertFileName string serviceKeyFileName string } + // NewServiceCertificatesRepo TODO replace by ROX-9128 func NewServiceCertificatesRepo(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), @@ -184,9 +190,9 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err // initialCertsSupplier request the certificates at most once, and returns the memoized response to that single request. 
func (i *localScannerTLSIssuerImpl) initialCertsSupplier() func(context.Context) (*storage.TypedServiceCertificateSet, error) { var ( - requestOnce sync.Once + requestOnce sync.Once certificates *storage.TypedServiceCertificateSet - requestErr error + requestErr error ) requestCertificates := i.requester.RequestCertificates return func(ctx context.Context) (*storage.TypedServiceCertificateSet, error) { From 16b5d08566db7d762cd8f5067c2d2e93ce4e604d Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 8 Feb 2022 17:47:16 +0100 Subject: [PATCH 03/46] fetch sensor deployment starting with sensor pod owner name --- .../templates/sensor.yaml.htpl | 4 ++ sensor/kubernetes/localscanner/tls_issuer.go | 69 ++++++++++++------- sensor/kubernetes/sensor/sensor.go | 3 +- 3 files changed, 49 insertions(+), 27 deletions(-) diff --git a/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl b/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl index 5cc349847c53e..4cd95ef837a1f 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl +++ b/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl @@ -108,6 +108,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_OWNER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.ownerReferences[0].name - name: ROX_CENTRAL_ENDPOINT value: {{ ._rox.centralEndpoint }} - name: ROX_ADVERTISED_ENDPOINT diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index cbc7f689ceeed..322e5b45d2c47 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -12,9 +12,9 @@ import ( "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/sensor/common" appsApiv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - appsv1 
"k8s.io/client-go/kubernetes/typed/apps/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) @@ -49,25 +49,28 @@ var ( // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates // up to date, using the specified retry parameters. -func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string) common.SensorComponent { +func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string, + podOwnerName string) common.SensorComponent { msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ - secretsClient: k8sClient.CoreV1().Secrets(sensorNamespace), - deploymentsClient: k8sClient.AppsV1().Deployments(sensorNamespace), - msgToCentralC: msgToCentralC, - msgFromCentralC: msgFromCentralC, - requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), + sensorNamespace: sensorNamespace, + podOwnerName: podOwnerName, + k8sClient: k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } type localScannerTLSIssuerImpl struct { - secretsClient corev1.SecretInterface - deploymentsClient appsv1.DeploymentInterface - msgToCentralC chan *central.MsgFromSensor - msgFromCentralC chan *central.IssueLocalScannerCertsResponse - requester CertificateRequester - refresher CertificateRefresher + sensorNamespace string + podOwnerName string + k8sClient kubernetes.Interface + msgToCentralC chan *central.MsgFromSensor + msgFromCentralC chan *central.IssueLocalScannerCertsResponse + requester CertificateRequester + refresher CertificateRefresher } // CertificateRequester requests a new set of local scanner certificates from central. 
@@ -120,7 +123,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { return errors.New("already started") } - sensorDeployment, getSensorDeploymentErr := i.getSensorDeployment(ctx) + sensorDeployment, getSensorDeploymentErr := i.fetchSensorDeployment(ctx) if getSensorDeploymentErr != nil { return errors.Wrap(getSensorDeploymentErr, "fetching sensor deployment") } @@ -128,7 +131,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { i.requester.Start() certsRepo, createCertsRepoErr := NewServiceCertificatesRepo(ctx, scannerSpec, scannerDBSpec, sensorDeployment, - i.initialCertsSupplier(), i.secretsClient) + i.initialCertsSupplier(), i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) if createCertsRepoErr != nil { return errors.Wrap(createCertsRepoErr, "creating service certificates repository") } @@ -212,15 +215,29 @@ func (i *localScannerTLSIssuerImpl) initialCertsSupplier() func(context.Context) } } -func (i *localScannerTLSIssuerImpl) getSensorDeployment(ctx context.Context) (*appsApiv1.Deployment, error) { - /* - FIXME - - Either using labels - - "app.kubernetes.io/name": "stackrox" - - "app.kubernetes.io/component": "sensor", - - "app.kubernetes.io/instance": "stackrox-secured-cluster-services", - - Or adding deployment UUID as an env var for sensor pod - - Or ?? 
- */ - return nil, nil +func (i *localScannerTLSIssuerImpl) fetchSensorDeployment(ctx context.Context) (*appsApiv1.Deployment, error) { + if i.podOwnerName == "" { + return nil, errors.New("fetching sensor deployment: empty pod owner name") + } + + replicaSetClient := i.k8sClient.AppsV1().ReplicaSets(i.sensorNamespace) + ownerReplicaSet, getReplicaSetErr := replicaSetClient.Get(ctx, i.podOwnerName, metav1.GetOptions{}) + if getReplicaSetErr != nil { + return nil, errors.Wrap(getReplicaSetErr, "fetching owner replica set") + } + + replicaSetOwners := ownerReplicaSet.GetObjectMeta().GetOwnerReferences() + if len(replicaSetOwners) != 1 { + return nil, errors.Errorf("fetching sensor deployment: replica set %q has unexpected owners %v", + ownerReplicaSet.GetName(), replicaSetOwners) + } + replicaSetOwner := replicaSetOwners[0] + + deploymentClient := i.k8sClient.AppsV1().Deployments(i.sensorNamespace) + sensorDeployment, getSensorDeploymentErr := deploymentClient.Get(ctx, replicaSetOwner.Name, metav1.GetOptions{}) + if getSensorDeploymentErr != nil { + return nil, errors.Wrap(getReplicaSetErr, "fetching sensor deployment") + } + + return sensorDeployment, nil } diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index a7236ae730b6d..f1063d5620600 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -163,8 +163,9 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager return nil, errors.Wrap(err, "creating central client") } + podOwnerName := os.Getenv("POD_OWNER_NAME") if !features.LocalImageScanning.Enabled() { - components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace)) + components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podOwnerName)) } s := sensor.NewSensor( From dfe21215347ff055153e28f4034fee67bdbc89be Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: 
Tue, 8 Feb 2022 19:32:33 +0100 Subject: [PATCH 04/46] add some tests --- sensor/kubernetes/localscanner/tls_issuer.go | 19 +- .../localscanner/tls_issuer_test.go | 186 ++++++++++++++++++ 2 files changed, 202 insertions(+), 3 deletions(-) create mode 100644 sensor/kubernetes/localscanner/tls_issuer_test.go diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 322e5b45d2c47..18a7933a15d24 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -59,6 +59,8 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st k8sClient: k8sClient, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, + certificateRefresherSupplier: newCertRefresher, + serviceCertificatesRepoSupplier: NewServiceCertificatesRepo, requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } @@ -69,6 +71,8 @@ type localScannerTLSIssuerImpl struct { k8sClient kubernetes.Interface msgToCentralC chan *central.MsgFromSensor msgFromCentralC chan *central.IssueLocalScannerCertsResponse + certificateRefresherSupplier certificateRefresherSupplier + serviceCertificatesRepoSupplier serviceCertificatesRepoSupplier requester CertificateRequester refresher CertificateRefresher } @@ -87,6 +91,9 @@ type CertificateRefresher interface { Stop() } +type certificateRefresherSupplier func(requestCertificates requestCertificatesFunc, timeout time.Duration, + backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher + // ServiceCertificatesRepo TODO replace by ROX-9148 type ServiceCertificatesRepo interface{} @@ -109,11 +116,17 @@ type ServiceCertSecretSpec struct { // NewServiceCertificatesRepo TODO replace by ROX-9128 func NewServiceCertificatesRepo(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, - sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, 
error), + sensorDeployment *appsApiv1.Deployment, + initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), secretsClient corev1.SecretInterface) (ServiceCertificatesRepo, error) { return nil, nil } +type serviceCertificatesRepoSupplier func(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, + sensorDeployment *appsApiv1.Deployment, + initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), + secretsClient corev1.SecretInterface) (ServiceCertificatesRepo, error) + func (i *localScannerTLSIssuerImpl) Start() error { log.Info("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) @@ -130,12 +143,12 @@ func (i *localScannerTLSIssuerImpl) Start() error { i.requester.Start() - certsRepo, createCertsRepoErr := NewServiceCertificatesRepo(ctx, scannerSpec, scannerDBSpec, sensorDeployment, + certsRepo, createCertsRepoErr := i.serviceCertificatesRepoSupplier(ctx, scannerSpec, scannerDBSpec, sensorDeployment, i.initialCertsSupplier(), i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) if createCertsRepoErr != nil { return errors.Wrap(createCertsRepoErr, "creating service certificates repository") } - i.refresher = newCertRefresher(i.requester.RequestCertificates, certRefreshTimeout, certRefreshBackoff, certsRepo) + i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certRefreshTimeout, certRefreshBackoff, certsRepo) if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { return errors.Wrap(refreshStartErr, "starting certificate refresher") } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go new file mode 100644 index 0000000000000..5700ba2338e93 --- /dev/null +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -0,0 +1,186 @@ +package localscanner + +import ( + "context" + "testing" + "time" + + "github.com/pkg/errors" + 
"github.com/stackrox/rox/generated/internalapi/central" + "github.com/stackrox/rox/generated/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + appsApiv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/fake" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +var ( + sensorNamespace = "stackrox-ns" + sensorReplicasetName = "sensor-replicaset" + sensorDeploymentName = "sensor-deployment" + errForced = errors.New("forced") +) + +type localScannerTLSIssuerFixture struct { + k8sClient *fake.Clientset + requester *certificateRequesterMock + refresher *certificateRefresherMock + supplier *suppliersMock + issuer *localScannerTLSIssuerImpl +} + +func newLocalScannerTLSIssuerFixture(withSensorDeployment, withReplicaSet bool) *localScannerTLSIssuerFixture { + fixture := &localScannerTLSIssuerFixture{} + fixture.requester = &certificateRequesterMock{} + fixture.refresher = &certificateRefresherMock{} + fixture.supplier = &suppliersMock{} + fixture.k8sClient = testK8sClient(withSensorDeployment, withReplicaSet) + msgToCentralC := make(chan *central.MsgFromSensor) + msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) + fixture.issuer = &localScannerTLSIssuerImpl{ + sensorNamespace: sensorNamespace, + podOwnerName: sensorReplicasetName, + k8sClient: fixture.k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + certificateRefresherSupplier: fixture.supplier.supplyCertificateRefresher, + serviceCertificatesRepoSupplier: fixture.supplier.supplyServiceCertificatesRepoSupplier, + requester: fixture.requester, + } + + return fixture +} + +func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { + f.requester.AssertExpectations(t) + f.requester.AssertExpectations(t) + f.supplier.AssertExpectations(t) +} + +func (f *localScannerTLSIssuerFixture) 
mockForStart(refresherStartReturn error) { + f.requester.On("Start").Once() + f.refresher.On("Start").Once().Return(refresherStartReturn) + repo := struct{}{}// TODO ROX-9128 replace by nil casting to impl pointer + f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, scannerSpec, scannerDBSpec, + mock.Anything, mock.Anything, mock.Anything).Once().Return(repo, nil) + f.supplier.On("supplyCertificateRefresher", mock.Anything, + certRefreshTimeout, certRefreshBackoff, repo).Once().Return(f.refresher) +} + +func TestLocalScannerTLSIssuerStartSuccess(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(true, true) + fixture.mockForStart(nil) + + startErr := fixture.issuer.Start() + + assert.NoError(t, startErr) + fixture.assertExpectations(t) +} + +func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(true, true) + fixture.mockForStart(errForced) + + startErr := fixture.issuer.Start() + + assert.Error(t, startErr) + fixture.assertExpectations(t) +} + +func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(true, true) + fixture.mockForStart(nil) + + startErr := fixture.issuer.Start() + secondStartErr := fixture.issuer.Start() + + assert.NoError(t, startErr) + assert.Error(t, secondStartErr) + fixture.assertExpectations(t) +} + +// TODO fetch sensor deployment failures +// TODO sensor component interface methods + +// if withSensorDeployment is false then withReplicaSet is not included +func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { + objects := make([]runtime.Object, 0) + if withSensorDeployment { + sensorDeployment := &appsApiv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: sensorDeploymentName, + Namespace: sensorNamespace, + }, + } + objects = append(objects, sensorDeployment) + if withReplicaSet { + sensorDeploymentGVK := sensorDeployment.GroupVersionKind() + 
sensorReplicaSet := &appsApiv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: sensorReplicasetName, + Namespace: sensorNamespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: sensorDeploymentGVK.GroupVersion().String(), + Kind: sensorDeploymentGVK.Kind, + Name: sensorDeployment.GetName(), + UID: sensorDeployment.GetUID(), + }, + }, + }, + } + objects = append(objects, sensorReplicaSet) + } + + } + + k8sClient := fake.NewSimpleClientset(objects...) + + return k8sClient +} + +type certificateRequesterMock struct { + mock.Mock +} + +func (m *certificateRequesterMock) Start() { + m.Called() +} +func (m *certificateRequesterMock) Stop() { + m.Called() +} +func (m *certificateRequesterMock) RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) { + args := m.Called(ctx) + return args.Get(0).(*central.IssueLocalScannerCertsResponse), args.Error(1) +} + +type certificateRefresherMock struct { + mock.Mock +} + +func (m *certificateRefresherMock) Start() error { + args := m.Called() + return args.Error(0) +} + +func (m *certificateRefresherMock) Stop() { + m.Called() +} + +type suppliersMock struct { + mock.Mock +} + +func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher { + args := m.Called(requestCertificates, timeout, backoff, repository) + return args.Get(0).(CertificateRefresher) +} + +func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), secretsClient v1.SecretInterface) (ServiceCertificatesRepo, error) { + args := m.Called(ctx, scannerSpec, scannerDBSpec, sensorDeployment, initialCertsSupplier, secretsClient) + return args.Get(0).(ServiceCertificatesRepo), 
args.Error(1) +} \ No newline at end of file From d583faad829d22933df94ec99506f36873ea02c4 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 8 Feb 2022 19:33:54 +0100 Subject: [PATCH 05/46] fix style --- sensor/kubernetes/localscanner/tls_issuer.go | 30 ++++++++-------- .../localscanner/tls_issuer_test.go | 36 +++++++++---------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 18a7933a15d24..e8182794ffbd0 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -54,27 +54,27 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ - sensorNamespace: sensorNamespace, - podOwnerName: podOwnerName, - k8sClient: k8sClient, - msgToCentralC: msgToCentralC, - msgFromCentralC: msgFromCentralC, - certificateRefresherSupplier: newCertRefresher, + sensorNamespace: sensorNamespace, + podOwnerName: podOwnerName, + k8sClient: k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + certificateRefresherSupplier: newCertRefresher, serviceCertificatesRepoSupplier: NewServiceCertificatesRepo, - requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), + requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } type localScannerTLSIssuerImpl struct { - sensorNamespace string - podOwnerName string - k8sClient kubernetes.Interface - msgToCentralC chan *central.MsgFromSensor - msgFromCentralC chan *central.IssueLocalScannerCertsResponse - certificateRefresherSupplier certificateRefresherSupplier + sensorNamespace string + podOwnerName string + k8sClient kubernetes.Interface + msgToCentralC chan *central.MsgFromSensor + msgFromCentralC chan *central.IssueLocalScannerCertsResponse 
+ certificateRefresherSupplier certificateRefresherSupplier serviceCertificatesRepoSupplier serviceCertificatesRepoSupplier - requester CertificateRequester - refresher CertificateRefresher + requester CertificateRequester + refresher CertificateRefresher } // CertificateRequester requests a new set of local scanner certificates from central. diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 5700ba2338e93..d9b8d1ae1b5f2 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -19,18 +19,18 @@ import ( ) var ( - sensorNamespace = "stackrox-ns" + sensorNamespace = "stackrox-ns" sensorReplicasetName = "sensor-replicaset" sensorDeploymentName = "sensor-deployment" - errForced = errors.New("forced") + errForced = errors.New("forced") ) type localScannerTLSIssuerFixture struct { k8sClient *fake.Clientset requester *certificateRequesterMock refresher *certificateRefresherMock - supplier *suppliersMock - issuer *localScannerTLSIssuerImpl + supplier *suppliersMock + issuer *localScannerTLSIssuerImpl } func newLocalScannerTLSIssuerFixture(withSensorDeployment, withReplicaSet bool) *localScannerTLSIssuerFixture { @@ -42,14 +42,14 @@ func newLocalScannerTLSIssuerFixture(withSensorDeployment, withReplicaSet bool) msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) fixture.issuer = &localScannerTLSIssuerImpl{ - sensorNamespace: sensorNamespace, - podOwnerName: sensorReplicasetName, - k8sClient: fixture.k8sClient, - msgToCentralC: msgToCentralC, - msgFromCentralC: msgFromCentralC, - certificateRefresherSupplier: fixture.supplier.supplyCertificateRefresher, + sensorNamespace: sensorNamespace, + podOwnerName: sensorReplicasetName, + k8sClient: fixture.k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + certificateRefresherSupplier: 
fixture.supplier.supplyCertificateRefresher, serviceCertificatesRepoSupplier: fixture.supplier.supplyServiceCertificatesRepoSupplier, - requester: fixture.requester, + requester: fixture.requester, } return fixture @@ -64,7 +64,7 @@ func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { func (f *localScannerTLSIssuerFixture) mockForStart(refresherStartReturn error) { f.requester.On("Start").Once() f.refresher.On("Start").Once().Return(refresherStartReturn) - repo := struct{}{}// TODO ROX-9128 replace by nil casting to impl pointer + repo := struct{}{} // TODO ROX-9128 replace by nil casting to impl pointer f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, scannerSpec, scannerDBSpec, mock.Anything, mock.Anything, mock.Anything).Once().Return(repo, nil) f.supplier.On("supplyCertificateRefresher", mock.Anything, @@ -112,7 +112,7 @@ func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { if withSensorDeployment { sensorDeployment := &appsApiv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: sensorDeploymentName, + Name: sensorDeploymentName, Namespace: sensorNamespace, }, } @@ -121,14 +121,14 @@ func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { sensorDeploymentGVK := sensorDeployment.GroupVersionKind() sensorReplicaSet := &appsApiv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ - Name: sensorReplicasetName, + Name: sensorReplicasetName, Namespace: sensorNamespace, OwnerReferences: []metav1.OwnerReference{ { APIVersion: sensorDeploymentGVK.GroupVersion().String(), - Kind: sensorDeploymentGVK.Kind, - Name: sensorDeployment.GetName(), - UID: sensorDeployment.GetUID(), + Kind: sensorDeploymentGVK.Kind, + Name: sensorDeployment.GetName(), + UID: sensorDeployment.GetUID(), }, }, }, @@ -183,4 +183,4 @@ func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCe func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ctx context.Context, scannerSpec, 
scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), secretsClient v1.SecretInterface) (ServiceCertificatesRepo, error) { args := m.Called(ctx, scannerSpec, scannerDBSpec, sensorDeployment, initialCertsSupplier, secretsClient) return args.Get(0).(ServiceCertificatesRepo), args.Error(1) -} \ No newline at end of file +} From d3f9d1524e93a6f2520f00048d448e3ede8bd509 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 10 Feb 2022 19:38:47 +0100 Subject: [PATCH 06/46] wip adapting to changes in repo and refresher --- sensor/kubernetes/localscanner/tls_issuer.go | 137 ++++++++++-------- .../localscanner/tls_issuer_test.go | 6 +- 2 files changed, 81 insertions(+), 62 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index e8182794ffbd0..24dd17bb68c6d 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -8,10 +8,11 @@ import ( "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/centralsensor" + "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/sensor/common" - appsApiv1 "k8s.io/api/apps/v1" + k8sErrrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -21,19 +22,6 @@ import ( var ( log = logging.LoggerForModule() - scannerSpec = ServiceCertSecretSpec{ - secretName: "scanner-slim-tls", - caCertFileName: "ca.pem", // FIXME review this and for db - serviceCertFileName: "cert.pem", - serviceKeyFileName: "key.pem", - } - scannerDBSpec = ServiceCertSecretSpec{ - secretName: "scanner-slim-db-tls", - caCertFileName: "ca.pem", - serviceCertFileName: "cert.pem", - 
serviceKeyFileName: "key.pem", - } - startTimeout = 5 * time.Minute processMessageTimeout = time.Second certRefreshTimeout = 5 * time.Minute @@ -45,6 +33,13 @@ var ( Cap: 10 * time.Minute, } _ common.SensorComponent = (*localScannerTLSIssuerImpl)(nil) + + // ErrUnexpectedSecretsOwner TODO replace by ROX-9128 + ErrUnexpectedSecretsOwner = errors.New("unexpected owner for certificate secrets") + // ErrDifferentCAForDifferentServiceTypes TODO replace by ROX-9128 + ErrDifferentCAForDifferentServiceTypes = errors.New("found different CA PEM in secret Data for different service types") + // ErrMissingSecretData TODO replace by ROX-9128 + ErrMissingSecretData = errors.New("missing secret data") ) // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates @@ -59,8 +54,8 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st k8sClient: k8sClient, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, - certificateRefresherSupplier: newCertRefresher, - serviceCertificatesRepoSupplier: NewServiceCertificatesRepo, + certificateRefresherSupplier: newCertificatesRefresher, + serviceCertificatesRepoSupplier: newServiceCertificatesRepo, requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } @@ -74,7 +69,7 @@ type localScannerTLSIssuerImpl struct { certificateRefresherSupplier certificateRefresherSupplier serviceCertificatesRepoSupplier serviceCertificatesRepoSupplier requester CertificateRequester - refresher CertificateRefresher + refresher concurrency.RetryTicker } // CertificateRequester requests a new set of local scanner certificates from central. @@ -84,49 +79,37 @@ type CertificateRequester interface { RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) } -// CertificateRefresher periodically checks the expiration of a set of certificates, and if needed -// requests new certificates to central and updates those certificates. 
-type CertificateRefresher interface { - Start() error - Stop() -} - -type certificateRefresherSupplier func(requestCertificates requestCertificatesFunc, timeout time.Duration, - backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher +type certificateRefresherSupplier func(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, + timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker -// ServiceCertificatesRepo TODO replace by ROX-9148 -type ServiceCertificatesRepo interface{} +// serviceCertificatesRepo TODO replace by ROX-9148 +type serviceCertificatesRepo interface { + getServiceCertificates(ctx context.Context) (*storage.TypedServiceCertificateSet, error) + createSecrets(ctx context.Context, initialCertificates *storage.TypedServiceCertificateSet) error +} // requestCertificatesFunc TODO replace by ROX-9148 type requestCertificatesFunc func(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) // newCertRefresher TODO replace by ROX-9148 -func newCertRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, - backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher { +func newCertificatesRefresher(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, + timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker { return nil } -// ServiceCertSecretSpec TODO replace by ROX-9128 -type ServiceCertSecretSpec struct { - secretName string - caCertFileName string - serviceCertFileName string - serviceKeyFileName string -} - // NewServiceCertificatesRepo TODO replace by ROX-9128 -func NewServiceCertificatesRepo(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, - sensorDeployment *appsApiv1.Deployment, - initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), - secretsClient corev1.SecretInterface) (ServiceCertificatesRepo, error) { - return nil, nil +func 
newServiceCertificatesRepo(ownerReference metav1.OwnerReference, namespace string, + secretsClient corev1.SecretInterface) serviceCertificatesRepo { + return nil } -type serviceCertificatesRepoSupplier func(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, - sensorDeployment *appsApiv1.Deployment, - initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), - secretsClient corev1.SecretInterface) (ServiceCertificatesRepo, error) +type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, namespace string, + secretsClient corev1.SecretInterface) serviceCertificatesRepo +// Start +// - In case a secret doesn't have the expected owner, this logs a warning and returns nil. +// - In case a secret doesn't exist this creates it setting the deployment for sensor as owner, and initializing +// its data with fresh fetched certificates. func (i *localScannerTLSIssuerImpl) Start() error { log.Info("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) @@ -136,23 +119,25 @@ func (i *localScannerTLSIssuerImpl) Start() error { return errors.New("already started") } - sensorDeployment, getSensorDeploymentErr := i.fetchSensorDeployment(ctx) - if getSensorDeploymentErr != nil { - return errors.Wrap(getSensorDeploymentErr, "fetching sensor deployment") + sensorOwnerReference, fetchSensorDeploymentErr := i.fetchSensorDeploymentOwnerRef(ctx) + if fetchSensorDeploymentErr != nil { + return errors.Wrap(fetchSensorDeploymentErr, "fetching sensor deployment") } - i.requester.Start() + certsRepo := i.serviceCertificatesRepoSupplier(*sensorOwnerReference, i.sensorNamespace, + i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - certsRepo, createCertsRepoErr := i.serviceCertificatesRepoSupplier(ctx, scannerSpec, scannerDBSpec, sensorDeployment, - i.initialCertsSupplier(), i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - if createCertsRepoErr != nil { - return 
errors.Wrap(createCertsRepoErr, "creating service certificates repository") + if ensureSecretsErr := i.ensureSecrets(ctx, certsRepo); ensureSecretsErr != nil { + errors.Wrap(ensureSecretsErr, "ensuring secrets certificates are created") } - i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certRefreshTimeout, certRefreshBackoff, certsRepo) + + i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certsRepo, + certRefreshTimeout, certRefreshBackoff) + + i.requester.Start() if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { return errors.Wrap(refreshStartErr, "starting certificate refresher") } - log.Info("local scanner TLS issuer started.") return nil @@ -228,7 +213,7 @@ func (i *localScannerTLSIssuerImpl) initialCertsSupplier() func(context.Context) } } -func (i *localScannerTLSIssuerImpl) fetchSensorDeployment(ctx context.Context) (*appsApiv1.Deployment, error) { +func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context) (*metav1.OwnerReference, error) { if i.podOwnerName == "" { return nil, errors.New("fetching sensor deployment: empty pod owner name") } @@ -252,5 +237,39 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeployment(ctx context.Context) ( return nil, errors.Wrap(getReplicaSetErr, "fetching sensor deployment") } - return sensorDeployment, nil + sensorDeploymentGVK := sensorDeployment.GroupVersionKind() + blockOwnerDeletion := false + isController := false + return &metav1.OwnerReference{ + APIVersion: sensorDeploymentGVK.GroupVersion().String(), + Kind: sensorDeploymentGVK.Kind, + Name: sensorDeployment.GetName(), + UID: sensorDeployment.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + }, nil } + +func (i *localScannerTLSIssuerImpl) ensureSecrets(ctx context.Context, certsRepo serviceCertificatesRepo) error { + certificates, err := certsRepo.getServiceCertificates(ctx) + + if err == ErrUnexpectedSecretsOwner { + 
log.Warn("local scanner certificates secrets are not owned by sensor deployment, " + + "sensor will not keep those secrets up to date") + return nil + } + + if k8sErrrors.IsNotFound(err) { + // FIXME one secret exists and the other does not + if createSecretsErr := certsRepo.createSecrets(ctx, certificates); createSecretsErr != nil { + return errors.Wrap(createSecretsErr, "creating certificates secrets") + } + } + + if err == ErrDifferentCAForDifferentServiceTypes || err == ErrMissingSecretData { + // to be fixed on first refresh. + return nil + } + + return err +} \ No newline at end of file diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index d9b8d1ae1b5f2..e0f90fe264d71 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -175,12 +175,12 @@ type suppliersMock struct { mock.Mock } -func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, backoff wait.Backoff, repository ServiceCertificatesRepo) CertificateRefresher { +func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, backoff wait.Backoff, repository serviceCertificatesRepo) CertificateRefresher { args := m.Called(requestCertificates, timeout, backoff, repository) return args.Get(0).(CertificateRefresher) } -func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), secretsClient v1.SecretInterface) (ServiceCertificatesRepo, error) { +func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) 
(*storage.TypedServiceCertificateSet, error), secretsClient v1.SecretInterface) (serviceCertificatesRepo, error) { args := m.Called(ctx, scannerSpec, scannerDBSpec, sensorDeployment, initialCertsSupplier, secretsClient) - return args.Get(0).(ServiceCertificatesRepo), args.Error(1) + return args.Get(0).(serviceCertificatesRepo), args.Error(1) } From b52c3f50a01c73a14f1f11d1d8b5ecc2716a7292 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 15:10:42 +0100 Subject: [PATCH 07/46] rebase --- .../localscanner/certificate_requester.go | 3 +- .../service_certificates_repository.go | 2 +- sensor/kubernetes/localscanner/tls_issuer.go | 103 +++--------------- 3 files changed, 16 insertions(+), 92 deletions(-) diff --git a/sensor/kubernetes/localscanner/certificate_requester.go b/sensor/kubernetes/localscanner/certificate_requester.go index 7e1bb943c7e02..b2cb4252f19bb 100644 --- a/sensor/kubernetes/localscanner/certificate_requester.go +++ b/sensor/kubernetes/localscanner/certificate_requester.go @@ -13,7 +13,8 @@ import ( var ( // ErrCertificateRequesterStopped is returned by RequestCertificates when the certificate // requester is not initialized. 
- ErrCertificateRequesterStopped = errors.New("stopped") + ErrCertificateRequesterStopped = errors.New("stopped") + _ CertificateRequester = (*certificateRequesterImpl)(nil) ) // NewCertificateRequester creates a new certificate requester that communicates through diff --git a/sensor/kubernetes/localscanner/service_certificates_repository.go b/sensor/kubernetes/localscanner/service_certificates_repository.go index c369a3087594c..b369236ba532d 100644 --- a/sensor/kubernetes/localscanner/service_certificates_repository.go +++ b/sensor/kubernetes/localscanner/service_certificates_repository.go @@ -53,7 +53,7 @@ type serviceCertSecretSpec struct { // newServiceCertificatesRepo creates a new serviceCertificatesRepoSecretsImpl that persists certificates for // scanner and scanner DB in k8s secrets that are expected to have ownerReference as the only owner reference. func newServiceCertificatesRepo(ownerReference metav1.OwnerReference, namespace string, - secretsClient corev1.SecretInterface) *serviceCertificatesRepoSecretsImpl { + secretsClient corev1.SecretInterface) serviceCertificatesRepo { return &serviceCertificatesRepoSecretsImpl{ secrets: map[storage.ServiceType]serviceCertSecretSpec{ diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 24dd17bb68c6d..6f04302ca024e 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -6,13 +6,10 @@ import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" - "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" - "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/sensor/common" - k8sErrrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -23,7 +20,7 @@ var ( log = 
logging.LoggerForModule() startTimeout = 5 * time.Minute - processMessageTimeout = time.Second + processMessageTimeout = 5 * time.Second certRefreshTimeout = 5 * time.Minute certRefreshBackoff = wait.Backoff{ Duration: 5 * time.Second, @@ -33,13 +30,6 @@ var ( Cap: 10 * time.Minute, } _ common.SensorComponent = (*localScannerTLSIssuerImpl)(nil) - - // ErrUnexpectedSecretsOwner TODO replace by ROX-9128 - ErrUnexpectedSecretsOwner = errors.New("unexpected owner for certificate secrets") - // ErrDifferentCAForDifferentServiceTypes TODO replace by ROX-9128 - ErrDifferentCAForDifferentServiceTypes = errors.New("found different CA PEM in secret Data for different service types") - // ErrMissingSecretData TODO replace by ROX-9128 - ErrMissingSecretData = errors.New("missing secret data") ) // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates @@ -82,34 +72,11 @@ type CertificateRequester interface { type certificateRefresherSupplier func(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker -// serviceCertificatesRepo TODO replace by ROX-9148 -type serviceCertificatesRepo interface { - getServiceCertificates(ctx context.Context) (*storage.TypedServiceCertificateSet, error) - createSecrets(ctx context.Context, initialCertificates *storage.TypedServiceCertificateSet) error -} - -// requestCertificatesFunc TODO replace by ROX-9148 -type requestCertificatesFunc func(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) - -// newCertRefresher TODO replace by ROX-9148 -func newCertificatesRefresher(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, - timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker { - return nil -} - -// NewServiceCertificatesRepo TODO replace by ROX-9128 -func newServiceCertificatesRepo(ownerReference metav1.OwnerReference, namespace string, - 
secretsClient corev1.SecretInterface) serviceCertificatesRepo { - return nil -} - type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, namespace string, secretsClient corev1.SecretInterface) serviceCertificatesRepo -// Start -// - In case a secret doesn't have the expected owner, this logs a warning and returns nil. -// - In case a secret doesn't exist this creates it setting the deployment for sensor as owner, and initializing -// its data with fresh fetched certificates. +// Start launches a certificate refreshes that immediately checks the certificates, and that keeps them updated. +// In case a secret doesn't have the expected owner, this logs a warning and returns nil. func (i *localScannerTLSIssuerImpl) Start() error { log.Info("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) @@ -126,20 +93,25 @@ func (i *localScannerTLSIssuerImpl) Start() error { certsRepo := i.serviceCertificatesRepoSupplier(*sensorOwnerReference, i.sensorNamespace, i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - - if ensureSecretsErr := i.ensureSecrets(ctx, certsRepo); ensureSecretsErr != nil { - errors.Wrap(ensureSecretsErr, "ensuring secrets certificates are created") + _, getCertsErr := certsRepo.getServiceCertificates(ctx) + if errors.Is(getCertsErr, ErrUnexpectedSecretsOwner) { + log.Warn("local scanner certificates secrets are not owned by sensor deployment, " + + "sensor will not maintain those secrets up to date") + return nil + } + if getCertsErr != nil && !getCertsErrorIsRecoverable(getCertsErr) { + return errors.Wrap(getCertsErr, "checking certificate secrets ownership") } + // i.refresher can recover from any err such that getCertsErrorIsRecoverable(err) i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certsRepo, certRefreshTimeout, certRefreshBackoff) - i.requester.Start() if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { return 
errors.Wrap(refreshStartErr, "starting certificate refresher") } - log.Info("local scanner TLS issuer started.") + log.Info("local scanner TLS issuer started.") return nil } @@ -188,31 +160,6 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err } } -// initialCertsSupplier request the certificates at most once, and returns the memoized response to that single request. -func (i *localScannerTLSIssuerImpl) initialCertsSupplier() func(context.Context) (*storage.TypedServiceCertificateSet, error) { - var ( - requestOnce sync.Once - certificates *storage.TypedServiceCertificateSet - requestErr error - ) - requestCertificates := i.requester.RequestCertificates - return func(ctx context.Context) (*storage.TypedServiceCertificateSet, error) { - requestOnce.Do(func() { - response, err := requestCertificates(ctx) - if err != nil { - requestErr = err - return - } - if response.GetError() != nil { - requestErr = errors.Errorf("central refused to issue certificates: %s", response.GetError().GetMessage()) - return - } - certificates = response.GetCertificates() - }) - return certificates, requestErr - } -} - func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context) (*metav1.OwnerReference, error) { if i.podOwnerName == "" { return nil, errors.New("fetching sensor deployment: empty pod owner name") @@ -249,27 +196,3 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co Controller: &isController, }, nil } - -func (i *localScannerTLSIssuerImpl) ensureSecrets(ctx context.Context, certsRepo serviceCertificatesRepo) error { - certificates, err := certsRepo.getServiceCertificates(ctx) - - if err == ErrUnexpectedSecretsOwner { - log.Warn("local scanner certificates secrets are not owned by sensor deployment, " + - "sensor will not keep those secrets up to date") - return nil - } - - if k8sErrrors.IsNotFound(err) { - // FIXME one secret exists and the other does not - if createSecretsErr := 
certsRepo.createSecrets(ctx, certificates); createSecretsErr != nil { - return errors.Wrap(createSecretsErr, "creating certificates secrets") - } - } - - if err == ErrDifferentCAForDifferentServiceTypes || err == ErrMissingSecretData { - // to be fixed on first refresh. - return nil - } - - return err -} \ No newline at end of file From 40da452e8e188443e4f71851b1a82aae3df0d5bc Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 15:30:51 +0100 Subject: [PATCH 08/46] rebase tests --- .../localscanner/tls_issuer_test.go | 45 +++++++++++++------ 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index e0f90fe264d71..d0ad90199e3e8 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/concurrency" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" appsApiv1 "k8s.io/api/apps/v1" @@ -15,20 +15,20 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/fake" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) var ( sensorNamespace = "stackrox-ns" sensorReplicasetName = "sensor-replicaset" sensorDeploymentName = "sensor-deployment" - errForced = errors.New("forced") ) type localScannerTLSIssuerFixture struct { k8sClient *fake.Clientset requester *certificateRequesterMock refresher *certificateRefresherMock + repo *certsRepoMock supplier *suppliersMock issuer *localScannerTLSIssuerImpl } @@ -37,6 +37,7 @@ func newLocalScannerTLSIssuerFixture(withSensorDeployment, withReplicaSet bool) fixture := &localScannerTLSIssuerFixture{} fixture.requester = 
&certificateRequesterMock{} fixture.refresher = &certificateRefresherMock{} + fixture.repo = &certsRepoMock{} fixture.supplier = &suppliersMock{} fixture.k8sClient = testK8sClient(withSensorDeployment, withReplicaSet) msgToCentralC := make(chan *central.MsgFromSensor) @@ -64,11 +65,11 @@ func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { func (f *localScannerTLSIssuerFixture) mockForStart(refresherStartReturn error) { f.requester.On("Start").Once() f.refresher.On("Start").Once().Return(refresherStartReturn) - repo := struct{}{} // TODO ROX-9128 replace by nil casting to impl pointer - f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, scannerSpec, scannerDBSpec, - mock.Anything, mock.Anything, mock.Anything).Once().Return(repo, nil) - f.supplier.On("supplyCertificateRefresher", mock.Anything, - certRefreshTimeout, certRefreshBackoff, repo).Once().Return(f.refresher) + f.repo.On("getServiceCertificates", mock.Anything).Once().Return((*storage.TypedServiceCertificateSet)(nil), nil) + f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, + mock.Anything, mock.Anything).Once().Return(f.repo, nil) + f.supplier.On("supplyCertificateRefresher", mock.Anything, f.repo, + certRefreshTimeout, certRefreshBackoff).Once().Return(f.refresher) } func TestLocalScannerTLSIssuerStartSuccess(t *testing.T) { @@ -175,12 +176,28 @@ type suppliersMock struct { mock.Mock } -func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, timeout time.Duration, backoff wait.Backoff, repository serviceCertificatesRepo) CertificateRefresher { - args := m.Called(requestCertificates, timeout, backoff, repository) - return args.Get(0).(CertificateRefresher) +func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, + repository serviceCertificatesRepo, timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker { + args := m.Called(requestCertificates, 
repository, timeout, backoff) + return args.Get(0).(concurrency.RetryTicker) } -func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ctx context.Context, scannerSpec, scannerDBSpec ServiceCertSecretSpec, sensorDeployment *appsApiv1.Deployment, initialCertsSupplier func(context.Context) (*storage.TypedServiceCertificateSet, error), secretsClient v1.SecretInterface) (serviceCertificatesRepo, error) { - args := m.Called(ctx, scannerSpec, scannerDBSpec, sensorDeployment, initialCertsSupplier, secretsClient) - return args.Get(0).(serviceCertificatesRepo), args.Error(1) +func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ownerReference metav1.OwnerReference, namespace string, + secretsClient corev1.SecretInterface) serviceCertificatesRepo { + args := m.Called(ownerReference, namespace, secretsClient) + return args.Get(0).(serviceCertificatesRepo) +} + +type certsRepoMock struct { + mock.Mock +} + +func (m *certsRepoMock) getServiceCertificates(ctx context.Context) (*storage.TypedServiceCertificateSet, error) { + args := m.Called(ctx) + return args.Get(0).(*storage.TypedServiceCertificateSet), args.Error(1) +} + +func (m *certsRepoMock) ensureServiceCertificates(ctx context.Context, certificates *storage.TypedServiceCertificateSet) error { + args := m.Called(ctx, certificates) + return args.Error(0) } From 60844b5f01a40cff8bbc84e5cb573efe3e814647 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 16:02:59 +0100 Subject: [PATCH 09/46] add test for fetch sensor deployment --- .../localscanner/tls_issuer_test.go | 35 ++++++++++++++----- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index d0ad90199e3e8..6d8228bac39e6 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -33,13 +33,13 @@ type localScannerTLSIssuerFixture struct { issuer 
*localScannerTLSIssuerImpl } -func newLocalScannerTLSIssuerFixture(withSensorDeployment, withReplicaSet bool) *localScannerTLSIssuerFixture { +func newLocalScannerTLSIssuerFixture(createSensorDeployment, createSensorReplicaSet bool) *localScannerTLSIssuerFixture { fixture := &localScannerTLSIssuerFixture{} fixture.requester = &certificateRequesterMock{} fixture.refresher = &certificateRefresherMock{} fixture.repo = &certsRepoMock{} fixture.supplier = &suppliersMock{} - fixture.k8sClient = testK8sClient(withSensorDeployment, withReplicaSet) + fixture.k8sClient = testK8sClient(createSensorDeployment, createSensorReplicaSet) msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) fixture.issuer = &localScannerTLSIssuerImpl{ @@ -62,6 +62,7 @@ func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { f.supplier.AssertExpectations(t) } +// mockForStart setups the mocks for the happy path of Start func (f *localScannerTLSIssuerFixture) mockForStart(refresherStartReturn error) { f.requester.On("Start").Once() f.refresher.On("Start").Once().Return(refresherStartReturn) @@ -104,13 +105,32 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { fixture.assertExpectations(t) } -// TODO fetch sensor deployment failures +func TestLocalScannerTLSIssuerFetchSensorDeploymentErrorStartFailure(t *testing.T) { + testCases := map[string]struct { + createSensorDeployment bool + createSensorReplicaset bool + }{ + "sensor replica set missing": {createSensorDeployment: true, createSensorReplicaset: false}, + "sensor deployment set missing": {createSensorDeployment: false, createSensorReplicaset: false}, + } + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(tc.createSensorDeployment, tc.createSensorReplicaset) + + startErr := fixture.issuer.Start() + + assert.Error(t, startErr) + fixture.assertExpectations(t) + }) + } +} + 
// TODO sensor component interface methods -// if withSensorDeployment is false then withReplicaSet is not included -func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { +// if createSensorDeployment is false then createSensorReplicaSet is not treated as false. +func testK8sClient(createSensorDeployment, createSensorReplicaSet bool) *fake.Clientset { objects := make([]runtime.Object, 0) - if withSensorDeployment { + if createSensorDeployment { sensorDeployment := &appsApiv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: sensorDeploymentName, @@ -118,7 +138,7 @@ func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { }, } objects = append(objects, sensorDeployment) - if withReplicaSet { + if createSensorReplicaSet { sensorDeploymentGVK := sensorDeployment.GroupVersionKind() sensorReplicaSet := &appsApiv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ @@ -136,7 +156,6 @@ func testK8sClient(withSensorDeployment, withReplicaSet bool) *fake.Clientset { } objects = append(objects, sensorReplicaSet) } - } k8sClient := fake.NewSimpleClientset(objects...) 
From 43be8fea36b05caa99e908cf5f5d994a1bfbe362 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 16:30:55 +0100 Subject: [PATCH 10/46] complete tests for start --- .../localscanner/tls_issuer_test.go | 98 ++++++++++++++----- 1 file changed, 75 insertions(+), 23 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 6d8228bac39e6..e8c1351babf8e 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -5,14 +5,17 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" appsApiv1 "k8s.io/api/apps/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -33,13 +36,13 @@ type localScannerTLSIssuerFixture struct { issuer *localScannerTLSIssuerImpl } -func newLocalScannerTLSIssuerFixture(createSensorDeployment, createSensorReplicaSet bool) *localScannerTLSIssuerFixture { +func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *localScannerTLSIssuerFixture { fixture := &localScannerTLSIssuerFixture{} fixture.requester = &certificateRequesterMock{} fixture.refresher = &certificateRefresherMock{} fixture.repo = &certsRepoMock{} fixture.supplier = &suppliersMock{} - fixture.k8sClient = testK8sClient(createSensorDeployment, createSensorReplicaSet) + fixture.k8sClient = testK8sClient(k8sClientConfig) msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) fixture.issuer = 
&localScannerTLSIssuerImpl{ @@ -63,29 +66,47 @@ func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { } // mockForStart setups the mocks for the happy path of Start -func (f *localScannerTLSIssuerFixture) mockForStart(refresherStartReturn error) { +func (f *localScannerTLSIssuerFixture) mockForStart(conf mockForStartConfig) { f.requester.On("Start").Once() - f.refresher.On("Start").Once().Return(refresherStartReturn) - f.repo.On("getServiceCertificates", mock.Anything).Once().Return((*storage.TypedServiceCertificateSet)(nil), nil) + f.refresher.On("Start").Once().Return(conf.refresherStartErr) + f.repo.On("getServiceCertificates", mock.Anything).Once(). + Return((*storage.TypedServiceCertificateSet)(nil), conf.getCertsErr) f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, mock.Anything, mock.Anything).Once().Return(f.repo, nil) f.supplier.On("supplyCertificateRefresher", mock.Anything, f.repo, certRefreshTimeout, certRefreshBackoff).Once().Return(f.refresher) } +type mockForStartConfig struct { + getCertsErr error + refresherStartErr error +} + func TestLocalScannerTLSIssuerStartSuccess(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(true, true) - fixture.mockForStart(nil) + testCases := map[string]struct { + getCertsErr error + }{ + "no error": {getCertsErr: nil}, + "missing secret data": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, + "inconsistent CAs": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, + "missing secret": {getCertsErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "foo")}, + } + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture.mockForStart(mockForStartConfig{getCertsErr: tc.getCertsErr}) - startErr := fixture.issuer.Start() + startErr := fixture.issuer.Start() - assert.NoError(t, startErr) - fixture.assertExpectations(t) + assert.NoError(t, 
startErr) + fixture.assertExpectations(t) + }) + } } func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(true, true) - fixture.mockForStart(errForced) + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture.mockForStart(mockForStartConfig{refresherStartErr: errForced}) startErr := fixture.issuer.Start() @@ -94,8 +115,8 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { } func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(true, true) - fixture.mockForStart(nil) + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture.mockForStart(mockForStartConfig{}) startErr := fixture.issuer.Start() secondStartErr := fixture.issuer.Start() @@ -107,15 +128,14 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { func TestLocalScannerTLSIssuerFetchSensorDeploymentErrorStartFailure(t *testing.T) { testCases := map[string]struct { - createSensorDeployment bool - createSensorReplicaset bool + k8sClientConfig testK8sClientConfig }{ - "sensor replica set missing": {createSensorDeployment: true, createSensorReplicaset: false}, - "sensor deployment set missing": {createSensorDeployment: false, createSensorReplicaset: false}, + "sensor replica set missing": {k8sClientConfig: testK8sClientConfig{skipSensorReplicaSet: true}}, + "sensor deployment set missing": {k8sClientConfig: testK8sClientConfig{skipSensorDeployment: true}}, } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(tc.createSensorDeployment, tc.createSensorReplicaset) + fixture := newLocalScannerTLSIssuerFixture(tc.k8sClientConfig) startErr := fixture.issuer.Start() @@ -125,12 +145,37 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentErrorStartFailure(t *testing. 
} } +func TestLocalScannerTLSIssuerNoopOnUnexpectedSecretsOwner(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, + mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) + fixture.repo.On("getServiceCertificates", mock.Anything).Once(). + Return((*storage.TypedServiceCertificateSet)(nil), errors.Wrap(ErrUnexpectedSecretsOwner, "forced error")) + + startErr := fixture.issuer.Start() + + assert.NoError(t, startErr) + fixture.assertExpectations(t) +} + +func TestLocalScannerTLSIssuerUnrecoverableGetCertsErrorStartFailure(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, + mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) + fixture.repo.On("getServiceCertificates", mock.Anything).Once(). + Return((*storage.TypedServiceCertificateSet)(nil), errForced) + + startErr := fixture.issuer.Start() + + assert.ErrorIs(t, startErr, errForced) + fixture.assertExpectations(t) +} + // TODO sensor component interface methods -// if createSensorDeployment is false then createSensorReplicaSet is not treated as false. 
-func testK8sClient(createSensorDeployment, createSensorReplicaSet bool) *fake.Clientset { +func testK8sClient(conf testK8sClientConfig) *fake.Clientset { objects := make([]runtime.Object, 0) - if createSensorDeployment { + if !conf.skipSensorDeployment { sensorDeployment := &appsApiv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: sensorDeploymentName, @@ -138,7 +183,7 @@ func testK8sClient(createSensorDeployment, createSensorReplicaSet bool) *fake.Cl }, } objects = append(objects, sensorDeployment) - if createSensorReplicaSet { + if !conf.skipSensorReplicaSet { sensorDeploymentGVK := sensorDeployment.GroupVersionKind() sensorReplicaSet := &appsApiv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ @@ -163,6 +208,13 @@ func testK8sClient(createSensorDeployment, createSensorReplicaSet bool) *fake.Cl return k8sClient } +type testK8sClientConfig struct { + // if true then no sensor deployment and no replica set will be added to the test client. + skipSensorDeployment bool + // if true then no sensor replica set will be added to the test client. 
+ skipSensorReplicaSet bool +} + type certificateRequesterMock struct { mock.Mock } From d6e956de8dea70e0ae0e24595e4457c9c51d2cf8 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 17:09:29 +0100 Subject: [PATCH 11/46] complete tests --- .../localscanner/tls_issuer_test.go | 54 +++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index e8c1351babf8e..9d961ad846f20 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -82,12 +82,12 @@ type mockForStartConfig struct { refresherStartErr error } -func TestLocalScannerTLSIssuerStartSuccess(t *testing.T) { +func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { testCases := map[string]struct { getCertsErr error }{ "no error": {getCertsErr: nil}, - "missing secret data": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, + "missing secret data": {getCertsErr: ErrMissingSecretData}, "inconsistent CAs": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, "missing secret": {getCertsErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "foo")}, } @@ -95,10 +95,14 @@ func TestLocalScannerTLSIssuerStartSuccess(t *testing.T) { t.Run(tcName, func(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{getCertsErr: tc.getCertsErr}) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() + fixture.issuer.Stop(nil) assert.NoError(t, startErr) + assert.Nil(t, fixture.issuer.refresher) fixture.assertExpectations(t) }) } @@ -171,7 +175,51 @@ func TestLocalScannerTLSIssuerUnrecoverableGetCertsErrorStartFailure(t *testing. 
fixture.assertExpectations(t) } -// TODO sensor component interface methods +func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + expectedResponse := ¢ral.IssueLocalScannerCertsResponse{ + RequestId: "requestID", + } + msg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_IssueLocalScannerCertsResponse{ + IssueLocalScannerCertsResponse: expectedResponse, + }, + } + + go func() { + processErr := fixture.issuer.ProcessMessage(msg) + assert.NoError(t, processErr) + }() + + select { + case <-ctx.Done(): + assert.Fail(t, ctx.Err().Error()) + case response := <-fixture.issuer.msgFromCentralC: + assert.Equal(t, expectedResponse, response) + } +} + +func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + msg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_ReprocessDeployments{}, + } + + go func() { + processErr := fixture.issuer.ProcessMessage(msg) + assert.NoError(t, processErr) + }() + + select { + case <-ctx.Done(): + case <-fixture.issuer.msgFromCentralC: + assert.Fail(t, "unknown message is not ignored") + } +} func testK8sClient(conf testK8sClientConfig) *fake.Clientset { objects := make([]runtime.Object, 0) From d4f7ced8ee9b017e30a37ce079ceb9f106d83d1c Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 18:04:25 +0100 Subject: [PATCH 12/46] improve comment --- sensor/kubernetes/localscanner/tls_issuer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 6f04302ca024e..51725cedb3655 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ 
-77,6 +77,7 @@ type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, // Start launches a certificate refreshes that immediately checks the certificates, and that keeps them updated. // In case a secret doesn't have the expected owner, this logs a warning and returns nil. +// In case this component was already started it fail immediately. func (i *localScannerTLSIssuerImpl) Start() error { log.Info("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) From 39c6b8352bc7384c71fe70e57798a0dcef0a2c83 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 18:07:07 +0100 Subject: [PATCH 13/46] launch cert refresher if feature flag enabled --- sensor/kubernetes/sensor/sensor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index f1063d5620600..058aca96b0cdf 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -164,7 +164,7 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager } podOwnerName := os.Getenv("POD_OWNER_NAME") - if !features.LocalImageScanning.Enabled() { + if features.LocalImageScanning.Enabled() { components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podOwnerName)) } From efc86b6181d15bef539c6022fe048e4b2cdd0464 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 15 Feb 2022 18:08:09 +0100 Subject: [PATCH 14/46] minor refactor --- sensor/kubernetes/sensor/sensor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index 058aca96b0cdf..319697508cfe7 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -163,8 +163,8 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager return nil, 
errors.Wrap(err, "creating central client") } - podOwnerName := os.Getenv("POD_OWNER_NAME") if features.LocalImageScanning.Enabled() { + podOwnerName := os.Getenv("POD_OWNER_NAME") components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podOwnerName)) } From d13bf179e41f5e91cf6a72467bd1a1a1ff579cc6 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 17 Feb 2022 15:29:24 +0100 Subject: [PATCH 15/46] build sensor deployment owner reference from replica set owner --- sensor/kubernetes/localscanner/tls_issuer.go | 17 ++---- .../localscanner/tls_issuer_test.go | 56 ++++++------------- 2 files changed, 22 insertions(+), 51 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 51725cedb3655..5b2489e63bb50 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -10,6 +10,7 @@ import ( "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/sensor/common" + appsApiv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -178,21 +179,13 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co ownerReplicaSet.GetName(), replicaSetOwners) } replicaSetOwner := replicaSetOwners[0] - - deploymentClient := i.k8sClient.AppsV1().Deployments(i.sensorNamespace) - sensorDeployment, getSensorDeploymentErr := deploymentClient.Get(ctx, replicaSetOwner.Name, metav1.GetOptions{}) - if getSensorDeploymentErr != nil { - return nil, errors.Wrap(getReplicaSetErr, "fetching sensor deployment") - } - - sensorDeploymentGVK := sensorDeployment.GroupVersionKind() blockOwnerDeletion := false isController := false return &metav1.OwnerReference{ - APIVersion: sensorDeploymentGVK.GroupVersion().String(), - Kind: sensorDeploymentGVK.Kind, - Name: 
sensorDeployment.GetName(), - UID: sensorDeployment.GetUID(), + APIVersion: appsApiv1.SchemeGroupVersion.String(), + Kind: "Deployment", + Name: replicaSetOwner.Name, + UID: replicaSetOwner.UID, BlockOwnerDeletion: &blockOwnerDeletion, Controller: &isController, }, nil diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 9d961ad846f20..eb767386c86e4 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -24,7 +24,6 @@ import ( var ( sensorNamespace = "stackrox-ns" sensorReplicasetName = "sensor-replicaset" - sensorDeploymentName = "sensor-deployment" ) type localScannerTLSIssuerFixture struct { @@ -130,23 +129,13 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { fixture.assertExpectations(t) } -func TestLocalScannerTLSIssuerFetchSensorDeploymentErrorStartFailure(t *testing.T) { - testCases := map[string]struct { - k8sClientConfig testK8sClientConfig - }{ - "sensor replica set missing": {k8sClientConfig: testK8sClientConfig{skipSensorReplicaSet: true}}, - "sensor deployment set missing": {k8sClientConfig: testK8sClientConfig{skipSensorDeployment: true}}, - } - for tcName, tc := range testCases { - t.Run(tcName, func(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(tc.k8sClientConfig) +func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{skipSensorReplicaSet: true}) - startErr := fixture.issuer.Start() + startErr := fixture.issuer.Start() - assert.Error(t, startErr) - fixture.assertExpectations(t) - }) - } + assert.Error(t, startErr) + fixture.assertExpectations(t) } func TestLocalScannerTLSIssuerNoopOnUnexpectedSecretsOwner(t *testing.T) { @@ -223,32 +212,23 @@ func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { func testK8sClient(conf testK8sClientConfig) 
*fake.Clientset { objects := make([]runtime.Object, 0) - if !conf.skipSensorDeployment { - sensorDeployment := &appsApiv1.Deployment{ + if !conf.skipSensorReplicaSet { + sensorDeploymentGVK := sensorDeployment.GroupVersionKind() + sensorReplicaSet := &appsApiv1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ - Name: sensorDeploymentName, + Name: sensorReplicasetName, Namespace: sensorNamespace, - }, - } - objects = append(objects, sensorDeployment) - if !conf.skipSensorReplicaSet { - sensorDeploymentGVK := sensorDeployment.GroupVersionKind() - sensorReplicaSet := &appsApiv1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: sensorReplicasetName, - Namespace: sensorNamespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: sensorDeploymentGVK.GroupVersion().String(), - Kind: sensorDeploymentGVK.Kind, - Name: sensorDeployment.GetName(), - UID: sensorDeployment.GetUID(), - }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: sensorDeploymentGVK.GroupVersion().String(), + Kind: sensorDeploymentGVK.Kind, + Name: sensorDeployment.GetName(), + UID: sensorDeployment.GetUID(), }, }, - } - objects = append(objects, sensorReplicaSet) + }, } + objects = append(objects, sensorReplicaSet) } k8sClient := fake.NewSimpleClientset(objects...) @@ -257,8 +237,6 @@ func testK8sClient(conf testK8sClientConfig) *fake.Clientset { } type testK8sClientConfig struct { - // if true then no sensor deployment and no replica set will be added to the test client. - skipSensorDeployment bool // if true then no sensor replica set will be added to the test client. 
skipSensorReplicaSet bool } From 679e31e385226eb9a3a253f5109a8799e45d07bb Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 17 Feb 2022 15:31:18 +0100 Subject: [PATCH 16/46] fix typo --- sensor/kubernetes/localscanner/tls_issuer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 5b2489e63bb50..00fd94c7fe5e5 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -78,7 +78,7 @@ type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, // Start launches a certificate refreshes that immediately checks the certificates, and that keeps them updated. // In case a secret doesn't have the expected owner, this logs a warning and returns nil. -// In case this component was already started it fail immediately. +// In case this component was already started it fails immediately. func (i *localScannerTLSIssuerImpl) Start() error { log.Info("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) From 687e7f36d60f7cbd4316d1f5ba6f081a3149614e Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 17 Feb 2022 15:47:46 +0100 Subject: [PATCH 17/46] stop on start abort --- sensor/kubernetes/localscanner/tls_issuer.go | 18 +++++++++++++----- .../kubernetes/localscanner/tls_issuer_test.go | 10 ++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 00fd94c7fe5e5..a3cc49f206a0c 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -85,12 +85,12 @@ func (i *localScannerTLSIssuerImpl) Start() error { defer cancel() if i.refresher != nil { - return errors.New("already started") + return i.abortStart(errors.New("already started")) } sensorOwnerReference, 
fetchSensorDeploymentErr := i.fetchSensorDeploymentOwnerRef(ctx) if fetchSensorDeploymentErr != nil { - return errors.Wrap(fetchSensorDeploymentErr, "fetching sensor deployment") + return i.abortStart(errors.Wrap(fetchSensorDeploymentErr, "fetching sensor deployment")) } certsRepo := i.serviceCertificatesRepoSupplier(*sensorOwnerReference, i.sensorNamespace, @@ -99,10 +99,10 @@ func (i *localScannerTLSIssuerImpl) Start() error { if errors.Is(getCertsErr, ErrUnexpectedSecretsOwner) { log.Warn("local scanner certificates secrets are not owned by sensor deployment, " + "sensor will not maintain those secrets up to date") - return nil + return i.abortStart(nil) } if getCertsErr != nil && !getCertsErrorIsRecoverable(getCertsErr) { - return errors.Wrap(getCertsErr, "checking certificate secrets ownership") + return i.abortStart(errors.Wrap(getCertsErr, "checking certificate secrets ownership")) } // i.refresher can recover from any err such that getCertsErrorIsRecoverable(err) @@ -110,13 +110,21 @@ func (i *localScannerTLSIssuerImpl) Start() error { certRefreshTimeout, certRefreshBackoff) i.requester.Start() if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { - return errors.Wrap(refreshStartErr, "starting certificate refresher") + return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate refresher")) } log.Info("local scanner TLS issuer started.") return nil } +func (i *localScannerTLSIssuerImpl) abortStart(err error) error { + if err != nil { + log.Errorf("local scanner TLS issuer start aborted due to error: %s", err) + } + i.Stop(err) + return err +} + func (i *localScannerTLSIssuerImpl) Stop(err error) { if i.refresher != nil { i.refresher.Stop() diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index eb767386c86e4..afbe872e4f1e1 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -110,6 +110,8 @@ func 
TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{refresherStartErr: errForced}) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() @@ -120,6 +122,8 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{}) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() secondStartErr := fixture.issuer.Start() @@ -131,6 +135,8 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{skipSensorReplicaSet: true}) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() @@ -144,6 +150,8 @@ func TestLocalScannerTLSIssuerNoopOnUnexpectedSecretsOwner(t *testing.T) { mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) fixture.repo.On("getServiceCertificates", mock.Anything).Once(). Return((*storage.TypedServiceCertificateSet)(nil), errors.Wrap(ErrUnexpectedSecretsOwner, "forced error")) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() @@ -157,6 +165,8 @@ func TestLocalScannerTLSIssuerUnrecoverableGetCertsErrorStartFailure(t *testing. mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) fixture.repo.On("getServiceCertificates", mock.Anything).Once(). 
Return((*storage.TypedServiceCertificateSet)(nil), errForced) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() startErr := fixture.issuer.Start() From 24e6a078f7fde5c1215b9434c597acec720eb929 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 17 Feb 2022 17:04:11 +0100 Subject: [PATCH 18/46] check secret ownership in refresher, to do it with retries --- sensor/kubernetes/localscanner/tls_issuer.go | 38 +++++++++++-------- .../localscanner/tls_issuer_test.go | 31 --------------- 2 files changed, 23 insertions(+), 46 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index a3cc49f206a0c..bd185a70c6a24 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -11,16 +11,25 @@ import ( "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/sensor/common" appsApiv1 "k8s.io/api/apps/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/util/retry" ) var ( log = logging.LoggerForModule() - startTimeout = 5 * time.Minute + startTimeout = 6 * time.Minute + fetchSensorDeploymentOwnerRefBackoff = wait.Backoff{ + Duration: 10 * time.Millisecond, + Factor: 3, + Jitter: 0.1, + Steps: 10, + Cap: 100 * time.Minute, + } processMessageTimeout = 5 * time.Second certRefreshTimeout = 5 * time.Minute certRefreshBackoff = wait.Backoff{ @@ -88,26 +97,16 @@ func (i *localScannerTLSIssuerImpl) Start() error { return i.abortStart(errors.New("already started")) } - sensorOwnerReference, fetchSensorDeploymentErr := i.fetchSensorDeploymentOwnerRef(ctx) + sensorOwnerReference, fetchSensorDeploymentErr := i.fetchSensorDeploymentOwnerRef(ctx, fetchSensorDeploymentOwnerRefBackoff) if fetchSensorDeploymentErr != nil { return 
i.abortStart(errors.Wrap(fetchSensorDeploymentErr, "fetching sensor deployment")) } certsRepo := i.serviceCertificatesRepoSupplier(*sensorOwnerReference, i.sensorNamespace, i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - _, getCertsErr := certsRepo.getServiceCertificates(ctx) - if errors.Is(getCertsErr, ErrUnexpectedSecretsOwner) { - log.Warn("local scanner certificates secrets are not owned by sensor deployment, " + - "sensor will not maintain those secrets up to date") - return i.abortStart(nil) - } - if getCertsErr != nil && !getCertsErrorIsRecoverable(getCertsErr) { - return i.abortStart(errors.Wrap(getCertsErr, "checking certificate secrets ownership")) - } - - // i.refresher can recover from any err such that getCertsErrorIsRecoverable(err) i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certsRepo, certRefreshTimeout, certRefreshBackoff) + i.requester.Start() if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate refresher")) @@ -170,13 +169,22 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err } } -func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context) (*metav1.OwnerReference, error) { +func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context, + backoff wait.Backoff) (*metav1.OwnerReference, error) { + if i.podOwnerName == "" { return nil, errors.New("fetching sensor deployment: empty pod owner name") } replicaSetClient := i.k8sClient.AppsV1().ReplicaSets(i.sensorNamespace) - ownerReplicaSet, getReplicaSetErr := replicaSetClient.Get(ctx, i.podOwnerName, metav1.GetOptions{}) + var ownerReplicaSet *appsApiv1.ReplicaSet + getReplicaSetErr := retry.OnError(backoff, func(err error) bool { + return !k8sErrors.IsNotFound(err) + }, func() error { + replicaSet, getReplicaSetErr := replicaSetClient.Get(ctx, i.podOwnerName, metav1.GetOptions{}) + ownerReplicaSet 
= replicaSet + return getReplicaSetErr + }) if getReplicaSetErr != nil { return nil, errors.Wrap(getReplicaSetErr, "fetching owner replica set") } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index afbe872e4f1e1..13bdc03c32b04 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" @@ -144,36 +143,6 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * fixture.assertExpectations(t) } -func TestLocalScannerTLSIssuerNoopOnUnexpectedSecretsOwner(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) - fixture.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, - mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) - fixture.repo.On("getServiceCertificates", mock.Anything).Once(). - Return((*storage.TypedServiceCertificateSet)(nil), errors.Wrap(ErrUnexpectedSecretsOwner, "forced error")) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() - - startErr := fixture.issuer.Start() - - assert.NoError(t, startErr) - fixture.assertExpectations(t) -} - -func TestLocalScannerTLSIssuerUnrecoverableGetCertsErrorStartFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) - fixture.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, - mock.Anything, mock.Anything).Once().Return(fixture.repo, nil) - fixture.repo.On("getServiceCertificates", mock.Anything).Once(). 
- Return((*storage.TypedServiceCertificateSet)(nil), errForced) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() - - startErr := fixture.issuer.Start() - - assert.ErrorIs(t, startErr, errForced) - fixture.assertExpectations(t) -} - func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() From 295f460b4e789a1b6a605160ce0f4a9589f95feb Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 17 Feb 2022 17:29:03 +0100 Subject: [PATCH 19/46] fix cap of fetchSensorDeploymentOwnerRefBackoff --- sensor/kubernetes/localscanner/tls_issuer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index bd185a70c6a24..2e94927f08f56 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -28,7 +28,7 @@ var ( Factor: 3, Jitter: 0.1, Steps: 10, - Cap: 100 * time.Minute, + Cap: startTimeout, } processMessageTimeout = 5 * time.Second certRefreshTimeout = 5 * time.Minute From 525167cf7e19e19d41be77c8caedf6c6cf91ec02 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Mon, 21 Feb 2022 09:48:56 +0100 Subject: [PATCH 20/46] always log error on abort --- sensor/kubernetes/localscanner/tls_issuer.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 2e94927f08f56..963dfe08db429 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -117,9 +117,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { } func (i *localScannerTLSIssuerImpl) abortStart(err error) error { - if err != nil { - log.Errorf("local scanner TLS issuer start aborted due to error: %s", err) - } + log.Errorf("local scanner TLS issuer start aborted due 
to error: %s", err) i.Stop(err) return err } From e4ad886bc0e11e8b1420b291f7e2dae2d4ccce77 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Mon, 21 Feb 2022 09:54:08 +0100 Subject: [PATCH 21/46] do not hardcode api version and kind in owner ref of replica set owner --- sensor/kubernetes/localscanner/tls_issuer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 963dfe08db429..47d4d93cf2a97 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -196,8 +196,8 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co blockOwnerDeletion := false isController := false return &metav1.OwnerReference{ - APIVersion: appsApiv1.SchemeGroupVersion.String(), - Kind: "Deployment", + APIVersion: replicaSetOwner.APIVersion, + Kind: replicaSetOwner.Kind, Name: replicaSetOwner.Name, UID: replicaSetOwner.UID, BlockOwnerDeletion: &blockOwnerDeletion, From 37f8d85cf5e9391d55ee10f630c989720919ce74 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Mon, 21 Feb 2022 18:41:12 +0100 Subject: [PATCH 22/46] fetch deployment owner ref starting with sensor name as "metadata.ownerReferences" cannot be used in fieldRef --- .../templates/sensor.yaml.htpl | 4 +- sensor/kubernetes/localscanner/tls_issuer.go | 71 ++++++++++++++----- .../localscanner/tls_issuer_test.go | 49 ++++++++++--- sensor/kubernetes/sensor/sensor.go | 4 +- 4 files changed, 98 insertions(+), 30 deletions(-) diff --git a/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl b/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl index 4cd95ef837a1f..77b4003811b5b 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl +++ b/image/templates/helm/stackrox-secured-cluster/templates/sensor.yaml.htpl @@ -108,10 +108,10 @@ spec: valueFrom: fieldRef: 
fieldPath: metadata.namespace - - name: POD_OWNER_NAME + - name: POD_NAME valueFrom: fieldRef: - fieldPath: metadata.ownerReferences[0].name + fieldPath: metadata.name - name: ROX_CENTRAL_ENDPOINT value: {{ ._rox.centralEndpoint }} - name: ROX_ADVERTISED_ENDPOINT diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 47d4d93cf2a97..e2cfc8cc94647 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -10,7 +10,6 @@ import ( "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/sensor/common" - appsApiv1 "k8s.io/api/apps/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -45,12 +44,12 @@ var ( // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates // up to date, using the specified retry parameters. 
func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string, - podOwnerName string) common.SensorComponent { + sensorPodName string) common.SensorComponent { msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ sensorNamespace: sensorNamespace, - podOwnerName: podOwnerName, + sensorPodName: sensorPodName, k8sClient: k8sClient, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, @@ -62,7 +61,7 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st type localScannerTLSIssuerImpl struct { sensorNamespace string - podOwnerName string + sensorPodName string k8sClient kubernetes.Interface msgToCentralC chan *central.MsgFromSensor msgFromCentralC chan *central.IssueLocalScannerCertsResponse @@ -170,29 +169,48 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context, backoff wait.Backoff) (*metav1.OwnerReference, error) { - if i.podOwnerName == "" { - return nil, errors.New("fetching sensor deployment: empty pod owner name") + if i.sensorPodName == "" { + return nil, errors.New("fetching sensor deployment: empty pod name") } - replicaSetClient := i.k8sClient.AppsV1().ReplicaSets(i.sensorNamespace) - var ownerReplicaSet *appsApiv1.ReplicaSet - getReplicaSetErr := retry.OnError(backoff, func(err error) bool { - return !k8sErrors.IsNotFound(err) - }, func() error { - replicaSet, getReplicaSetErr := replicaSetClient.Get(ctx, i.podOwnerName, metav1.GetOptions{}) - ownerReplicaSet = replicaSet - return getReplicaSetErr + podsClient := i.k8sClient.CoreV1().Pods(i.sensorNamespace) + sensorPodMeta, getPodErr := i.getObjectMetaWithRetries(ctx, backoff, func(ctx context.Context) (metav1.Object, error) { + pod, err := podsClient.Get(ctx, i.sensorPodName, metav1.GetOptions{}) + if err != nil { + return nil, 
err + } + return pod.GetObjectMeta(), nil }) + if getPodErr != nil { + return nil, errors.Wrap(getPodErr, "fetching sensor pod") + } + podOwners := sensorPodMeta.GetOwnerReferences() + if len(podOwners) != 1 { + return nil, errors.Errorf("pod %q has unexpected owners %v", + i.sensorPodName, podOwners) + } + podOwnerName := podOwners[0].Name + + replicaSetClient := i.k8sClient.AppsV1().ReplicaSets(i.sensorNamespace) + ownerReplicaSetMeta, getReplicaSetErr := i.getObjectMetaWithRetries(ctx, backoff, + func(ctx context.Context) (metav1.Object, error) { + replicaSet, err := replicaSetClient.Get(ctx, podOwnerName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return replicaSet.GetObjectMeta(), nil + }) if getReplicaSetErr != nil { return nil, errors.Wrap(getReplicaSetErr, "fetching owner replica set") } - - replicaSetOwners := ownerReplicaSet.GetObjectMeta().GetOwnerReferences() + replicaSetOwners := ownerReplicaSetMeta.GetOwnerReferences() if len(replicaSetOwners) != 1 { - return nil, errors.Errorf("fetching sensor deployment: replica set %q has unexpected owners %v", - ownerReplicaSet.GetName(), replicaSetOwners) + return nil, errors.Errorf("replica set %q has unexpected owners %v", + ownerReplicaSetMeta.GetName(), + replicaSetOwners) } replicaSetOwner := replicaSetOwners[0] + blockOwnerDeletion := false isController := false return &metav1.OwnerReference{ @@ -204,3 +222,20 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co Controller: &isController, }, nil } + +func (i *localScannerTLSIssuerImpl) getObjectMetaWithRetries(ctx context.Context, backoff wait.Backoff, + getObject func(context.Context) (metav1.Object, error)) (metav1.Object, error) { + + var object metav1.Object + getErr := retry.OnError(backoff, func(err error) bool { + return !k8sErrors.IsNotFound(err) + }, func() error { + newObject, err := getObject(ctx) + if err == nil { + object = newObject + } + return err + }) + + return object, getErr +} diff --git 
a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 13bdc03c32b04..1c9acc6389ad6 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" appsApiv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -23,6 +24,7 @@ import ( var ( sensorNamespace = "stackrox-ns" sensorReplicasetName = "sensor-replicaset" + sensorPodName = "sensor-pod" ) type localScannerTLSIssuerFixture struct { @@ -45,7 +47,7 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *local msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) fixture.issuer = &localScannerTLSIssuerImpl{ sensorNamespace: sensorNamespace, - podOwnerName: sensorReplicasetName, + sensorPodName: sensorPodName, k8sClient: fixture.k8sClient, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, @@ -133,14 +135,24 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { } func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{skipSensorReplicaSet: true}) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() + testCases := map[string]struct { + testK8sClientConfig testK8sClientConfig + }{ + "sensor replica set missing": {testK8sClientConfig: testK8sClientConfig{skipSensorReplicaSet: true}}, + "sensor pod missing": {testK8sClientConfig: testK8sClientConfig{skipSensorPod: true}}, + } + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(tc.testK8sClientConfig) + fixture.refresher.On("Stop").Once() + fixture.requester.On("Stop").Once() - 
startErr := fixture.issuer.Start() + startErr := fixture.issuer.Start() - assert.Error(t, startErr) - fixture.assertExpectations(t) + assert.Error(t, startErr) + fixture.assertExpectations(t) + }) + } } func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { @@ -208,6 +220,25 @@ func testK8sClient(conf testK8sClientConfig) *fake.Clientset { }, } objects = append(objects, sensorReplicaSet) + + if !conf.skipSensorPod { + sensorReplicaSetGVK := sensorReplicaSet.GroupVersionKind() + sensorPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: sensorPodName, + Namespace: sensorNamespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: sensorReplicaSetGVK.GroupVersion().String(), + Kind: sensorReplicaSet.Kind, + Name: sensorReplicaSet.GetName(), + UID: sensorReplicaSet.GetUID(), + }, + }, + }, + } + objects = append(objects, sensorPod) + } } k8sClient := fake.NewSimpleClientset(objects...) @@ -216,8 +247,10 @@ func testK8sClient(conf testK8sClientConfig) *fake.Clientset { } type testK8sClientConfig struct { - // if true then no sensor replica set will be added to the test client. + // if true then no sensor replica set and no sensor pod will be added to the test client. skipSensorReplicaSet bool + // if true then no sensor pod will be added to the test client.
+ skipSensorPod bool } type certificateRequesterMock struct { diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index 319697508cfe7..a00414eaed6b3 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -164,8 +164,8 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager } if features.LocalImageScanning.Enabled() { - podOwnerName := os.Getenv("POD_OWNER_NAME") - components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podOwnerName)) + podName := os.Getenv("POD_NAME") + components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podName)) } s := sensor.NewSensor( From f5ecb77eb585c6c2d18b3131abb5a06ff546fc04 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 10:01:00 +0100 Subject: [PATCH 23/46] fix function header indentation --- sensor/kubernetes/localscanner/tls_issuer.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index e2cfc8cc94647..54411409348ea 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -43,8 +43,7 @@ var ( // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates // up to date, using the specified retry parameters. 
-func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string, - sensorPodName string) common.SensorComponent { +func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string, sensorPodName string) common.SensorComponent { msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ @@ -166,9 +165,7 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err } } -func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context, - backoff wait.Backoff) (*metav1.OwnerReference, error) { - +func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Context, backoff wait.Backoff) (*metav1.OwnerReference, error) { if i.sensorPodName == "" { return nil, errors.New("fetching sensor deployment: empty pod name") } @@ -223,9 +220,11 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co }, nil } -func (i *localScannerTLSIssuerImpl) getObjectMetaWithRetries(ctx context.Context, backoff wait.Backoff, - getObject func(context.Context) (metav1.Object, error)) (metav1.Object, error) { - +func (i *localScannerTLSIssuerImpl) getObjectMetaWithRetries( + ctx context.Context, + backoff wait.Backoff, + getObject func(context.Context) (metav1.Object, error), +) (metav1.Object, error) { var object metav1.Object getErr := retry.OnError(backoff, func(err error) bool { return !k8sErrors.IsNotFound(err) From 267875bc76eb3b3047b039fb33d1d7b8a2570dde Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 10:13:44 +0100 Subject: [PATCH 24/46] adjust logging levels --- sensor/kubernetes/localscanner/tls_issuer.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 54411409348ea..eadd3b04f61d3 100644 --- 
a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -83,11 +83,12 @@ type certificateRefresherSupplier func(requestCertificates requestCertificatesFu type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, namespace string, secretsClient corev1.SecretInterface) serviceCertificatesRepo -// Start launches a certificate refreshes that immediately checks the certificates, and that keeps them updated. +// Start starts the sensor component and launches a certificate refreshes that immediately checks the certificates, and +// that keeps them updated. // In case a secret doesn't have the expected owner, this logs a warning and returns nil. // In case this component was already started it fails immediately. func (i *localScannerTLSIssuerImpl) Start() error { - log.Info("starting local scanner TLS issuer.") + log.Debug("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) defer cancel() @@ -110,7 +111,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate refresher")) } - log.Info("local scanner TLS issuer started.") + log.Debug("local scanner TLS issuer started.") return nil } @@ -127,7 +128,7 @@ func (i *localScannerTLSIssuerImpl) Stop(err error) { } i.requester.Stop() - log.Info("local scanner TLS issuer stopped.") + log.Debug("local scanner TLS issuer stopped.") } func (i *localScannerTLSIssuerImpl) Capabilities() []centralsensor.SensorCapability { From 93f1b0f68c9bb58c7d17de03c627ea5cfec6bc78 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:15:09 +0100 Subject: [PATCH 25/46] rename supplier to getter --- sensor/kubernetes/localscanner/tls_issuer.go | 42 +++++++++---------- .../localscanner/tls_issuer_test.go | 42 +++++++++---------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go 
b/sensor/kubernetes/localscanner/tls_issuer.go index eadd3b04f61d3..4c6eb781dd741 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -47,27 +47,27 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ - sensorNamespace: sensorNamespace, - sensorPodName: sensorPodName, - k8sClient: k8sClient, - msgToCentralC: msgToCentralC, - msgFromCentralC: msgFromCentralC, - certificateRefresherSupplier: newCertificatesRefresher, - serviceCertificatesRepoSupplier: newServiceCertificatesRepo, - requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), + sensorNamespace: sensorNamespace, + sensorPodName: sensorPodName, + k8sClient: k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + getCertificateRefresherFn: newCertificatesRefresher, + getServiceCertificatesRepoFn: newServiceCertificatesRepo, + requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } type localScannerTLSIssuerImpl struct { - sensorNamespace string - sensorPodName string - k8sClient kubernetes.Interface - msgToCentralC chan *central.MsgFromSensor - msgFromCentralC chan *central.IssueLocalScannerCertsResponse - certificateRefresherSupplier certificateRefresherSupplier - serviceCertificatesRepoSupplier serviceCertificatesRepoSupplier - requester CertificateRequester - refresher concurrency.RetryTicker + sensorNamespace string + sensorPodName string + k8sClient kubernetes.Interface + msgToCentralC chan *central.MsgFromSensor + msgFromCentralC chan *central.IssueLocalScannerCertsResponse + getCertificateRefresherFn certificateRefresherGetter + getServiceCertificatesRepoFn serviceCertificatesRepoGetter + requester CertificateRequester + refresher concurrency.RetryTicker } // CertificateRequester requests a new set of local 
scanner certificates from central. @@ -77,10 +77,10 @@ type CertificateRequester interface { RequestCertificates(ctx context.Context) (*central.IssueLocalScannerCertsResponse, error) } -type certificateRefresherSupplier func(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, +type certificateRefresherGetter func(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker -type serviceCertificatesRepoSupplier func(ownerReference metav1.OwnerReference, namespace string, +type serviceCertificatesRepoGetter func(ownerReference metav1.OwnerReference, namespace string, secretsClient corev1.SecretInterface) serviceCertificatesRepo // Start starts the sensor component and launches a certificate refreshes that immediately checks the certificates, and @@ -101,9 +101,9 @@ func (i *localScannerTLSIssuerImpl) Start() error { return i.abortStart(errors.Wrap(fetchSensorDeploymentErr, "fetching sensor deployment")) } - certsRepo := i.serviceCertificatesRepoSupplier(*sensorOwnerReference, i.sensorNamespace, + certsRepo := i.getServiceCertificatesRepoFn(*sensorOwnerReference, i.sensorNamespace, i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - i.refresher = i.certificateRefresherSupplier(i.requester.RequestCertificates, certsRepo, + i.refresher = i.getCertificateRefresherFn(i.requester.RequestCertificates, certsRepo, certRefreshTimeout, certRefreshBackoff) i.requester.Start() diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 1c9acc6389ad6..f1579b9f77ca9 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -28,12 +28,12 @@ var ( ) type localScannerTLSIssuerFixture struct { - k8sClient *fake.Clientset - requester *certificateRequesterMock - refresher *certificateRefresherMock - repo *certsRepoMock - supplier *suppliersMock - 
issuer *localScannerTLSIssuerImpl + k8sClient *fake.Clientset + requester *certificateRequesterMock + refresher *certificateRefresherMock + repo *certsRepoMock + componentGetter *componentGetterMock + issuer *localScannerTLSIssuerImpl } func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *localScannerTLSIssuerFixture { @@ -41,19 +41,19 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *local fixture.requester = &certificateRequesterMock{} fixture.refresher = &certificateRefresherMock{} fixture.repo = &certsRepoMock{} - fixture.supplier = &suppliersMock{} + fixture.componentGetter = &componentGetterMock{} fixture.k8sClient = testK8sClient(k8sClientConfig) msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) fixture.issuer = &localScannerTLSIssuerImpl{ - sensorNamespace: sensorNamespace, - sensorPodName: sensorPodName, - k8sClient: fixture.k8sClient, - msgToCentralC: msgToCentralC, - msgFromCentralC: msgFromCentralC, - certificateRefresherSupplier: fixture.supplier.supplyCertificateRefresher, - serviceCertificatesRepoSupplier: fixture.supplier.supplyServiceCertificatesRepoSupplier, - requester: fixture.requester, + sensorNamespace: sensorNamespace, + sensorPodName: sensorPodName, + k8sClient: fixture.k8sClient, + msgToCentralC: msgToCentralC, + msgFromCentralC: msgFromCentralC, + getCertificateRefresherFn: fixture.componentGetter.getCertificateRefresher, + getServiceCertificatesRepoFn: fixture.componentGetter.getServiceCertificatesRepo, + requester: fixture.requester, } return fixture @@ -62,7 +62,7 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *local func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { f.requester.AssertExpectations(t) f.requester.AssertExpectations(t) - f.supplier.AssertExpectations(t) + f.componentGetter.AssertExpectations(t) } // mockForStart setups the mocks for the happy path of 
Start @@ -71,9 +71,9 @@ func (f *localScannerTLSIssuerFixture) mockForStart(conf mockForStartConfig) { f.refresher.On("Start").Once().Return(conf.refresherStartErr) f.repo.On("getServiceCertificates", mock.Anything).Once(). Return((*storage.TypedServiceCertificateSet)(nil), conf.getCertsErr) - f.supplier.On("supplyServiceCertificatesRepoSupplier", mock.Anything, + f.componentGetter.On("getServiceCertificatesRepo", mock.Anything, mock.Anything, mock.Anything).Once().Return(f.repo, nil) - f.supplier.On("supplyCertificateRefresher", mock.Anything, f.repo, + f.componentGetter.On("getCertificateRefresher", mock.Anything, f.repo, certRefreshTimeout, certRefreshBackoff).Once().Return(f.refresher) } @@ -281,17 +281,17 @@ func (m *certificateRefresherMock) Stop() { m.Called() } -type suppliersMock struct { +type componentGetterMock struct { mock.Mock } -func (m *suppliersMock) supplyCertificateRefresher(requestCertificates requestCertificatesFunc, +func (m *componentGetterMock) getCertificateRefresher(requestCertificates requestCertificatesFunc, repository serviceCertificatesRepo, timeout time.Duration, backoff wait.Backoff) concurrency.RetryTicker { args := m.Called(requestCertificates, repository, timeout, backoff) return args.Get(0).(concurrency.RetryTicker) } -func (m *suppliersMock) supplyServiceCertificatesRepoSupplier(ownerReference metav1.OwnerReference, namespace string, +func (m *componentGetterMock) getServiceCertificatesRepo(ownerReference metav1.OwnerReference, namespace string, secretsClient corev1.SecretInterface) serviceCertificatesRepo { args := m.Called(ownerReference, namespace, secretsClient) return args.Get(0).(serviceCertificatesRepo) From 51d4d299b388e5ab6ec5cad83e4f77e609ba1fa8 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:17:58 +0100 Subject: [PATCH 26/46] improve comments --- sensor/kubernetes/localscanner/tls_issuer.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git 
a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 4c6eb781dd741..85c3499bf401f 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -121,7 +121,7 @@ func (i *localScannerTLSIssuerImpl) abortStart(err error) error { return err } -func (i *localScannerTLSIssuerImpl) Stop(err error) { +func (i *localScannerTLSIssuerImpl) Stop(_ error) { if i.refresher != nil { i.refresher.Stop() i.refresher = nil @@ -141,7 +141,7 @@ func (i *localScannerTLSIssuerImpl) ResponsesC() <-chan *central.MsgFromSensor { return i.msgToCentralC } -// ProcessMessage is how the central receiver delivers messages from central to SensorComponents. +// ProcessMessage dispatches messages received from Central via the central receiver. // This method must not block as it would prevent centralReceiverImpl from sending messages // to other SensorComponents. func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) error { @@ -160,8 +160,7 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err }() return nil default: - // silently ignore other messages broadcasted by centralReceiverImpl, as centralReceiverImpl logs - // all returned errors with error level. + // messages not supported by this component are ignored because unknown message types are handled by the central receiver.
return nil } } From fe0ae6aae036efa3f8d20545308aa1d439ad52d3 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:25:24 +0100 Subject: [PATCH 27/46] minor code formatting --- sensor/kubernetes/localscanner/tls_issuer.go | 8 +-- .../localscanner/tls_issuer_test.go | 57 ++++++++++--------- 2 files changed, 35 insertions(+), 30 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 85c3499bf401f..607d1874b9f00 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -88,7 +88,7 @@ type serviceCertificatesRepoGetter func(ownerReference metav1.OwnerReference, na // In case a secret doesn't have the expected owner, this logs a warning and returns nil. // In case this component was already started it fails immediately. func (i *localScannerTLSIssuerImpl) Start() error { - log.Debug("starting local scanner TLS issuer.") + log.Debug("starting local scanner TLS tlsIssuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) defer cancel() @@ -111,12 +111,12 @@ func (i *localScannerTLSIssuerImpl) Start() error { return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate refresher")) } - log.Debug("local scanner TLS issuer started.") + log.Debug("local scanner TLS tlsIssuer started.") return nil } func (i *localScannerTLSIssuerImpl) abortStart(err error) error { - log.Errorf("local scanner TLS issuer start aborted due to error: %s", err) + log.Errorf("local scanner TLS tlsIssuer start aborted due to error: %s", err) i.Stop(err) return err } @@ -128,7 +128,7 @@ func (i *localScannerTLSIssuerImpl) Stop(_ error) { } i.requester.Stop() - log.Debug("local scanner TLS issuer stopped.") + log.Debug("local scanner TLS tlsIssuer stopped.") } func (i *localScannerTLSIssuerImpl) Capabilities() []centralsensor.SensorCapability { diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go 
b/sensor/kubernetes/localscanner/tls_issuer_test.go index f1579b9f77ca9..3cb00918b2594 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -8,6 +8,7 @@ import ( "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" appsApiv1 "k8s.io/api/apps/v1" @@ -33,19 +34,20 @@ type localScannerTLSIssuerFixture struct { refresher *certificateRefresherMock repo *certsRepoMock componentGetter *componentGetterMock - issuer *localScannerTLSIssuerImpl + tlsIssuer *localScannerTLSIssuerImpl } func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *localScannerTLSIssuerFixture { - fixture := &localScannerTLSIssuerFixture{} - fixture.requester = &certificateRequesterMock{} - fixture.refresher = &certificateRefresherMock{} - fixture.repo = &certsRepoMock{} - fixture.componentGetter = &componentGetterMock{} - fixture.k8sClient = testK8sClient(k8sClientConfig) + fixture := &localScannerTLSIssuerFixture{ + requester: &certificateRequesterMock{}, + refresher: &certificateRefresherMock{}, + repo: &certsRepoMock{}, + componentGetter: &componentGetterMock{}, + k8sClient: testK8sClient(k8sClientConfig), + } msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) - fixture.issuer = &localScannerTLSIssuerImpl{ + fixture.tlsIssuer = &localScannerTLSIssuerImpl{ sensorNamespace: sensorNamespace, sensorPodName: sensorPodName, k8sClient: fixture.k8sClient, @@ -59,7 +61,7 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *local return fixture } -func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { +func (f *localScannerTLSIssuerFixture) assertMockExpectations(t *testing.T) { f.requester.AssertExpectations(t) 
f.requester.AssertExpectations(t) f.componentGetter.AssertExpectations(t) @@ -69,10 +71,13 @@ func (f *localScannerTLSIssuerFixture) assertExpectations(t *testing.T) { func (f *localScannerTLSIssuerFixture) mockForStart(conf mockForStartConfig) { f.requester.On("Start").Once() f.refresher.On("Start").Once().Return(conf.refresherStartErr) + f.repo.On("getServiceCertificates", mock.Anything).Once(). Return((*storage.TypedServiceCertificateSet)(nil), conf.getCertsErr) + f.componentGetter.On("getServiceCertificatesRepo", mock.Anything, mock.Anything, mock.Anything).Once().Return(f.repo, nil) + f.componentGetter.On("getCertificateRefresher", mock.Anything, f.repo, certRefreshTimeout, certRefreshBackoff).Once().Return(f.refresher) } @@ -89,7 +94,7 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { "no error": {getCertsErr: nil}, "missing secret data": {getCertsErr: ErrMissingSecretData}, "inconsistent CAs": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, - "missing secret": {getCertsErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "foo")}, + "missing secret": {getCertsErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "scanner-db-slim-tls")}, } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { @@ -98,12 +103,12 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() - startErr := fixture.issuer.Start() - fixture.issuer.Stop(nil) + startErr := fixture.tlsIssuer.Start() + fixture.tlsIssuer.Stop(nil) assert.NoError(t, startErr) - assert.Nil(t, fixture.issuer.refresher) - fixture.assertExpectations(t) + assert.Nil(t, fixture.tlsIssuer.refresher) + fixture.assertMockExpectations(t) }) } } @@ -114,10 +119,10 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() - startErr := 
fixture.issuer.Start() + startErr := fixture.tlsIssuer.Start() assert.Error(t, startErr) - fixture.assertExpectations(t) + fixture.assertMockExpectations(t) } func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { @@ -126,12 +131,12 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() - startErr := fixture.issuer.Start() - secondStartErr := fixture.issuer.Start() + startErr := fixture.tlsIssuer.Start() + secondStartErr := fixture.tlsIssuer.Start() assert.NoError(t, startErr) assert.Error(t, secondStartErr) - fixture.assertExpectations(t) + fixture.assertMockExpectations(t) } func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { @@ -147,10 +152,10 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() - startErr := fixture.issuer.Start() + startErr := fixture.tlsIssuer.Start() assert.Error(t, startErr) - fixture.assertExpectations(t) + fixture.assertMockExpectations(t) }) } } @@ -160,7 +165,7 @@ func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { defer cancel() fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) expectedResponse := &central.IssueLocalScannerCertsResponse{ - RequestId: "requestID", + RequestId: uuid.NewDummy().String(), } msg := &central.MsgToSensor{ Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ @@ -169,14 +174,14 @@ } go func() { - processErr := fixture.issuer.ProcessMessage(msg) + processErr := fixture.tlsIssuer.ProcessMessage(msg) assert.NoError(t, processErr) }() select { case <-ctx.Done(): assert.Fail(t, ctx.Err().Error()) - case response := <-fixture.issuer.msgFromCentralC: + case response := <-fixture.tlsIssuer.msgFromCentralC: assert.Equal(t, expectedResponse, response) } } @@ -190,13
+195,13 @@ func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { } go func() { - processErr := fixture.issuer.ProcessMessage(msg) + processErr := fixture.tlsIssuer.ProcessMessage(msg) assert.NoError(t, processErr) }() select { case <-ctx.Done(): - case <-fixture.issuer.msgFromCentralC: + case <-fixture.tlsIssuer.msgFromCentralC: assert.Fail(t, "unknown message is not ignored") } } From 27ae53420e30720e70886e72e446c434d0ec0a66 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:37:21 +0100 Subject: [PATCH 28/46] wait for assertion on process message --- sensor/kubernetes/localscanner/tls_issuer_test.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 3cb00918b2594..78100ef66ed98 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -163,6 +163,7 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() + processMessageDoneSignal := concurrency.NewErrorSignal() fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) expectedResponse := &central.IssueLocalScannerCertsResponse{ RequestId: uuid.NewDummy().String(), @@ -174,8 +175,8 @@ } go func() { - processErr := fixture.tlsIssuer.ProcessMessage(msg) - assert.NoError(t, processErr) + assert.NoError(t, fixture.tlsIssuer.ProcessMessage(msg)) + processMessageDoneSignal.Signal() }() select { @@ -184,19 +185,23 @@ case response := <-fixture.tlsIssuer.msgFromCentralC: assert.Equal(t, expectedResponse, response) } + + _, ok :=
processMessageDoneSignal.WaitWithTimeout(100 * time.Millisecond) + assert.True(t, ok) } func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() + processMessageDoneSignal := concurrency.NewErrorSignal() fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) msg := &central.MsgToSensor{ Msg: &central.MsgToSensor_ReprocessDeployments{}, } go func() { - processErr := fixture.tlsIssuer.ProcessMessage(msg) - assert.NoError(t, processErr) + assert.NoError(t, fixture.tlsIssuer.ProcessMessage(msg)) + processMessageDoneSignal.Signal() }() select { @@ -204,6 +209,8 @@ case <-fixture.tlsIssuer.msgFromCentralC: assert.Fail(t, "unknown message is not ignored") } + _, ok := processMessageDoneSignal.WaitWithTimeout(100 * time.Millisecond) + assert.True(t, ok) } func testK8sClient(conf testK8sClientConfig) *fake.Clientset { From e3a738131c835edf330c9cd25cef47797652bdec Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:49:35 +0100 Subject: [PATCH 29/46] improve naming of test struct --- .../localscanner/tls_issuer_test.go | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 78100ef66ed98..94358a1ee4e38 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -37,13 +37,13 @@ type localScannerTLSIssuerFixture struct { tlsIssuer *localScannerTLSIssuerImpl } -func newLocalScannerTLSIssuerFixture(k8sClientConfig testK8sClientConfig) *localScannerTLSIssuerFixture { +func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *localScannerTLSIssuerFixture { fixture := &localScannerTLSIssuerFixture{ requester: &certificateRequesterMock{}, refresher:
&certificateRefresherMock{}, repo: &certsRepoMock{}, componentGetter: &componentGetterMock{}, - k8sClient: testK8sClient(k8sClientConfig), + k8sClient: getFakeK8sClient(k8sClientConfig), } msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) @@ -98,7 +98,7 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{getCertsErr: tc.getCertsErr}) fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() @@ -114,7 +114,7 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { } func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{refresherStartErr: errForced}) fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() @@ -126,7 +126,7 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { } func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{}) fixture.refresher.On("Stop").Once() fixture.requester.On("Stop").Once() @@ -141,10 +141,10 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { testCases := map[string]struct { - testK8sClientConfig testK8sClientConfig + testK8sClientConfig fakeK8sClientConfig }{ - "sensor replica set missing": {testK8sClientConfig: 
testK8sClientConfig{skipSensorReplicaSet: true}}, - "sensor pod missing": {testK8sClientConfig: testK8sClientConfig{skipSensorPod: true}}, + "sensor replica set missing": {testK8sClientConfig: fakeK8sClientConfig{skipSensorReplicaSet: true}}, + "sensor pod missing": {testK8sClientConfig: fakeK8sClientConfig{skipSensorPod: true}}, } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { @@ -164,7 +164,7 @@ func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() processMessageDoneSignal := concurrency.NewErrorSignal() - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) expectedResponse := &central.IssueLocalScannerCertsResponse{ RequestId: uuid.NewDummy().String(), } @@ -194,7 +194,7 @@ func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() processMessageDoneSignal := concurrency.NewErrorSignal() - fixture := newLocalScannerTLSIssuerFixture(testK8sClientConfig{}) + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) msg := &central.MsgToSensor{ Msg: &central.MsgToSensor_ReprocessDeployments{}, } @@ -213,7 +213,7 @@ assert.True(t, ok) } -func testK8sClient(conf testK8sClientConfig) *fake.Clientset { +func getFakeK8sClient(conf fakeK8sClientConfig) *fake.Clientset { objects := make([]runtime.Object, 0) if !conf.skipSensorReplicaSet { sensorDeploymentGVK := sensorDeployment.GroupVersionKind() @@ -258,7 +258,7 @@ -type testK8sClientConfig struct { +type fakeK8sClientConfig struct { // if true then no sensor replica set and no sensor pod will be added to the test client.
skipSensorReplicaSet bool // if true then no sensor pod set will be added to the test client. From 07ee96ff4f7a3a5c432ea0b55468c8b6c88480aa Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 11:55:12 +0100 Subject: [PATCH 30/46] use longer variable names --- sensor/kubernetes/localscanner/tls_issuer.go | 26 ++++++------- .../localscanner/tls_issuer_test.go | 38 +++++++++---------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 607d1874b9f00..1b1e8ec4a0c8c 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -54,7 +54,7 @@ func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace st msgFromCentralC: msgFromCentralC, getCertificateRefresherFn: newCertificatesRefresher, getServiceCertificatesRepoFn: newServiceCertificatesRepo, - requester: NewCertificateRequester(msgToCentralC, msgFromCentralC), + certRequester: NewCertificateRequester(msgToCentralC, msgFromCentralC), } } @@ -66,8 +66,8 @@ type localScannerTLSIssuerImpl struct { msgFromCentralC chan *central.IssueLocalScannerCertsResponse getCertificateRefresherFn certificateRefresherGetter getServiceCertificatesRepoFn serviceCertificatesRepoGetter - requester CertificateRequester - refresher concurrency.RetryTicker + certRequester CertificateRequester + certRefresher concurrency.RetryTicker } // CertificateRequester requests a new set of local scanner certificates from central. 
@@ -92,7 +92,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { ctx, cancel := context.WithTimeout(context.Background(), startTimeout) defer cancel() - if i.refresher != nil { + if i.certRefresher != nil { return i.abortStart(errors.New("already started")) } @@ -103,12 +103,12 @@ func (i *localScannerTLSIssuerImpl) Start() error { certsRepo := i.getServiceCertificatesRepoFn(*sensorOwnerReference, i.sensorNamespace, i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) - i.refresher = i.getCertificateRefresherFn(i.requester.RequestCertificates, certsRepo, + i.certRefresher = i.getCertificateRefresherFn(i.certRequester.RequestCertificates, certsRepo, certRefreshTimeout, certRefreshBackoff) - i.requester.Start() - if refreshStartErr := i.refresher.Start(); refreshStartErr != nil { - return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate refresher")) + i.certRequester.Start() + if refreshStartErr := i.certRefresher.Start(); refreshStartErr != nil { + return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate certRefresher")) } log.Debug("local scanner TLS tlsIssuer started.") @@ -122,12 +122,12 @@ func (i *localScannerTLSIssuerImpl) abortStart(err error) error { } func (i *localScannerTLSIssuerImpl) Stop(_ error) { - if i.refresher != nil { - i.refresher.Stop() - i.refresher = nil + if i.certRefresher != nil { + i.certRefresher.Stop() + i.certRefresher = nil } - i.requester.Stop() + i.certRequester.Stop() log.Debug("local scanner TLS tlsIssuer stopped.") } @@ -153,7 +153,7 @@ func (i *localScannerTLSIssuerImpl) ProcessMessage(msg *central.MsgToSensor) err defer cancel() select { case <-ctx.Done(): - // refresher will retry. + // certRefresher will retry. 
log.Errorf("timeout forwarding response %s from central: %s", response, ctx.Err()) case i.msgFromCentralC <- response: } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 94358a1ee4e38..318e04d8e1355 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -30,8 +30,8 @@ var ( type localScannerTLSIssuerFixture struct { k8sClient *fake.Clientset - requester *certificateRequesterMock - refresher *certificateRefresherMock + certRequester *certificateRequesterMock + certRefresher *certificateRefresherMock repo *certsRepoMock componentGetter *componentGetterMock tlsIssuer *localScannerTLSIssuerImpl @@ -39,8 +39,8 @@ type localScannerTLSIssuerFixture struct { func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *localScannerTLSIssuerFixture { fixture := &localScannerTLSIssuerFixture{ - requester: &certificateRequesterMock{}, - refresher: &certificateRefresherMock{}, + certRequester: &certificateRequesterMock{}, + certRefresher: &certificateRefresherMock{}, repo: &certsRepoMock{}, componentGetter: &componentGetterMock{}, k8sClient: getFakeK8sClient(k8sClientConfig), @@ -55,22 +55,22 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *local msgFromCentralC: msgFromCentralC, getCertificateRefresherFn: fixture.componentGetter.getCertificateRefresher, getServiceCertificatesRepoFn: fixture.componentGetter.getServiceCertificatesRepo, - requester: fixture.requester, + certRequester: fixture.certRequester, } return fixture } func (f *localScannerTLSIssuerFixture) assertMockExpectations(t *testing.T) { - f.requester.AssertExpectations(t) - f.requester.AssertExpectations(t) + f.certRequester.AssertExpectations(t) + f.certRequester.AssertExpectations(t) f.componentGetter.AssertExpectations(t) } // mockForStart setups the mocks for the happy path of Start func (f *localScannerTLSIssuerFixture) 
mockForStart(conf mockForStartConfig) { - f.requester.On("Start").Once() - f.refresher.On("Start").Once().Return(conf.refresherStartErr) + f.certRequester.On("Start").Once() + f.certRefresher.On("Start").Once().Return(conf.refresherStartErr) f.repo.On("getServiceCertificates", mock.Anything).Once(). Return((*storage.TypedServiceCertificateSet)(nil), conf.getCertsErr) @@ -79,7 +79,7 @@ func (f *localScannerTLSIssuerFixture) mockForStart(conf mockForStartConfig) { mock.Anything, mock.Anything).Once().Return(f.repo, nil) f.componentGetter.On("getCertificateRefresher", mock.Anything, f.repo, - certRefreshTimeout, certRefreshBackoff).Once().Return(f.refresher) + certRefreshTimeout, certRefreshBackoff).Once().Return(f.certRefresher) } type mockForStartConfig struct { @@ -100,14 +100,14 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { t.Run(tcName, func(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{getCertsErr: tc.getCertsErr}) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() + fixture.certRefresher.On("Stop").Once() + fixture.certRequester.On("Stop").Once() startErr := fixture.tlsIssuer.Start() fixture.tlsIssuer.Stop(nil) assert.NoError(t, startErr) - assert.Nil(t, fixture.tlsIssuer.refresher) + assert.Nil(t, fixture.tlsIssuer.certRefresher) fixture.assertMockExpectations(t) }) } @@ -116,8 +116,8 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{refresherStartErr: errForced}) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() + fixture.certRefresher.On("Stop").Once() + fixture.certRequester.On("Stop").Once() startErr := fixture.tlsIssuer.Start() @@ -128,8 +128,8 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t 
*testing.T) { func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) fixture.mockForStart(mockForStartConfig{}) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() + fixture.certRefresher.On("Stop").Once() + fixture.certRequester.On("Stop").Once() startErr := fixture.tlsIssuer.Start() secondStartErr := fixture.tlsIssuer.Start() @@ -149,8 +149,8 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { fixture := newLocalScannerTLSIssuerFixture(tc.testK8sClientConfig) - fixture.refresher.On("Stop").Once() - fixture.requester.On("Stop").Once() + fixture.certRefresher.On("Stop").Once() + fixture.certRequester.On("Stop").Once() startErr := fixture.tlsIssuer.Start() From 8b3f596a001a0b24499898d54005232c86592404 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 12:03:00 +0100 Subject: [PATCH 31/46] improve logging --- sensor/kubernetes/localscanner/tls_issuer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 1b1e8ec4a0c8c..2dac883896430 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -179,7 +179,7 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co return pod.GetObjectMeta(), nil }) if getPodErr != nil { - return nil, errors.Wrap(getPodErr, "fetching sensor pod") + return nil, errors.Wrapf(getPodErr, "fetching sensor pod with name %q", i.sensorPodName) } podOwners := sensorPodMeta.GetOwnerReferences() if len(podOwners) != 1 { @@ -198,7 +198,7 @@ func (i *localScannerTLSIssuerImpl) fetchSensorDeploymentOwnerRef(ctx context.Co return replicaSet.GetObjectMeta(), nil }) if getReplicaSetErr != nil { - return nil, 
errors.Wrap(getReplicaSetErr, "fetching owner replica set") + return nil, errors.Wrapf(getReplicaSetErr, "fetching owner replica set with name %q", podOwnerName) } replicaSetOwners := ownerReplicaSetMeta.GetOwnerReferences() if len(replicaSetOwners) != 1 { From c47fe40b55d21f6226faf6523adcb60ea0834e26 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 12:30:47 +0100 Subject: [PATCH 32/46] do not stop Sensor start when this component fails to start --- sensor/kubernetes/localscanner/tls_issuer.go | 3 ++- sensor/kubernetes/localscanner/tls_issuer_test.go | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 2dac883896430..7934bc54be90f 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -118,7 +118,8 @@ func (i *localScannerTLSIssuerImpl) Start() error { func (i *localScannerTLSIssuerImpl) abortStart(err error) error { log.Errorf("local scanner TLS tlsIssuer start aborted due to error: %s", err) i.Stop(err) - return err + // This component should never stop Sensor. 
+ return nil } func (i *localScannerTLSIssuerImpl) Stop(_ error) { diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 318e04d8e1355..83c66c295d3ae 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -121,7 +121,7 @@ func TestLocalScannerTLSIssuerRefresherFailureStartFailure(t *testing.T) { startErr := fixture.tlsIssuer.Start() - assert.Error(t, startErr) + assert.NoError(t, startErr) fixture.assertMockExpectations(t) } @@ -135,7 +135,7 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { secondStartErr := fixture.tlsIssuer.Start() assert.NoError(t, startErr) - assert.Error(t, secondStartErr) + assert.NoError(t, secondStartErr) fixture.assertMockExpectations(t) } @@ -154,7 +154,7 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * startErr := fixture.tlsIssuer.Start() - assert.Error(t, startErr) + assert.NoError(t, startErr) fixture.assertMockExpectations(t) }) } From 7cbf0356b5563e681afa1e8e3f6f3f621b6ce471 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Tue, 22 Feb 2022 16:01:56 +0100 Subject: [PATCH 33/46] abort start for bundle installations --- sensor/kubernetes/localscanner/tls_issuer.go | 26 ++++++++++++++----- .../localscanner/tls_issuer_test.go | 22 ++++++++++++++++ sensor/kubernetes/sensor/sensor.go | 3 ++- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 7934bc54be90f..fff3347fb1501 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" + "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/concurrency" 
"github.com/stackrox/rox/pkg/logging" @@ -43,13 +44,19 @@ var ( // NewLocalScannerTLSIssuer creates a sensor component that will keep the local scanner certificates // up to date, using the specified retry parameters. -func NewLocalScannerTLSIssuer(k8sClient kubernetes.Interface, sensorNamespace string, sensorPodName string) common.SensorComponent { +func NewLocalScannerTLSIssuer( + k8sClient kubernetes.Interface, + sensorManagedBy storage.ManagerType, + sensorNamespace string, + sensorPodName string, +) common.SensorComponent { msgToCentralC := make(chan *central.MsgFromSensor) msgFromCentralC := make(chan *central.IssueLocalScannerCertsResponse) return &localScannerTLSIssuerImpl{ sensorNamespace: sensorNamespace, sensorPodName: sensorPodName, k8sClient: k8sClient, + sensorManagedBy: sensorManagedBy, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, getCertificateRefresherFn: newCertificatesRefresher, @@ -62,6 +69,7 @@ type localScannerTLSIssuerImpl struct { sensorNamespace string sensorPodName string k8sClient kubernetes.Interface + sensorManagedBy storage.ManagerType msgToCentralC chan *central.MsgFromSensor msgFromCentralC chan *central.IssueLocalScannerCertsResponse getCertificateRefresherFn certificateRefresherGetter @@ -88,12 +96,18 @@ type serviceCertificatesRepoGetter func(ownerReference metav1.OwnerReference, na // In case a secret doesn't have the expected owner, this logs a warning and returns nil. // In case this component was already started it fails immediately. 
func (i *localScannerTLSIssuerImpl) Start() error { - log.Debug("starting local scanner TLS tlsIssuer.") + log.Debug("starting local scanner TLS issuer.") ctx, cancel := context.WithTimeout(context.Background(), startTimeout) defer cancel() + if i.sensorManagedBy != storage.ManagerType_MANAGER_TYPE_HELM_CHART && + i.sensorManagedBy != storage.ManagerType_MANAGER_TYPE_KUBERNETES_OPERATOR { + log.Debugf("start aborted: local scanner scanner TLS issuer is disabled for manager type %q.", i.sensorManagedBy) + return nil + } + if i.certRefresher != nil { - return i.abortStart(errors.New("already started")) + return i.abortStart(errors.New("already started.")) } sensorOwnerReference, fetchSensorDeploymentErr := i.fetchSensorDeploymentOwnerRef(ctx, fetchSensorDeploymentOwnerRefBackoff) @@ -111,12 +125,12 @@ func (i *localScannerTLSIssuerImpl) Start() error { return i.abortStart(errors.Wrap(refreshStartErr, "starting certificate certRefresher")) } - log.Debug("local scanner TLS tlsIssuer started.") + log.Debug("local scanner TLS issuer started.") return nil } func (i *localScannerTLSIssuerImpl) abortStart(err error) error { - log.Errorf("local scanner TLS tlsIssuer start aborted due to error: %s", err) + log.Errorf("local scanner TLS issuer start aborted due to error: %s", err) i.Stop(err) // This component should never stop Sensor. 
return nil @@ -129,7 +143,7 @@ func (i *localScannerTLSIssuerImpl) Stop(_ error) { } i.certRequester.Stop() - log.Debug("local scanner TLS tlsIssuer stopped.") + log.Debug("local scanner TLS issuer stopped.") } func (i *localScannerTLSIssuerImpl) Capabilities() []centralsensor.SensorCapability { diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 83c66c295d3ae..78270a7b16114 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -51,6 +51,7 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *local sensorNamespace: sensorNamespace, sensorPodName: sensorPodName, k8sClient: fixture.k8sClient, + sensorManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, getCertificateRefresherFn: fixture.componentGetter.getCertificateRefresher, @@ -160,6 +161,27 @@ func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * } } +func TestLocalScannerTLSIssuerWrongManagerTypeStartNoop(t *testing.T) { + testCases := map[string]struct { + sensorManagedBy storage.ManagerType + }{ + "bundle installations": {sensorManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL}, + "unknown installation type": {sensorManagedBy: storage.ManagerType_MANAGER_TYPE_UNKNOWN}, + } + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) + fixture.tlsIssuer.sensorManagedBy = tc.sensorManagedBy + + startErr := fixture.tlsIssuer.Start() + + assert.NoError(t, startErr) + assert.Nil(t, fixture.tlsIssuer.certRefresher) + fixture.assertMockExpectations(t) + }) + } +} + func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() diff --git a/sensor/kubernetes/sensor/sensor.go 
b/sensor/kubernetes/sensor/sensor.go index a00414eaed6b3..59bdf5d5da7d5 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -165,7 +165,8 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager if features.LocalImageScanning.Enabled() { podName := os.Getenv("POD_NAME") - components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podName)) + components = append(components, + localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), helmManagedConfig.GetManagedBy(), sensorNamespace, podName)) } s := sensor.NewSensor( From 9d2db924617d0925335eae97b289354df133f1dd Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 13:12:59 +0100 Subject: [PATCH 34/46] add integration test for happy path --- .../certificate_expiration_test.go | 12 +- .../localscanner/tls_issuer_test.go | 128 ++++++++++++++++++ 2 files changed, 138 insertions(+), 2 deletions(-) diff --git a/sensor/kubernetes/localscanner/certificate_expiration_test.go b/sensor/kubernetes/localscanner/certificate_expiration_test.go index 3d0a9365ca795..135e7555907b9 100644 --- a/sensor/kubernetes/localscanner/certificate_expiration_test.go +++ b/sensor/kubernetes/localscanner/certificate_expiration_test.go @@ -68,7 +68,7 @@ func (s *getSecretRenewalTimeSuite) TestGetSecretsCertRenewalTime() { s.LessOrEqual(certDuration, afterOffset/2) } -func issueCertificatePEM(issueOption mtls.IssueCertOption) ([]byte, error) { +func issueCertificate(issueOption mtls.IssueCertOption) (*mtls.IssuedCert, error) { ca, err := mtls.CAForSigning() if err != nil { return nil, err @@ -78,5 +78,13 @@ func issueCertificatePEM(issueOption mtls.IssueCertOption) ([]byte, error) { if err != nil { return nil, err } - return cert.CertPEM, err + return cert, err +} + +func issueCertificatePEM(issueOption mtls.IssueCertOption) ([]byte, error) { + cert, err := issueCertificate(issueOption) + if err != nil { + 
return nil, err + } + return cert.CertPEM, nil } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 78270a7b16114..eef603d00fa2d 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -8,9 +8,14 @@ import ( "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/mtls" + testutilsMTLS "github.com/stackrox/rox/pkg/mtls/testutils" + "github.com/stackrox/rox/pkg/testutils/envisolator" "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" appsApiv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" @@ -18,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) @@ -235,6 +241,116 @@ func TestLocalScannerTLSIssuerProcessMessageUnknownMessage(t *testing.T) { assert.True(t, ok) } +func TestLocalScannerTLSIssuerIntegrationTests(t *testing.T) { + suite.Run(t, new(localScannerTLSIssueIntegrationTests)) +} + +type localScannerTLSIssueIntegrationTests struct { + suite.Suite + envIsolator *envisolator.EnvIsolator +} + +func (s *localScannerTLSIssueIntegrationTests) SetupSuite() { + s.envIsolator = envisolator.NewEnvIsolator(s.T()) +} + +func (s *localScannerTLSIssueIntegrationTests) SetupTest() { + err := testutilsMTLS.LoadTestMTLSCerts(s.envIsolator) + s.Require().NoError(err) +} + +func (s *localScannerTLSIssueIntegrationTests) TearDownTest() { + s.envIsolator.RestoreAll() +} + +func (s *localScannerTLSIssueIntegrationTests) TestHappyPath() { + testTimeout := 100 * 
time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + client := getFakeK8sClient(fakeK8sClientConfig{}) + tlsIssuer := newLocalScannerTLSIssuer( + s.T(), + client, + storage.ManagerType_MANAGER_TYPE_HELM_CHART, + sensorNamespace, + sensorPodName, + ) + + err := tlsIssuer.Start() + defer tlsIssuer.Stop(nil) + s.Require().NoError(err) + s.Require().NotNil(tlsIssuer.certRefresher) + + var request *central.MsgFromSensor + select { + case request = <-tlsIssuer.ResponsesC(): + case <-ctx.Done(): + s.Require().Fail(ctx.Err().Error()) + } + + s.Require().NotNil(request.GetIssueLocalScannerCertsRequest()) + ca, err := mtls.CAForSigning() + s.Require().NoError(err) + // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged + scannerCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + s.Require().NoError(err) + scannerDBCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + s.Require().NoError(err) + response := &central.MsgToSensor{ + Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ + IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ + RequestId: request.GetIssueLocalScannerCertsRequest().GetRequestId(), + Response: &central.IssueLocalScannerCertsResponse_Certificates{ + Certificates: &storage.TypedServiceCertificateSet{ + CaPem: ca.CertPEM(), + ServiceCerts: []*storage.TypedServiceCertificate{ + { + ServiceType: storage.ServiceType_SCANNER_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerCert.KeyPEM, + CertPem: scannerCert.CertPEM, + }, + }, + { + ServiceType: storage.ServiceType_SCANNER_DB_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerDBCert.KeyPEM, + CertPem: scannerDBCert.CertPEM, + }, + }, + }, + }, + }, + }, + } + err = tlsIssuer.ProcessMessage(response) + s.Require().NoError(err) + + var secrets *v1.SecretList + ok := concurrency.PollWithTimeout(func() bool { + secrets,
err = client.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) + s.Require().NoError(err) + return len(secrets.Items) == 2 + }, 10*time.Millisecond, testTimeout) + s.Require().True(ok) + for _, secret := range secrets.Items { + var expectedCert *mtls.IssuedCert + switch secretName := secret.GetName(); secretName { + case "scanner-slim-tls": + expectedCert = scannerCert + case "scanner-db-slim-tls": + expectedCert = scannerDBCert + default: + s.Require().Failf("expected secret name should be either %q or %q, found %q instead", + "scanner-slim-tls", "scanner-db-slim-tls", secretName) + } + s.Equal(ca.CertPEM(), secret.Data[mtls.CACertFileName]) + s.Equal(expectedCert.CertPEM, secret.Data[mtls.ServiceCertFileName]) + s.Equal(expectedCert.KeyPEM, secret.Data[mtls.ServiceKeyFileName]) + } +} + func getFakeK8sClient(conf fakeK8sClientConfig) *fake.Clientset { objects := make([]runtime.Object, 0) if !conf.skipSensorReplicaSet { @@ -287,6 +403,18 @@ type fakeK8sClientConfig struct { skipSensorPod bool } +func newLocalScannerTLSIssuer( + t *testing.T, + k8sClient kubernetes.Interface, + sensorManagedBy storage.ManagerType, + sensorNamespace string, + sensorPodName string, +) *localScannerTLSIssuerImpl { + tlsIssuer := NewLocalScannerTLSIssuer(k8sClient, sensorManagedBy, sensorNamespace, sensorPodName) + require.IsType(t, &localScannerTLSIssuerImpl{}, tlsIssuer) + return tlsIssuer.(*localScannerTLSIssuerImpl) +} + type certificateRequesterMock struct { mock.Mock } From 41e94fc733e37d638490b8b7a8d417b745f40181 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 15:20:56 +0100 Subject: [PATCH 35/46] check wrapped errors not only outer error --- sensor/kubernetes/localscanner/cert_refresher.go | 2 +- sensor/kubernetes/localscanner/cert_refresher_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sensor/kubernetes/localscanner/cert_refresher.go b/sensor/kubernetes/localscanner/cert_refresher.go 
index 605aa9395ecae..4efc1d4fd9cfa 100644 --- a/sensor/kubernetes/localscanner/cert_refresher.go +++ b/sensor/kubernetes/localscanner/cert_refresher.go @@ -97,7 +97,7 @@ func getTimeToRefreshFromRepo(ctx context.Context, getCertsRenewalTime getCertsR repository serviceCertificatesRepo) (time.Duration, error) { certificates, getCertsErr := repository.getServiceCertificates(ctx) - if getCertsErr == ErrDifferentCAForDifferentServiceTypes || getCertsErr == ErrMissingSecretData { + if errors.Is(getCertsErr, ErrDifferentCAForDifferentServiceTypes) || errors.Is(getCertsErr, ErrMissingSecretData) { log.Errorf("local scanner certificates are in an inconsistent state, "+ "will refresh certificates immediately: %s", getCertsErr) return 0, nil diff --git a/sensor/kubernetes/localscanner/cert_refresher_test.go b/sensor/kubernetes/localscanner/cert_refresher_test.go index 28b77a576be33..3f935964eda37 100644 --- a/sensor/kubernetes/localscanner/cert_refresher_test.go +++ b/sensor/kubernetes/localscanner/cert_refresher_test.go @@ -110,8 +110,8 @@ func (s *certRefresherSuite) TestRefreshCertificatesGetCertsInconsistentImmediat testCases := map[string]struct { recoverableErr error }{ - "refresh immediately on ErrDifferentCAForDifferentServiceTypes": {recoverableErr: ErrDifferentCAForDifferentServiceTypes}, - "refresh immediately on ErrMissingSecretData": {recoverableErr: ErrMissingSecretData}, + "refresh immediately on ErrDifferentCAForDifferentServiceTypes": {recoverableErr: errors.Wrap(ErrDifferentCAForDifferentServiceTypes, "wrap error")}, + "refresh immediately on ErrMissingSecretData": {recoverableErr: errors.Wrap(ErrMissingSecretData, "wrap error")}, "refresh immediately on missing secrets": {recoverableErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "foo")}, } for tcName, tc := range testCases { From 0578fbdadffd7d364bebddd6acc16f080f422a24 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 15:21:26 +0100 Subject: 
[PATCH 36/46] extend happy path integ test to corrupted secrets --- .../localscanner/tls_issuer_test.go | 224 +++++++++++------- 1 file changed, 132 insertions(+), 92 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index eef603d00fa2d..544f98fd0555c 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -148,14 +148,14 @@ func TestLocalScannerTLSIssuerStartAlreadyStartedFailure(t *testing.T) { func TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t *testing.T) { testCases := map[string]struct { - testK8sClientConfig fakeK8sClientConfig + k8sClientConfig fakeK8sClientConfig }{ - "sensor replica set missing": {testK8sClientConfig: fakeK8sClientConfig{skipSensorReplicaSet: true}}, - "sensor pod missing": {testK8sClientConfig: fakeK8sClientConfig{skipSensorPod: true}}, + "sensor replica set missing": {k8sClientConfig: fakeK8sClientConfig{skipSensorReplicaSet: true}}, + "sensor pod missing": {k8sClientConfig: fakeK8sClientConfig{skipSensorPod: true}}, } for tcName, tc := range testCases { t.Run(tcName, func(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(tc.testK8sClientConfig) + fixture := newLocalScannerTLSIssuerFixture(tc.k8sClientConfig) fixture.certRefresher.On("Stop").Once() fixture.certRequester.On("Stop").Once() @@ -264,90 +264,115 @@ func (s *localScannerTLSIssueIntegrationTests) TearDownTest() { } func (s *localScannerTLSIssueIntegrationTests) TestHappyPath() { - testTimeout := 100 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - defer cancel() - client := getFakeK8sClient(fakeK8sClientConfig{}) - tlsIssuer := newLocalScannerTLSIssuer( - s.T(), - client, - storage.ManagerType_MANAGER_TYPE_HELM_CHART, - sensorNamespace, - sensorPodName, - ) - - err := tlsIssuer.Start() - defer tlsIssuer.Stop(nil) - s.Require().NoError(err) - 
s.Require().NotNil(tlsIssuer.certRefresher) - - var request *central.MsgFromSensor - select { - case request = <-tlsIssuer.ResponsesC(): - case <-ctx.Done(): - s.Require().Fail(ctx.Err().Error()) + // var corruptedSecretData map[string][]byte + testCases := map[string]struct { + k8sClientConfig fakeK8sClientConfig + }{ + "no secrets": {k8sClientConfig: fakeK8sClientConfig{}}, + "corrupted data in scanner secret": { + k8sClientConfig: fakeK8sClientConfig{ + secretsData: map[string]map[string][]byte{"scanner-slim-tls": nil}, + }, + }, + "corrupted data in scanner DB secret": { + k8sClientConfig: fakeK8sClientConfig{ + secretsData: map[string]map[string][]byte{"scanner-db-slim-tls": nil}, + }, + }, + "corrupted data in all local scanner secrets": { + k8sClientConfig: fakeK8sClientConfig{ + secretsData: map[string]map[string][]byte{"scanner-slim-tls": nil, "scanner-db-slim-tls": nil}, + }, + }, } + for tcName, tc := range testCases { + s.Run(tcName, func() { + testTimeout := 100 * time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + client := getFakeK8sClient(tc.k8sClientConfig) + tlsIssuer := newLocalScannerTLSIssuer( + s.T(), + client, + storage.ManagerType_MANAGER_TYPE_HELM_CHART, + sensorNamespace, + sensorPodName, + ) + + err := tlsIssuer.Start() + defer tlsIssuer.Stop(nil) + s.Require().NoError(err) + s.Require().NotNil(tlsIssuer.certRefresher) + + var request *central.MsgFromSensor + select { + case request = <-tlsIssuer.ResponsesC(): + case <-ctx.Done(): + s.Require().Fail(ctx.Err().Error()) + } - s.Require().NotNil(request.GetIssueLocalScannerCertsRequest()) - ca, err := mtls.CAForSigning() - s.Require().NoError(err) - // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged - scannerCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) - s.Require().NoError(err) - scannerDBCert, err := 
issueCertificate(mtls.WithValidityExpiringInHours()) - s.Require().NoError(err) - response := &central.MsgToSensor{ - Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ - IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ - RequestId: request.GetIssueLocalScannerCertsRequest().GetRequestId(), - Response: &central.IssueLocalScannerCertsResponse_Certificates{ - Certificates: &storage.TypedServiceCertificateSet{ - CaPem: ca.CertPEM(), - ServiceCerts: []*storage.TypedServiceCertificate{ - { - ServiceType: storage.ServiceType_SCANNER_SERVICE, - Cert: &storage.ServiceCertificate{ - KeyPem: scannerCert.KeyPEM, - CertPem: scannerCert.CertPEM, - }, - }, - { - ServiceType: storage.ServiceType_SCANNER_DB_SERVICE, - Cert: &storage.ServiceCertificate{ - KeyPem: scannerDBCert.KeyPEM, - CertPem: scannerDBCert.CertPEM, + s.Require().NotNil(request.GetIssueLocalScannerCertsRequest()) + ca, err := mtls.CAForSigning() + s.Require().NoError(err) + // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged + scannerCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + s.Require().NoError(err) + scannerDBCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + s.Require().NoError(err) + response := &central.MsgToSensor{ + Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ + IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ + RequestId: request.GetIssueLocalScannerCertsRequest().GetRequestId(), + Response: &central.IssueLocalScannerCertsResponse_Certificates{ + Certificates: &storage.TypedServiceCertificateSet{ + CaPem: ca.CertPEM(), + ServiceCerts: []*storage.TypedServiceCertificate{ + { + ServiceType: storage.ServiceType_SCANNER_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerCert.KeyPEM, + CertPem: scannerCert.CertPEM, + }, + }, + { + ServiceType: storage.ServiceType_SCANNER_DB_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerDBCert.KeyPEM, +
CertPem: scannerDBCert.CertPEM, + }, + }, }, }, }, }, }, - }, - }, - } - err = tlsIssuer.ProcessMessage(response) - s.Require().NoError(err) - - var secrets *v1.SecretList - ok := concurrency.PollWithTimeout(func() bool { - secrets, err = client.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) - s.Require().NoError(err) - return len(secrets.Items) == 2 - }, 10*time.Millisecond, testTimeout) - s.Require().True(ok) - for _, secret := range secrets.Items { - var expectedCert *mtls.IssuedCert - switch secretName := secret.GetName(); secretName { - case "scanner-slim-tls": - expectedCert = scannerCert - case "scanner-db-slim-tls": - expectedCert = scannerDBCert - default: - s.Require().Failf("expected secret name should be either %q or %q, found %q instead", - "scanner-slim-tls", "scanner-db-slim-tls", secretName) - } - s.Equal(ca.CertPEM(), secret.Data[mtls.CACertFileName]) - s.Equal(expectedCert.CertPEM, secret.Data[mtls.ServiceCertFileName]) - s.Equal(expectedCert.KeyPEM, secret.Data[mtls.ServiceKeyFileName]) + } + err = tlsIssuer.ProcessMessage(response) + s.Require().NoError(err) + + var secrets *v1.SecretList + ok := concurrency.PollWithTimeout(func() bool { + secrets, err = client.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) + s.Require().NoError(err) + return len(secrets.Items) == 2 + }, 10*time.Millisecond, testTimeout) + s.Require().True(ok) + for _, secret := range secrets.Items { + var expectedCert *mtls.IssuedCert + switch secretName := secret.GetName(); secretName { + case "scanner-slim-tls": + expectedCert = scannerCert + case "scanner-db-slim-tls": + expectedCert = scannerDBCert + default: + s.Require().Failf("expected secret name should be either %q or %q, found %q instead", + "scanner-slim-tls", "scanner-db-slim-tls", secretName) + } + s.Equal(ca.CertPEM(), secret.Data[mtls.CACertFileName]) + s.Equal(expectedCert.CertPEM, secret.Data[mtls.ServiceCertFileName]) + 
s.Equal(expectedCert.KeyPEM, secret.Data[mtls.ServiceKeyFileName]) + } + }) } } @@ -371,24 +396,36 @@ func getFakeK8sClient(conf fakeK8sClientConfig) *fake.Clientset { } objects = append(objects, sensorReplicaSet) + sensorReplicaSetGVK := sensorReplicaSet.GroupVersionKind() + sensorReplicaSetOwnerRef := metav1.OwnerReference{ + APIVersion: sensorReplicaSetGVK.GroupVersion().String(), + Kind: sensorReplicaSet.Kind, + Name: sensorReplicaSet.GetName(), + UID: sensorReplicaSet.GetUID(), + } + if !conf.skipSensorPod { - sensorReplicaSetGVK := sensorReplicaSet.GroupVersionKind() sensorPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: sensorPodName, - Namespace: sensorNamespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: sensorReplicaSetGVK.GroupVersion().String(), - Kind: sensorReplicaSet.Kind, - Name: sensorReplicaSet.GetName(), - UID: sensorReplicaSet.GetUID(), - }, - }, + Name: sensorPodName, + Namespace: sensorNamespace, + OwnerReferences: []metav1.OwnerReference{sensorReplicaSetOwnerRef}, }, } objects = append(objects, sensorPod) } + + for secretName, secretData := range conf.secretsData { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: sensorNamespace, + OwnerReferences: []metav1.OwnerReference{sensorReplicaSetOwnerRef}, + }, + Data: secretData, + } + objects = append(objects, secret) + } } k8sClient := fake.NewSimpleClientset(objects...) @@ -401,6 +438,9 @@ type fakeK8sClientConfig struct { skipSensorReplicaSet bool // if true then no sensor pod set will be added to the test client. skipSensorPod bool + // if skipSensorReplicaSet is false, then a secret will be added to the test client for + // each entry in this map, using the key as the secret name and the value as the secret data. 
+ secretsData map[string]map[string][]byte } func newLocalScannerTLSIssuer( From 707bb642b04071da564302b0dc0c131cd1e61c36 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 16:33:55 +0100 Subject: [PATCH 37/46] test cert issue retries on integration test --- sensor/kubernetes/localscanner/tls_issuer.go | 4 +- .../localscanner/tls_issuer_test.go | 137 ++++++++++++------ 2 files changed, 92 insertions(+), 49 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index fff3347fb1501..48a9141e8fefc 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -59,6 +59,7 @@ func NewLocalScannerTLSIssuer( sensorManagedBy: sensorManagedBy, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, + certRefreshBackoff: certRefreshBackoff, getCertificateRefresherFn: newCertificatesRefresher, getServiceCertificatesRepoFn: newServiceCertificatesRepo, certRequester: NewCertificateRequester(msgToCentralC, msgFromCentralC), @@ -72,6 +73,7 @@ type localScannerTLSIssuerImpl struct { sensorManagedBy storage.ManagerType msgToCentralC chan *central.MsgFromSensor msgFromCentralC chan *central.IssueLocalScannerCertsResponse + certRefreshBackoff wait.Backoff getCertificateRefresherFn certificateRefresherGetter getServiceCertificatesRepoFn serviceCertificatesRepoGetter certRequester CertificateRequester @@ -118,7 +120,7 @@ func (i *localScannerTLSIssuerImpl) Start() error { certsRepo := i.getServiceCertificatesRepoFn(*sensorOwnerReference, i.sensorNamespace, i.k8sClient.CoreV1().Secrets(i.sensorNamespace)) i.certRefresher = i.getCertificateRefresherFn(i.certRequester.RequestCertificates, certsRepo, - certRefreshTimeout, certRefreshBackoff) + certRefreshTimeout, i.certRefreshBackoff) i.certRequester.Start() if refreshStartErr := i.certRefresher.Start(); refreshStartErr != nil { diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go 
b/sensor/kubernetes/localscanner/tls_issuer_test.go index 544f98fd0555c..29c8e93b5afb9 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" @@ -12,6 +13,7 @@ import ( testutilsMTLS "github.com/stackrox/rox/pkg/mtls/testutils" "github.com/stackrox/rox/pkg/testutils/envisolator" "github.com/stackrox/rox/pkg/uuid" + "github.com/stackrox/rox/sensor/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -60,6 +62,7 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *local sensorManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, + certRefreshBackoff: certRefreshBackoff, getCertificateRefresherFn: fixture.componentGetter.getCertificateRefresher, getServiceCertificatesRepoFn: fixture.componentGetter.getServiceCertificatesRepo, certRequester: fixture.certRequester, @@ -99,8 +102,8 @@ func TestLocalScannerTLSIssuerStartStopSuccess(t *testing.T) { getCertsErr error }{ "no error": {getCertsErr: nil}, - "missing secret data": {getCertsErr: ErrMissingSecretData}, - "inconsistent CAs": {getCertsErr: ErrDifferentCAForDifferentServiceTypes}, + "missing secret data": {getCertsErr: errors.Wrap(ErrMissingSecretData, "wrap error")}, + "inconsistent CAs": {getCertsErr: errors.Wrap(ErrDifferentCAForDifferentServiceTypes, "wrap error")}, "missing secret": {getCertsErr: k8sErrors.NewNotFound(schema.GroupResource{Group: "Core", Resource: "Secret"}, "scanner-db-slim-tls")}, } for tcName, tc := range testCases { @@ -263,10 +266,10 @@ func (s *localScannerTLSIssueIntegrationTests) TearDownTest() { s.envIsolator.RestoreAll() } -func (s 
*localScannerTLSIssueIntegrationTests) TestHappyPath() { - // var corruptedSecretData map[string][]byte +func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { testCases := map[string]struct { - k8sClientConfig fakeK8sClientConfig + k8sClientConfig fakeK8sClientConfig + numFailedResponses int }{ "no secrets": {k8sClientConfig: fakeK8sClientConfig{}}, "corrupted data in scanner secret": { @@ -284,12 +287,17 @@ func (s *localScannerTLSIssueIntegrationTests) TestHappyPath() { secretsData: map[string]map[string][]byte{"scanner-slim-tls": nil, "scanner-db-slim-tls": nil}, }, }, + "refresh failure and retries": {k8sClientConfig: fakeK8sClientConfig{}, numFailedResponses: 2}, } for tcName, tc := range testCases { s.Run(tcName, func() { testTimeout := 100 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), testTimeout) defer cancel() + ca, err := mtls.CAForSigning() + s.Require().NoError(err) + scannerCert := s.getCertificate() + scannerDBCert := s.getCertificate() client := getFakeK8sClient(tc.k8sClientConfig) tlsIssuer := newLocalScannerTLSIssuer( s.T(), @@ -298,55 +306,23 @@ func (s *localScannerTLSIssueIntegrationTests) TestHappyPath() { sensorNamespace, sensorPodName, ) + tlsIssuer.certRefreshBackoff = wait.Backoff{ + Duration: time.Millisecond, + } - err := tlsIssuer.Start() + s.Require().NoError(tlsIssuer.Start()) defer tlsIssuer.Stop(nil) - s.Require().NoError(err) s.Require().NotNil(tlsIssuer.certRefresher) - var request *central.MsgFromSensor - select { - case request = <-tlsIssuer.ResponsesC(): - case <-ctx.Done(): - s.Require().Fail(ctx.Err().Error()) + for i := 0; i < tc.numFailedResponses; i++ { + request := s.waitForRequest(ctx, tlsIssuer) + response := getIssueCertsFailureResponse(request.GetRequestId()) + err = tlsIssuer.ProcessMessage(response) + s.Require().NoError(err) } - s.Require().NotNil(request.GetIssueLocalScannerCertsRequest()) - ca, err := mtls.CAForSigning() - s.Require().NoError(err) - // 
TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged - scannerCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) - s.Require().NoError(err) - scannerDBCert, err := issueCertificate(mtls.WithValidityExpiringInHours()) - s.Require().NoError(err) - response := &central.MsgToSensor{ - Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ - IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ - RequestId: request.GetIssueLocalScannerCertsRequest().GetRequestId(), - Response: &central.IssueLocalScannerCertsResponse_Certificates{ - Certificates: &storage.TypedServiceCertificateSet{ - CaPem: ca.CertPEM(), - ServiceCerts: []*storage.TypedServiceCertificate{ - { - ServiceType: storage.ServiceType_SCANNER_SERVICE, - Cert: &storage.ServiceCertificate{ - KeyPem: scannerCert.KeyPEM, - CertPem: scannerCert.CertPEM, - }, - }, - { - ServiceType: storage.ServiceType_SCANNER_DB_SERVICE, - Cert: &storage.ServiceCertificate{ - KeyPem: scannerDBCert.KeyPEM, - CertPem: scannerDBCert.CertPEM, - }, - }, - }, - }, - }, - }, - }, - } + request := s.waitForRequest(ctx, tlsIssuer) + response := getIssueCertsSuccessResponse(request.GetRequestId(), ca.CertPEM(), scannerCert, scannerDBCert) err = tlsIssuer.ProcessMessage(response) s.Require().NoError(err) @@ -376,6 +352,71 @@ +func (s *localScannerTLSIssueIntegrationTests) getCertificate() *mtls.IssuedCert { + // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged + cert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + s.Require().NoError(err) + return cert +} + +func (s *localScannerTLSIssueIntegrationTests) waitForRequest(ctx context.Context, tlsIssuer common.SensorComponent) *central.IssueLocalScannerCertsRequest { + var request *central.MsgFromSensor + select { + case request = <-tlsIssuer.ResponsesC(): + 
case <-ctx.Done(): + s.Require().Fail(ctx.Err().Error()) + } + s.Require().NotNil(request.GetIssueLocalScannerCertsRequest()) + + return request.GetIssueLocalScannerCertsRequest() +} + +func getIssueCertsSuccessResponse(requestID string, caPem []byte, scannerCert, scannerDBCert *mtls.IssuedCert) *central.MsgToSensor { + return &central.MsgToSensor{ + Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ + IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ + RequestId: requestID, + Response: &central.IssueLocalScannerCertsResponse_Certificates{ + Certificates: &storage.TypedServiceCertificateSet{ + CaPem: caPem, + ServiceCerts: []*storage.TypedServiceCertificate{ + { + ServiceType: storage.ServiceType_SCANNER_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerCert.KeyPEM, + CertPem: scannerCert.CertPEM, + }, + }, + { + ServiceType: storage.ServiceType_SCANNER_DB_SERVICE, + Cert: &storage.ServiceCertificate{ + KeyPem: scannerDBCert.KeyPEM, + CertPem: scannerDBCert.CertPEM, + }, + }, + }, + }, + }, + }, + }, + } +} + +func getIssueCertsFailureResponse(requestID string) *central.MsgToSensor { + return &central.MsgToSensor{ + Msg: &central.MsgToSensor_IssueLocalScannerCertsResponse{ + IssueLocalScannerCertsResponse: &central.IssueLocalScannerCertsResponse{ + RequestId: requestID, + Response: &central.IssueLocalScannerCertsResponse_Error{ + Error: &central.LocalScannerCertsIssueError{ + Message: "forced error", + }, + }, + }, + }, + } +} + func getFakeK8sClient(conf fakeK8sClientConfig) *fake.Clientset { objects := make([]runtime.Object, 0) if !conf.skipSensorReplicaSet { From 0565a669bdc8fa29b4f54ceb28bd27ecfbb4e1dc Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 17:42:01 +0100 Subject: [PATCH 38/46] add public method to check if ticker was stopped --- pkg/concurrency/retry_ticker.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/concurrency/retry_ticker.go b/pkg/concurrency/retry_ticker.go index 
58e767a071b67..1505a28548909 100644 --- a/pkg/concurrency/retry_ticker.go +++ b/pkg/concurrency/retry_ticker.go @@ -25,6 +25,7 @@ var ( type RetryTicker interface { Start() error Stop() + Stopped() bool } type tickFunc func(ctx context.Context) (timeToNextTick time.Duration, err error) @@ -64,7 +65,7 @@ type retryTickerImpl struct { // - ErrStartedTimer is returned if the timer was already started. // - ErrStoppedTimer is returned if the timer was stopped. func (t *retryTickerImpl) Start() error { - if t.stopFlag.Get() { + if t.Stopped() { return ErrStoppedTimer } if t.getTickTimer() != nil { @@ -82,13 +83,18 @@ func (t *retryTickerImpl) Stop() { t.setTickTimer(nil) } +// Stopped returns true if this RetryTicker has been stopped, otherwise returns false. +func (t *retryTickerImpl) Stopped() bool { + return t.stopFlag.Get() +} + func (t *retryTickerImpl) scheduleTick(timeToTick time.Duration) { t.setTickTimer(t.scheduler(timeToTick, func() { ctx, cancel := context.WithTimeout(context.Background(), t.timeout) defer cancel() nextTimeToTick, tickErr := t.doFunc(ctx) - if t.stopFlag.Get() { + if t.Stopped() { // ticker was stopped while tick function was running. 
return } From fb4af925963c26062bd6c97672f494cbc1d2b83a Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 17:58:42 +0100 Subject: [PATCH 39/46] give priority to ErrUnexpectedSecretsOwner on get secrets call --- sensor/kubernetes/localscanner/cert_refresher.go | 5 ++++- sensor/kubernetes/localscanner/cert_refresher_test.go | 6 ++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/sensor/kubernetes/localscanner/cert_refresher.go b/sensor/kubernetes/localscanner/cert_refresher.go index 4efc1d4fd9cfa..bf84bfe8866a9 100644 --- a/sensor/kubernetes/localscanner/cert_refresher.go +++ b/sensor/kubernetes/localscanner/cert_refresher.go @@ -47,7 +47,7 @@ func refreshCertificates(ctx context.Context, requestCertificates requestCertifi timeToNextRefresh, err = ensureCertificatesAreFresh(ctx, requestCertificates, getCertsRenewalTime, repository) if err != nil { if errors.Is(err, ErrUnexpectedSecretsOwner) { - log.Errorf("stopping automatic refresh of %s: %s", certsDescription, err) + log.Errorf("non-recoverable error refreshing %s, automatic refresh will be stopped: %s", certsDescription, err) return 0, concurrency.ErrNonRecoverable } @@ -97,6 +97,9 @@ func getTimeToRefreshFromRepo(ctx context.Context, getCertsRenewalTime getCertsR repository serviceCertificatesRepo) (time.Duration, error) { certificates, getCertsErr := repository.getServiceCertificates(ctx) + if errors.Is(getCertsErr, ErrUnexpectedSecretsOwner) { + return 0, getCertsErr + } if errors.Is(getCertsErr, ErrDifferentCAForDifferentServiceTypes) || errors.Is(getCertsErr, ErrMissingSecretData) { log.Errorf("local scanner certificates are in an inconsistent state, "+ "will refresh certificates immediately: %s", getCertsErr) diff --git a/sensor/kubernetes/localscanner/cert_refresher_test.go b/sensor/kubernetes/localscanner/cert_refresher_test.go index 3f935964eda37..d33e8dad707dc 100644 --- a/sensor/kubernetes/localscanner/cert_refresher_test.go +++ 
b/sensor/kubernetes/localscanner/cert_refresher_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" @@ -132,9 +133,10 @@ func (s *certRefresherSuite) TestRefreshCertificatesGetCertsInconsistentImmediat } } -func (s *certRefresherSuite) TestRefreshCertificatesGetCertsUnexpectedOwnerFailure() { +func (s *certRefresherSuite) TestRefreshCertificatesGetCertsUnexpectedOwnerHighestPriorityFailure() { + getErr := multierror.Append(nil, ErrUnexpectedSecretsOwner, ErrDifferentCAForDifferentServiceTypes, ErrMissingSecretData) s.dependenciesMock.On("getServiceCertificates", mock.Anything).Once().Return( - (*storage.TypedServiceCertificateSet)(nil), concurrency.ErrNonRecoverable) + (*storage.TypedServiceCertificateSet)(nil), getErr) _, err := s.refreshCertificates() From aef8dfae8c554d371f403f11be1f6916ca0524a5 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 18:00:22 +0100 Subject: [PATCH 40/46] add integ test for tls issuer stop on wrong secrets owner --- .../localscanner/tls_issuer_test.go | 67 +++++++++++++++++-- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 29c8e93b5afb9..52755e0d159e3 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" @@ -266,7 +267,7 @@ func (s *localScannerTLSIssueIntegrationTests) TearDownTest() { s.envIsolator.RestoreAll() } -func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { 
+func (s *localScannerTLSIssueIntegrationTests) TestSuccessfulRefresh() { testCases := map[string]struct { k8sClientConfig fakeK8sClientConfig numFailedResponses int @@ -298,10 +299,10 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { s.Require().NoError(err) scannerCert := s.getCertificate() scannerDBCert := s.getCertificate() - client := getFakeK8sClient(tc.k8sClientConfig) + k8sClient := getFakeK8sClient(tc.k8sClientConfig) tlsIssuer := newLocalScannerTLSIssuer( s.T(), - client, + k8sClient, storage.ManagerType_MANAGER_TYPE_HELM_CHART, sensorNamespace, sensorPodName, @@ -313,6 +314,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { s.Require().NoError(tlsIssuer.Start()) defer tlsIssuer.Stop(nil) s.Require().NotNil(tlsIssuer.certRefresher) + s.Require().False(tlsIssuer.certRefresher.Stopped()) for i := 0; i < tc.numFailedResponses; i++ { request := s.waitForRequest(ctx, tlsIssuer) @@ -328,7 +330,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { var secrets *v1.SecretList ok := concurrency.PollWithTimeout(func() bool { - secrets, err = client.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) + secrets, err = k8sClient.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) s.Require().NoError(err) return len(secrets.Items) == 2 }, 10*time.Millisecond, testTimeout) @@ -352,6 +354,48 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfullRefresh() { } } +func (s *localScannerTLSIssueIntegrationTests) TestUnexpectedOwnerStop() { + testCases := map[string]struct { + secretNames []string + }{ + "wrong owner for scanner secret": {secretNames: []string{"scanner-slim-tls"}}, + "wrong owner for scanner db secret": {secretNames: []string{"scanner-db-slim-tls"}}, + "wrong owner for scanner and scanner db secrets": {secretNames: []string{"scanner-slim-tls", "scanner-db-slim-tls"}}, + } + for tcName, tc := range testCases { + 
s.Run(tcName, func() { + secretsData := make(map[string]map[string][]byte, len(tc.secretNames)) + for _, secretName := range tc.secretNames { + secretsData[secretName] = nil + } + k8sClient := getFakeK8sClient(fakeK8sClientConfig{ + secretsData: secretsData, + secretsOwner: &metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "another-deployment", + UID: types.UID(uuid.NewDummy().String()), + }, + }) + tlsIssuer := newLocalScannerTLSIssuer( + s.T(), + k8sClient, + storage.ManagerType_MANAGER_TYPE_HELM_CHART, + sensorNamespace, + sensorPodName, + ) + + s.Require().NoError(tlsIssuer.Start()) + defer tlsIssuer.Stop(nil) + + ok := concurrency.PollWithTimeout(func() bool { + return tlsIssuer.certRefresher != nil && tlsIssuer.certRefresher.Stopped() + }, 10*time.Millisecond, 100*time.Millisecond) + s.True(ok) + }) + } +} + func (s *localScannerTLSIssueIntegrationTests) getCertificate() *mtls.IssuedCert { // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged cert, err := issueCertificate(mtls.WithValidityExpiringInHours()) @@ -456,12 +500,16 @@ func getFakeK8sClient(conf fakeK8sClientConfig) *fake.Clientset { objects = append(objects, sensorPod) } + secretsOwnerRef := sensorReplicaSetOwnerRef + if conf.secretsOwner != nil { + secretsOwnerRef = *conf.secretsOwner + } for secretName, secretData := range conf.secretsData { secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: sensorNamespace, - OwnerReferences: []metav1.OwnerReference{sensorReplicaSetOwnerRef}, + OwnerReferences: []metav1.OwnerReference{secretsOwnerRef}, }, Data: secretData, } @@ -482,6 +530,9 @@ type fakeK8sClientConfig struct { // if skipSensorReplicaSet is false, then a secret will be added to the test client for // each entry in this map, using the key as the secret name and the value as the secret data. 
secretsData map[string]map[string][]byte + // owner reference to used for the secrets specified in `secretsData`. If `nil` then the sensor + // replica set is used as owner + secretsOwner *metav1.OwnerReference } func newLocalScannerTLSIssuer( @@ -513,6 +564,7 @@ func (m *certificateRequesterMock) RequestCertificates(ctx context.Context) (*ce type certificateRefresherMock struct { mock.Mock + stopped bool } func (m *certificateRefresherMock) Start() error { @@ -522,6 +574,11 @@ func (m *certificateRefresherMock) Start() error { func (m *certificateRefresherMock) Stop() { m.Called() + m.stopped = true +} + +func (m *certificateRefresherMock) Stopped() bool { + return m.stopped } type componentGetterMock struct { From 832fa82008a4448135f07cb257de88621ee472b6 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Wed, 23 Feb 2022 18:26:25 +0100 Subject: [PATCH 41/46] account for eventual consistency of mock k8s client to make test more stable --- sensor/kubernetes/localscanner/tls_issuer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 52755e0d159e3..1bfc00477c12c 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -332,7 +332,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfulRefresh() { ok := concurrency.PollWithTimeout(func() bool { secrets, err = k8sClient.CoreV1().Secrets(sensorNamespace).List(context.Background(), metav1.ListOptions{}) s.Require().NoError(err) - return len(secrets.Items) == 2 + return len(secrets.Items) == 2 && len(secrets.Items[0].Data) > 0 && len(secrets.Items[1].Data) > 0 }, 10*time.Millisecond, testTimeout) s.Require().True(ok) for _, secret := range secrets.Items { From 8a3173c137cbe207a4edc2d60456e9e94f6f3209 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 24 Feb 2022 12:18:48 +0100 Subject: [PATCH 
42/46] add message to assertion --- sensor/kubernetes/localscanner/tls_issuer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 1bfc00477c12c..b0652b1fe0a2e 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -391,7 +391,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestUnexpectedOwnerStop() { ok := concurrency.PollWithTimeout(func() bool { return tlsIssuer.certRefresher != nil && tlsIssuer.certRefresher.Stopped() }, 10*time.Millisecond, 100*time.Millisecond) - s.True(ok) + s.True(ok, "cert refresher should be stopped") }) } } From 41218cf2d0696fcd6e59f0bc10cfd0fa8b0f10b0 Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 24 Feb 2022 12:25:50 +0100 Subject: [PATCH 43/46] decide component launch based on managedBy on CreateSensor --- sensor/kubernetes/localscanner/tls_issuer.go | 10 ----- .../localscanner/tls_issuer_test.go | 41 ++----------------- sensor/kubernetes/sensor/sensor.go | 5 ++- 3 files changed, 6 insertions(+), 50 deletions(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer.go b/sensor/kubernetes/localscanner/tls_issuer.go index 48a9141e8fefc..295bfc06bf501 100644 --- a/sensor/kubernetes/localscanner/tls_issuer.go +++ b/sensor/kubernetes/localscanner/tls_issuer.go @@ -6,7 +6,6 @@ import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/internalapi/central" - "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" @@ -46,7 +45,6 @@ var ( // up to date, using the specified retry parameters. 
func NewLocalScannerTLSIssuer( k8sClient kubernetes.Interface, - sensorManagedBy storage.ManagerType, sensorNamespace string, sensorPodName string, ) common.SensorComponent { @@ -56,7 +54,6 @@ func NewLocalScannerTLSIssuer( sensorNamespace: sensorNamespace, sensorPodName: sensorPodName, k8sClient: k8sClient, - sensorManagedBy: sensorManagedBy, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, certRefreshBackoff: certRefreshBackoff, @@ -70,7 +67,6 @@ type localScannerTLSIssuerImpl struct { sensorNamespace string sensorPodName string k8sClient kubernetes.Interface - sensorManagedBy storage.ManagerType msgToCentralC chan *central.MsgFromSensor msgFromCentralC chan *central.IssueLocalScannerCertsResponse certRefreshBackoff wait.Backoff @@ -102,12 +98,6 @@ func (i *localScannerTLSIssuerImpl) Start() error { ctx, cancel := context.WithTimeout(context.Background(), startTimeout) defer cancel() - if i.sensorManagedBy != storage.ManagerType_MANAGER_TYPE_HELM_CHART && - i.sensorManagedBy != storage.ManagerType_MANAGER_TYPE_KUBERNETES_OPERATOR { - log.Debugf("start aborted: local scanner scanner TLS issuer is disabled for manager type %q.", i.sensorManagedBy) - return nil - } - if i.certRefresher != nil { return i.abortStart(errors.New("already started.")) } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index b0652b1fe0a2e..4fe2e248815a8 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -60,7 +60,6 @@ func newLocalScannerTLSIssuerFixture(k8sClientConfig fakeK8sClientConfig) *local sensorNamespace: sensorNamespace, sensorPodName: sensorPodName, k8sClient: fixture.k8sClient, - sensorManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, msgToCentralC: msgToCentralC, msgFromCentralC: msgFromCentralC, certRefreshBackoff: certRefreshBackoff, @@ -171,27 +170,6 @@ func 
TestLocalScannerTLSIssuerFetchSensorDeploymentOwnerRefErrorStartFailure(t * } } -func TestLocalScannerTLSIssuerWrongManagerTypeStartNoop(t *testing.T) { - testCases := map[string]struct { - sensorManagedBy storage.ManagerType - }{ - "bundle installations": {sensorManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL}, - "unknown installation type": {sensorManagedBy: storage.ManagerType_MANAGER_TYPE_UNKNOWN}, - } - for tcName, tc := range testCases { - t.Run(tcName, func(t *testing.T) { - fixture := newLocalScannerTLSIssuerFixture(fakeK8sClientConfig{}) - fixture.tlsIssuer.sensorManagedBy = tc.sensorManagedBy - - startErr := fixture.tlsIssuer.Start() - - assert.NoError(t, startErr) - assert.Nil(t, fixture.tlsIssuer.certRefresher) - fixture.assertMockExpectations(t) - }) - } -} - func TestLocalScannerTLSIssuerProcessMessageKnownMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -300,13 +278,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfulRefresh() { scannerCert := s.getCertificate() scannerDBCert := s.getCertificate() k8sClient := getFakeK8sClient(tc.k8sClientConfig) - tlsIssuer := newLocalScannerTLSIssuer( - s.T(), - k8sClient, - storage.ManagerType_MANAGER_TYPE_HELM_CHART, - sensorNamespace, - sensorPodName, - ) + tlsIssuer := newLocalScannerTLSIssuer(s.T(), k8sClient, sensorNamespace, sensorPodName) tlsIssuer.certRefreshBackoff = wait.Backoff{ Duration: time.Millisecond, } @@ -377,13 +349,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestUnexpectedOwnerStop() { UID: types.UID(uuid.NewDummy().String()), }, }) - tlsIssuer := newLocalScannerTLSIssuer( - s.T(), - k8sClient, - storage.ManagerType_MANAGER_TYPE_HELM_CHART, - sensorNamespace, - sensorPodName, - ) + tlsIssuer := newLocalScannerTLSIssuer(s.T(), k8sClient, sensorNamespace, sensorPodName) s.Require().NoError(tlsIssuer.Start()) defer tlsIssuer.Stop(nil) @@ -538,11 +504,10 @@ type fakeK8sClientConfig struct { func 
newLocalScannerTLSIssuer( t *testing.T, k8sClient kubernetes.Interface, - sensorManagedBy storage.ManagerType, sensorNamespace string, sensorPodName string, ) *localScannerTLSIssuerImpl { - tlsIssuer := NewLocalScannerTLSIssuer(k8sClient, sensorManagedBy, sensorNamespace, sensorPodName) + tlsIssuer := NewLocalScannerTLSIssuer(k8sClient, sensorNamespace, sensorPodName) require.IsType(t, &localScannerTLSIssuerImpl{}, tlsIssuer) return tlsIssuer.(*localScannerTLSIssuerImpl) } diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index 59bdf5d5da7d5..55fbe0c64b539 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -163,10 +163,11 @@ func CreateSensor(client client.Interface, workloadHandler *fake.WorkloadManager return nil, errors.Wrap(err, "creating central client") } - if features.LocalImageScanning.Enabled() { + if features.LocalImageScanning.Enabled() && (helmManagedConfig.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_HELM_CHART || + helmManagedConfig.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_KUBERNETES_OPERATOR) { podName := os.Getenv("POD_NAME") components = append(components, - localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), helmManagedConfig.GetManagedBy(), sensorNamespace, podName)) + localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podName)) } s := sensor.NewSensor( From bb322d7bd0c3502ef14e12dc90289529eb4a530f Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 24 Feb 2022 15:04:54 +0100 Subject: [PATCH 44/46] use deny list instead of allow list for tls issuer component enablement --- sensor/kubernetes/sensor/sensor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index 55fbe0c64b539..d15b7ca88b6f3 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -163,8 +163,8 @@ func CreateSensor(client 
client.Interface, workloadHandler *fake.WorkloadManager return nil, errors.Wrap(err, "creating central client") } - if features.LocalImageScanning.Enabled() && (helmManagedConfig.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_HELM_CHART || - helmManagedConfig.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_KUBERNETES_OPERATOR) { + if features.LocalImageScanning.Enabled() && (helmManagedConfig.GetManagedBy() != storage.ManagerType_MANAGER_TYPE_UNKNOWN && + helmManagedConfig.GetManagedBy() != storage.ManagerType_MANAGER_TYPE_MANUAL) { podName := os.Getenv("POD_NAME") components = append(components, localscanner.NewLocalScannerTLSIssuer(client.Kubernetes(), sensorNamespace, podName)) From c4c2b071fea71f31c3487512dc2eba74fcafedbb Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 24 Feb 2022 15:09:38 +0100 Subject: [PATCH 45/46] use certificates for both scanner and scanner db in integ test --- .../localscanner/certificate_expiration_test.go | 6 +++--- sensor/kubernetes/localscanner/tls_issuer_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sensor/kubernetes/localscanner/certificate_expiration_test.go b/sensor/kubernetes/localscanner/certificate_expiration_test.go index 135e7555907b9..5e42021302c60 100644 --- a/sensor/kubernetes/localscanner/certificate_expiration_test.go +++ b/sensor/kubernetes/localscanner/certificate_expiration_test.go @@ -68,12 +68,12 @@ func (s *getSecretRenewalTimeSuite) TestGetSecretsCertRenewalTime() { s.LessOrEqual(certDuration, afterOffset/2) } -func issueCertificate(issueOption mtls.IssueCertOption) (*mtls.IssuedCert, error) { +func issueCertificate(serviceType storage.ServiceType, issueOption mtls.IssueCertOption) (*mtls.IssuedCert, error) { ca, err := mtls.CAForSigning() if err != nil { return nil, err } - subject := mtls.NewSubject("clusterId", storage.ServiceType_SCANNER_SERVICE) + subject := mtls.NewSubject("clusterId", serviceType) cert, err := ca.IssueCertForSubject(subject, 
issueOption) if err != nil { return nil, err @@ -82,7 +82,7 @@ func issueCertificate(issueOption mtls.IssueCertOption) (*mtls.IssuedCert, error } func issueCertificatePEM(issueOption mtls.IssueCertOption) ([]byte, error) { - cert, err := issueCertificate(issueOption) + cert, err := issueCertificate(storage.ServiceType_SCANNER_SERVICE, issueOption) if err != nil { return nil, err } diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 4fe2e248815a8..9f0c6e6892590 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -275,8 +275,8 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfulRefresh() { defer cancel() ca, err := mtls.CAForSigning() s.Require().NoError(err) - scannerCert := s.getCertificate() - scannerDBCert := s.getCertificate() + scannerCert := s.getCertificate(storage.ServiceType_SCANNER_SERVICE) + scannerDBCert := s.getCertificate(storage.ServiceType_SCANNER_DB_SERVICE) k8sClient := getFakeK8sClient(tc.k8sClientConfig) tlsIssuer := newLocalScannerTLSIssuer(s.T(), k8sClient, sensorNamespace, sensorPodName) tlsIssuer.certRefreshBackoff = wait.Backoff{ @@ -362,9 +362,9 @@ func (s *localScannerTLSIssueIntegrationTests) TestUnexpectedOwnerStop() { } } -func (s *localScannerTLSIssueIntegrationTests) getCertificate() *mtls.IssuedCert { +func (s *localScannerTLSIssueIntegrationTests) getCertificate(serviceType storage.ServiceType) *mtls.IssuedCert { // TODO(ROX-9463): use short expiration for testing renewal when ROX-9010 implementing `WithCustomCertLifetime` is merged - cert, err := issueCertificate(mtls.WithValidityExpiringInHours()) + cert, err := issueCertificate(serviceType, mtls.WithValidityExpiringInHours()) s.Require().NoError(err) return cert } From 7de21b009e1533655019a30e915c41ac49bcb1ee Mon Sep 17 00:00:00 2001 From: Juan Rodriguez Hortala Date: Thu, 24 Feb 2022 15:19:27 +0100 Subject: [PATCH 46/46] add 
message to assertion derived from a poll, ot make it more clear --- sensor/kubernetes/localscanner/tls_issuer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor/kubernetes/localscanner/tls_issuer_test.go b/sensor/kubernetes/localscanner/tls_issuer_test.go index 9f0c6e6892590..7dd018af8f236 100644 --- a/sensor/kubernetes/localscanner/tls_issuer_test.go +++ b/sensor/kubernetes/localscanner/tls_issuer_test.go @@ -306,7 +306,7 @@ func (s *localScannerTLSIssueIntegrationTests) TestSuccessfulRefresh() { s.Require().NoError(err) return len(secrets.Items) == 2 && len(secrets.Items[0].Data) > 0 && len(secrets.Items[1].Data) > 0 }, 10*time.Millisecond, testTimeout) - s.Require().True(ok) + s.Require().True(ok, "expected exactly 2 secrets with non-empty data available in the k8s API") for _, secret := range secrets.Items { var expectedCert *mtls.IssuedCert switch secretName := secret.GetName(); secretName {