From 84b000aef377ee313d4bb409e0ff18a08413bfcd Mon Sep 17 00:00:00 2001 From: Guzman Date: Tue, 24 Mar 2026 23:37:23 +0100 Subject: [PATCH 01/24] Add CO->ACS scheduled scan importer tool Standalone Go tool (scripts/compliance-operator-importer) that reads Compliance Operator ScanSettingBinding/ScanSetting resources from a Kubernetes cluster and creates equivalent ACS v2 compliance scan configurations. Phase 1: create-only with idempotency (skip existing), dry-run mode, exponential backoff retry, JSON report output, and structured exit codes (0/1/2). Infrastructure changes to support the standalone sub-module: - go.work workspace file so go vet/staticcheck/roxvet can typecheck the importer module alongside the main rox module - .golangci.yml: exclude importer path from linter path patterns - tools/roxvet/validateimports: skip packages outside the rox module prefix instead of reporting them as errors (these appear via go.work when analyzing workspace packages from other modules) Prompt: Build a standalone CO->ACS scheduled scan importer as a complete Go sub-module under scripts/compliance-operator-importer/. 
Co-Authored-By: Claude Sonnet 4.6 --- .golangci.yml | 2 + go.work | 6 + go.work.sum | 570 ++++++++++++++++++ .../compliance-operator-importer/.gitignore | 1 + .../compliance-operator-importer/DECISIONS.md | 56 ++ .../compliance-operator-importer/README.md | 26 + .../cmd/importer/main.go | 58 ++ scripts/compliance-operator-importer/go.mod | 35 ++ scripts/compliance-operator-importer/go.sum | 94 +++ .../internal/acs/client.go | 234 +++++++ .../internal/acs/client_test.go | 217 +++++++ .../internal/cofetch/client.go | 181 ++++++ .../internal/cofetch/types.go | 54 ++ .../internal/config/config.go | 176 ++++++ .../internal/config/config_test.go | 304 ++++++++++ .../internal/mapping/mapping.go | 114 ++++ .../internal/mapping/mapping_test.go | 307 ++++++++++ .../internal/mapping/schedule.go | 126 ++++ .../internal/mapping/schedule_test.go | 225 +++++++ .../internal/models/models.go | 166 +++++ .../internal/preflight/preflight.go | 158 +++++ .../internal/preflight/preflight_test.go | 210 +++++++ .../internal/problems/problems.go | 44 ++ .../internal/problems/problems_test.go | 136 +++++ .../internal/reconcile/create_only.go | 198 ++++++ .../internal/reconcile/create_only_test.go | 324 ++++++++++ .../internal/report/report.go | 97 +++ .../internal/report/report_test.go | 217 +++++++ .../internal/run/run.go | 213 +++++++ .../internal/run/run_test.go | 521 ++++++++++++++++ .../specs/00-spec-process.md | 57 ++ .../specs/01-cli-and-config-contract.md | 119 ++++ .../specs/02-co-to-acs-mapping.feature | 74 +++ .../03-idempotency-dry-run-retries.feature | 74 +++ .../specs/04-validation-and-acceptance.md | 160 +++++ .../specs/05-traceability-matrix.md | 18 + .../specs/06-implementation-backlog.md | 187 ++++++ .../analyzers/validateimports/analyzer.go | 6 + 38 files changed, 5765 insertions(+) create mode 100644 go.work create mode 100644 go.work.sum create mode 100644 scripts/compliance-operator-importer/.gitignore create mode 100644 
scripts/compliance-operator-importer/DECISIONS.md create mode 100644 scripts/compliance-operator-importer/README.md create mode 100644 scripts/compliance-operator-importer/cmd/importer/main.go create mode 100644 scripts/compliance-operator-importer/go.mod create mode 100644 scripts/compliance-operator-importer/go.sum create mode 100644 scripts/compliance-operator-importer/internal/acs/client.go create mode 100644 scripts/compliance-operator-importer/internal/acs/client_test.go create mode 100644 scripts/compliance-operator-importer/internal/cofetch/client.go create mode 100644 scripts/compliance-operator-importer/internal/cofetch/types.go create mode 100644 scripts/compliance-operator-importer/internal/config/config.go create mode 100644 scripts/compliance-operator-importer/internal/config/config_test.go create mode 100644 scripts/compliance-operator-importer/internal/mapping/mapping.go create mode 100644 scripts/compliance-operator-importer/internal/mapping/mapping_test.go create mode 100644 scripts/compliance-operator-importer/internal/mapping/schedule.go create mode 100644 scripts/compliance-operator-importer/internal/mapping/schedule_test.go create mode 100644 scripts/compliance-operator-importer/internal/models/models.go create mode 100644 scripts/compliance-operator-importer/internal/preflight/preflight.go create mode 100644 scripts/compliance-operator-importer/internal/preflight/preflight_test.go create mode 100644 scripts/compliance-operator-importer/internal/problems/problems.go create mode 100644 scripts/compliance-operator-importer/internal/problems/problems_test.go create mode 100644 scripts/compliance-operator-importer/internal/reconcile/create_only.go create mode 100644 scripts/compliance-operator-importer/internal/reconcile/create_only_test.go create mode 100644 scripts/compliance-operator-importer/internal/report/report.go create mode 100644 scripts/compliance-operator-importer/internal/report/report_test.go create mode 100644 
scripts/compliance-operator-importer/internal/run/run.go create mode 100644 scripts/compliance-operator-importer/internal/run/run_test.go create mode 100644 scripts/compliance-operator-importer/specs/00-spec-process.md create mode 100644 scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md create mode 100644 scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature create mode 100644 scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature create mode 100644 scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md create mode 100644 scripts/compliance-operator-importer/specs/05-traceability-matrix.md create mode 100644 scripts/compliance-operator-importer/specs/06-implementation-backlog.md diff --git a/.golangci.yml b/.golangci.yml index 0cd68aa0ae3d8..a9bbccf91bff7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -209,6 +209,7 @@ linters: path: roxctl/common/io/io\.go # io.go will by default use os.Stdin/os.StdErr. paths: - pkg/complianceoperator/api + - scripts/compliance-operator-importer - third_party$ - builtin$ - examples$ @@ -223,6 +224,7 @@ formatters: generated: lax paths: - pkg/complianceoperator/api + - scripts/compliance-operator-importer - third_party$ - builtin$ - examples$ diff --git a/go.work b/go.work new file mode 100644 index 0000000000000..e44d3c482d5cf --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.25.0 + +use ( + . 
+ ./scripts/compliance-operator-importer +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000000000..ee63c5c70873c --- /dev/null +++ b/go.work.sum @@ -0,0 +1,570 @@ +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +bitbucket.org/creachadair/shell v0.0.8/go.mod h1:vINzudofoUXZSJ5tREgpy+Etyjsag3ait5WOWImEVZ0= +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= +buf.build/go/protovalidate v1.1.0/go.mod h1:bGZcPiAQDC3ErCHK3t74jSoJDFOs2JH3d7LWuTEIdss= +buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q= +chainguard.dev/go-grpc-kit v0.17.15/go.mod h1:1wAVAX2CCamtFlfMs9PFzfgQQxX1/TQyF6cbWApbJ2U= +chainguard.dev/sdk v0.1.45/go.mod h1:Xq7KQhJHsWAovd8AiWBAj/ftcNkxMPx5YoQeGVTIj2c= +cloud.google.com/go/accessapproval v1.8.8/go.mod h1:RFwPY9JDKseP4gJrX1BlAVsP5O6kI8NdGlTmaeDefmk= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= +cloud.google.com/go/ai v0.8.0/go.mod h1:t3Dfk4cM61sytiggo2UyGsDVW3RF1qGZaUKDrZFyqkE= +cloud.google.com/go/aiplatform v1.120.0/go.mod h1:6mDthfmy0oS1EQhVFdijoxkVdI2+HIZkpuGTBpedeCg= +cloud.google.com/go/analytics v0.30.1/go.mod h1:V/FnINU5kMOsttZnKPnXfKi6clJUHTEXUKQjHxcNK8A= +cloud.google.com/go/apigateway v1.7.7/go.mod h1:j1bCmrUK1BzVHpiIyTApxB7cRyhivKzltqLmp6j6i7U= +cloud.google.com/go/apigeeconnect v1.7.7/go.mod h1:ftGK3nca0JePiVLl0A6alaMjKdOc5C+sAkFMyH2RH8U= +cloud.google.com/go/apigeeregistry v0.10.0/go.mod h1:SAlF5OhKvyLDuwWAaFAIVJjrEqKRrGTPkJs+TWNnSqg= +cloud.google.com/go/appengine v1.9.7/go.mod h1:y1XpGVeAhbsNzHida79cHbr3pFRsym0ob8xnC8yphbo= +cloud.google.com/go/area120 
v0.10.0/go.mod h1:Xg3fKl4xU3UVai9wsI1FXwNU8wSCDYT7dFZfwJKViAM= +cloud.google.com/go/asset v1.22.1/go.mod h1:NlvWwmca7CX6BIBEdRNxOocH6DowmBghAAHucOHuHng= +cloud.google.com/go/assuredworkloads v1.13.0/go.mod h1:o/oHEOnUlribR+uJWTKQo8A5RhSl9K9FNeMOew4TJ3M= +cloud.google.com/go/automl v1.15.0/go.mod h1:U9zOtQb8zVrFNGTuW3BfxeqmLyeleLgT9B12EaXfODg= +cloud.google.com/go/baremetalsolution v1.4.0/go.mod h1:K6C6g4aS8LW95I0fEHZiBsBlh0UxwDLGf+S/vyfXbvg= +cloud.google.com/go/batch v1.14.0/go.mod h1:oeQveyG6NDS/ks2ilOP4LzKRmuIaI7GLe0CkR7WF6pk= +cloud.google.com/go/beyondcorp v1.2.0/go.mod h1:sszcgxpPPBEfLzbI0aYCTg6tT1tyt3CmKav3NZIUcvI= +cloud.google.com/go/bigquery v1.74.0/go.mod h1:iViO7Cx3A/cRKcHNRsHB3yqGAMInFBswrE9Pxazsc90= +cloud.google.com/go/bigtable v1.42.0/go.mod h1:oZ30nofVB6/UYGg7lBwGLWSea7NZUvw/WvBBgLY07xU= +cloud.google.com/go/billing v1.21.0/go.mod h1:ZGairB3EVnb3i09E2SxFxo50p5unPaMTuo1jh6jW9js= +cloud.google.com/go/binaryauthorization v1.10.0/go.mod h1:WOuiaQkI4PU/okwrcREjSAr2AUtjQgVe+PlrXKOmKKw= +cloud.google.com/go/certificatemanager v1.9.6/go.mod h1:vWogV874jKZkSRDFCMM3r7wqybv8WXs3XhyNff6o/Zo= +cloud.google.com/go/channel v1.21.0/go.mod h1:8v3TwHtgLmFxTpL2U+e10CLFOQN8u/Vr9RhYcJUS3y8= +cloud.google.com/go/cloudbuild v1.25.0/go.mod h1:lCu+T6IPkobPo2Nw+vCE7wuaAl9HbXLzdPx/tcF+oWo= +cloud.google.com/go/clouddms v1.8.8/go.mod h1:QtCyw+a73dlkDb2q20aTAPvfaTZCepDDi6Gb1AKq0a4= +cloud.google.com/go/cloudtasks v1.13.7/go.mod h1:H0TThOUG+Ml34e2+ZtW6k6nt4i9KuH3nYAJ5mxh7OM4= +cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= +cloud.google.com/go/compute v1.54.0/go.mod h1:RfBj0L1x/pIM84BrzNX2V21oEv16EKRPBiTcBRRH1Ww= +cloud.google.com/go/contactcenterinsights v1.17.4/go.mod h1:kZe6yOnKDfpPz2GphDHynxk/Spx+53UX/pGf+SmWAKM= +cloud.google.com/go/container v1.46.0/go.mod h1:A7gMqdQduTk46+zssWDTKbGS2z46UsJNXfKqvMI1ZO4= +cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= +cloud.google.com/go/dataflow 
v0.11.1/go.mod h1:3s6y/h5Qz7uuxTmKJKBifkYZ3zs63jS+6VGtSu8Cf7Y= +cloud.google.com/go/dataform v0.13.0/go.mod h1:U3fqrPY5jAcFh1a8rQb4a+PQ7zKlc5qfgotFZ+luKPo= +cloud.google.com/go/datafusion v1.8.7/go.mod h1:4dkFb1la41qCEXh1AzYtFwl842bu2ikTUXyKhjvFCb0= +cloud.google.com/go/datalabeling v0.9.7/go.mod h1:EEUVn+wNn3jl19P2S13FqE1s9LsKzRsPuuMRq2CMsOk= +cloud.google.com/go/dataplex v1.28.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA= +cloud.google.com/go/dataproc/v2 v2.16.0/go.mod h1:HlzFg8k1SK+bJN3Zsy2z5g6OZS1D4DYiDUgJtF0gJnE= +cloud.google.com/go/dataqna v0.9.8/go.mod h1:2lHKmGPOqzzuqCc5NI0+Xrd5om4ulxGwPpLB4AnFgpA= +cloud.google.com/go/datastore v1.22.0/go.mod h1:aopSX+Whx0lHspWWBj+AjWt68/zjYsPfDe3LjWtqZg8= +cloud.google.com/go/datastream v1.15.1/go.mod h1:aV1Grr9LFon0YvqryE5/gF1XAhcau2uxN2OvQJPpqRw= +cloud.google.com/go/deploy v1.27.3/go.mod h1:7LFIYYTSSdljYRqY3n+JSmIFdD4lv6aMD5xg0crB5iw= +cloud.google.com/go/dialogflow v1.76.0/go.mod h1:mdLkMmSCghfcP85X9dFBlirC1OssS65KE5hrrSz2GXY= +cloud.google.com/go/dlp v1.28.0/go.mod h1:C3od1fIK8lf7Kr62aU1Uh0z4OL5Z8s3do3znAiEupAw= +cloud.google.com/go/documentai v1.42.0/go.mod h1:CABOUzRNOuvb/QwJS2LS80Hpqbu3UW2afyRKTYuW7bo= +cloud.google.com/go/domains v0.10.7/go.mod h1:T3WG/QUAO/52z4tUPooKS8AY7yXaFxPYn1V3F0/JbNQ= +cloud.google.com/go/edgecontainer v1.4.4/go.mod h1:yyNVHsCKtsX/0mqFdbljQw0Uo660q2dlMPaiqYiC2Tg= +cloud.google.com/go/errorreporting v0.4.0/go.mod h1:dZGEhqzdHZSRxxWLVjC3Ue5CVaROzvP58D9rU6zbBfw= +cloud.google.com/go/essentialcontacts v1.7.7/go.mod h1:ytycWAEn/aKUMRKQPMVgMrAtphEMgjbzL8vFwM3tqXs= +cloud.google.com/go/eventarc v1.18.0/go.mod h1:/6SDoqh5+9QNUqCX4/oQcJVK16fG/snHBSXu7lrJtO8= +cloud.google.com/go/filestore v1.10.3/go.mod h1:94ZGyLTx9j+aWKozPQ6Wbq1DuImie/L/HIdGMshtwac= +cloud.google.com/go/firestore v1.21.0/go.mod h1:1xH6HNcnkf/gGyR8udd6pFO4Z7GWJSwLKQMx/u6UrP4= +cloud.google.com/go/functions v1.19.7/go.mod h1:xbcKfS7GoIcaXr2FSwmtn9NXal1JR4TV6iYZlgXffwA= +cloud.google.com/go/gkebackup v1.8.1/go.mod 
h1:GAaAl+O5D9uISH5MnClUop2esQW4pDa2qe/95A4l7YQ= +cloud.google.com/go/gkeconnect v0.12.5/go.mod h1:wMD2RXcsAWlkREZWJDVeDV70PYka1iEb9stFmgpw+5o= +cloud.google.com/go/gkehub v0.16.0/go.mod h1:ADp27Ucor8v81wY+x/5pOxTorxkPj/xswH3AUpN62GU= +cloud.google.com/go/gkemulticloud v1.6.0/go.mod h1:bGpd4o/Z5Z/XFlaojkgdVisHRwb+fLJvUPzsmV0I9ok= +cloud.google.com/go/grafeas v0.3.16/go.mod h1:I/yrRMOEsLasrmZXQzmDXwrJ3ZPn3dQWLaWt4lXmYvE= +cloud.google.com/go/gsuiteaddons v1.7.8/go.mod h1:DBKNHH4YXAdd/rd6zVvtOGAJNGo0ekOh+nIjTUDEJ5U= +cloud.google.com/go/iap v1.11.3/go.mod h1:+gXO0ClH62k2LVlfhHzrpiHQNyINlEVmGAE3+DB4ShU= +cloud.google.com/go/ids v1.5.7/go.mod h1:N3ZQOIgIBwwOu2tzyhmh3JDT+kt8PcoKkn2BRT9Qe4A= +cloud.google.com/go/iot v1.8.7/go.mod h1:HvVcypV8LPv1yTXSLCNK+YCtqGHhq+p0F3BXETfpN+U= +cloud.google.com/go/language v1.14.6/go.mod h1:7y3J9OexQsfkWNGCxhT+7lb64pa60e12ZCoWDOHxJ1M= +cloud.google.com/go/lifesciences v0.10.7/go.mod h1:v3AbTki9iWttEls/Wf4ag3EqeLRHofploOcpsLnu7iY= +cloud.google.com/go/managedidentities v1.7.7/go.mod h1:nwNlMxtBo2YJMvsKXRtAD1bL41qiCI9npS7cbqrsJUs= +cloud.google.com/go/maps v1.29.0/go.mod h1:FNATcM5ziB2TDE2IVWH4f/yeXc+SbUk1X+bmKjR8HEA= +cloud.google.com/go/mediatranslation v0.9.7/go.mod h1:mz3v6PR7+Fd/1bYrRxNFGnd+p4wqdc/fyutqC5QHctw= +cloud.google.com/go/memcache v1.11.7/go.mod h1:AU1jYlUqCihxapcJ1GGMtlMWDVhzjbfUWBXqsXa4rBg= +cloud.google.com/go/metastore v1.14.8/go.mod h1:h1XI2LpD4ohJhQYn9TwXqKb5sVt6KSo47ft96SiFF1s= +cloud.google.com/go/networkconnectivity v1.21.0/go.mod h1:XC1UJ+tqBsLWz73dqrMc7kUvdTv0FIxtDGv6YntTBO0= +cloud.google.com/go/networkmanagement v1.23.0/go.mod h1:QTYCWp5UxUnU280SqF7AX/mf6NhsqKblmLeCALQmx5c= +cloud.google.com/go/networksecurity v0.11.0/go.mod h1:JLgDsg4tOyJ3eMO8lypjqMftbfd60SJ+P7T+DUmWBsM= +cloud.google.com/go/notebooks v1.12.7/go.mod h1:uR9pxAkKmlNloibMr9Q1t8WhIu4P2JeqJs7c064/0Mo= +cloud.google.com/go/optimization v1.7.7/go.mod h1:OY2IAlX23o52qwMAZ0w65wibKuV12a4x6IHDTCq6kcU= +cloud.google.com/go/orchestration v1.11.10/go.mod 
h1:tz7m1s4wNEvhNNIM3JOMH0lYxBssu9+7si5MCPw/4/0= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.16.0/go.mod h1:PRmLgZ1loD1hGaqnTBww1nETbqcqAvmTQOLYiIZ7Nvk= +cloud.google.com/go/oslogin v1.14.7/go.mod h1:NB6NqBHfDMwznePdBVX+ILllc1oPCdNSGp5u/WIyndY= +cloud.google.com/go/phishingprotection v0.9.7/go.mod h1:JTI4HNGyAbWolBoNOoCyCF0e3cqPNrYnlievHU49EwE= +cloud.google.com/go/policytroubleshooter v1.11.7/go.mod h1:JP/aQ+bUkt4Gz6lQXBi/+A/6nyNRZ0Pvxui5Xl9ieyk= +cloud.google.com/go/privatecatalog v0.10.8/go.mod h1:BkLHi+rtAGYBt5DocXLytHhF0n6F03Tegxgty40Y7aA= +cloud.google.com/go/profiler v0.4.3/go.mod h1:3xFodugWfPIQZWFcXdUmfa+yTiiyQ8fWrdT+d2Sg4J0= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.4.0/go.mod h1:2lS/XQKq5qtOMs6kHBK+WX1ytUC36kLl2ig3zqsGUx8= +cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= +cloud.google.com/go/recaptchaenterprise/v2 v2.21.0/go.mod h1:HxQYqZC2/zl2CvKN7jJEv71vEdDi1GMGNUiZxnpiuVI= +cloud.google.com/go/recommendationengine v0.9.7/go.mod h1:snZ/FL147u86Jqpv1j95R+CyU5NvL/UzYiyDo6UByTM= +cloud.google.com/go/recommender v1.13.6/go.mod h1:y5/5womtdOaIM3xx+76vbsiA+8EBTIVfWnxHDFHBGJM= +cloud.google.com/go/redis v1.18.3/go.mod h1:x8HtXZbvMBDNT6hMHaQ022Pos5d7SP7YsUH8fCJ2Wm4= +cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= +cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw= +cloud.google.com/go/retail v1.26.0/go.mod h1:gMfh6s174Mvy1rK4g50J9TH5sRim8px+Krml25kdrqo= +cloud.google.com/go/run v1.15.0/go.mod h1:rgFHMdAopLl++57vzeqA+a1o2x0/ILZnEacRD6nC0EA= +cloud.google.com/go/scheduler v1.11.8/go.mod h1:bNKU7/f04eoM6iKQpwVLvFNBgGyJNS87RiFN73mIPik= +cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= 
+cloud.google.com/go/security v1.19.2/go.mod h1:KXmf64mnOsLVKe8mk/bZpU1Rsvxqc0Ej0A6tgCeN93w= +cloud.google.com/go/servicedirectory v1.12.7/go.mod h1:gOtN+qbuCMH6tj2dqlDY3qQL7w3V0+nkWaZElnJK8Ps= +cloud.google.com/go/shell v1.8.7/go.mod h1:OTke7qc3laNEW5Jr5OV9VR3IwU5x5VqGOE6705zFex4= +cloud.google.com/go/spanner v1.88.0/go.mod h1:MzulBwuuYwQUVdkZXBBFapmXee3N+sQrj2T/yup6uEE= +cloud.google.com/go/speech v1.30.0/go.mod h1:F2+NJujR8uzDLd6bwy5kgtVycxvEq06nzvzz5eQ/gMo= +cloud.google.com/go/storagetransfer v1.13.1/go.mod h1:S858w5l383ffkdqAqrAA+BC7KlhCqeNieK3sFf5Bj4Y= +cloud.google.com/go/talent v1.8.4/go.mod h1:3yukBXUTVFNyKcJpUExW/k5gqEy8qW6OCNj7WdN0MWo= +cloud.google.com/go/texttospeech v1.16.0/go.mod h1:AeSkoH3ziPvapsuyI07TWY4oGxluAjntX+pF4PJ2jy0= +cloud.google.com/go/tpu v1.8.4/go.mod h1:ul0cyWSHr6jHGZYElZe6HvQn35VY93RAlwpDiSBRnPA= +cloud.google.com/go/translate v1.12.7/go.mod h1:wwJp14NZyWvcrFANhIXutXj0pOBkYciBHwSlUOykcjI= +cloud.google.com/go/video v1.27.1/go.mod h1:xzfAC77B4vtnbi/TT3UUxEjCa/+Ehy5EA8w470ytOig= +cloud.google.com/go/videointelligence v1.12.7/go.mod h1:XAk5hCMY+GihxJ55jNoMdwdXSNZnCl3wGs2+94gK7MA= +cloud.google.com/go/vision/v2 v2.9.6/go.mod h1:lJC+vP15D5znJvHQYjEoTKnpToX1L93BUlvBmzM0gyg= +cloud.google.com/go/vmmigration v1.10.0/go.mod h1:LDztCWEb+RwS1bPg4Xzt0fcJS9kVrFxa3ejhH7OW9vg= +cloud.google.com/go/vmwareengine v1.3.6/go.mod h1:ps0rb+Skgpt9ppHYC0o5DqtJ5ld2FyS8sAqtbHH8t9s= +cloud.google.com/go/vpcaccess v1.8.7/go.mod h1:9RYw5bVvk4Z51Rc8vwXT63yjEiMD/l7XyEaDyrNHgmk= +cloud.google.com/go/webrisk v1.11.2/go.mod h1:yH44GeXz5iz4HFsIlGeoVvnjwnmfbni7Lwj1SelV4f0= +cloud.google.com/go/websecurityscanner v1.7.7/go.mod h1:ng/PzARaus3Bj4Os4LpUnyYHsbtJky1HbBDmz148v1o= +cloud.google.com/go/workflows v1.14.3/go.mod h1:CC9+YdVI2Kvp0L58WajHpEfKJxhrtRh3uQ0SYWcmAk4= +contrib.go.opencensus.io/exporter/stackdriver v0.13.15-0.20230702191903-2de6d2748484/go.mod h1:uxw+4/0SiKbbVSD/F2tk5pJTdVcfIBBcsQ8gwcu4X+E= +cuelabs.dev/go/oci/ociregistry 
v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= +cuelang.org/go v0.15.3/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= +cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= +github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= +github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= +github.com/Antonboom/testifylint v1.3.1/go.mod h1:NV0hTlteCkViPW9mSR4wEMfwp+Hs1T3dY60bkvSfhpM= +github.com/Crocmagnon/fatcontext v0.2.2/go.mod h1:WSn/c/+MMNiD8Pri0ahRj0o9jVpeowzavOQplBJw6u0= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= +github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/MakeNowJust/heredoc/v2 v2.0.1/go.mod h1:6/2Abh5s+hc3g9nbWLe9ObDIOhaRrqsyY9MWy+4JdRM= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod 
h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= +github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3/go.mod 
h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/beevik/ntp v1.5.0/go.mod h1:mJEhBrwT76w9D+IfOEGvuzyuudiW9E52U2BaTrMOYow= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/buildkite/agent/v3 v3.115.2/go.mod h1:a3t090/PPxAIIPCjlXF5fhfRvG0E9huFsnMX7B76iIQ= +github.com/buildkite/go-pipeline v0.16.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA= +github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko v1.4.0/go.mod h1:0vbODqUFEcVf4v2xVXRfZZRsqJVsCCHTG/TBRByGK4E= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= +github.com/bwesterb/go-ristretto v1.2.3/go.mod 
h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= +github.com/cavaliercoder/go-rpm v0.0.0-20200122174316-8cb9fd9c31a8/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= +github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/chainguard-dev/clog v1.7.0/go.mod h1:4+WFhRMsGH79etYXY3plYdp+tCz/KCkU8fAr0HoaPvs= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= +github.com/cheggaaa/pb/v3 v3.1.6/go.mod h1:urxmfVtaxT+9aWk92DbsvXFZtNSWQSO5TRAp+MJ3l1s= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= +github.com/ckaznocha/protoc-gen-lint v0.3.0/go.mod h1:ASGO5J8wYQ8yJPBE68EntfsSKRU8tp7qAskT3BjIsvE= +github.com/clarketm/json v1.17.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6/go.mod h1:Ikt4Wfpln1YOrak+auA8BNxgiilj0Y2y7nO+aN2eMzk= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod 
h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= +github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U= +github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= +github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt 
v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= +github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= +github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw= +github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0= +github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/ign-converter v0.0.0-20241125185625-2f773079ca81/go.mod h1:Q7SbzjFkayIfwm+b+nXedvIcP2SFAndA7ET/JPNNc1I= +github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= +github.com/coreos/ignition/v2 v2.21.0/go.mod h1:axhFZ3jEgXBjKtKp0rSMv2li0Rt43rasp5hS9uyYjco= +github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edsrzf/mmap-go 
v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/eggsampler/acme/v3 v3.6.2/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= +github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= +github.com/emicklei/proto v1.14.2/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= +github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= +github.com/fullstorydev/grpcurl v1.9.3/go.mod h1:/b4Wxe8bG6ndAjlfSUjwseQReUDUvBJiFEB7UllOlUE= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-piv/piv-go/v2 v2.4.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= 
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.29.0/go.mod h1:D6QxqeMlgIPuT02L66f2ccrZ7AGgHkzKmmTMZhk/Kc4= +github.com/go-redis/redismock/v9 v9.2.0/go.mod h1:18KHfGDK4Y6c2R0H38EUGWAdc7ZQS9gfYxc94k7rWT0= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godror/godror v0.40.4/go.mod h1:i8YtVTHUJKfFT3wTat4A9UoqScUtZXiYB9Rf3SVARgc= +github.com/godror/knownpb v0.1.1/go.mod h1:4nRFbQo1dDuwKnblRXDxrfCFYeT4hjg3GjMqef58eRE= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-sql/civil 
v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.59.1/go.mod h1:jX5Oif4C7P0j9++YB2MMJmoNrb01NJ8ITqKWNLewThg= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= +github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= +github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= +github.com/google/addlicense v1.1.1/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA= +github.com/google/generative-ai-go v0.19.0/go.mod h1:JYolL13VG7j79kM5BtHz4qwONHkeJQzOCkKXnpqtS/E= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/rpmpack v0.7.1/go.mod h1:h1JL16sUTWCLI/c39ox1rDaTBo3BXUQGjczVJyK4toU= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18= +github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gostaticanalysis/analysisutil 
v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag= +github.com/guregu/null v4.0.0+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= +github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= +github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= +github.com/jgautheron/goconst 
v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.6.1/go.mod h1:vF1QkOO159prdo6mHRxak2CpzDpHAfKiPUDP/NeRnX8= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= +github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= +github.com/ldez/tagliatelle v0.5.0/go.mod 
h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk= +github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= +github.com/letsencrypt/borp v0.0.0-20240620175310-a78493c6e2bd/go.mod h1:gMSMCNKhxox/ccR923EJsIvHeVVYfCABGbirqa0EwuM= +github.com/letsencrypt/challtestsrv v1.3.3/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158/go.mod h1:ZFNBS3H6OEsprCRjscty6GCBe5ZiX44x6qY4s7+bDX0= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0/go.mod 
h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1/go.mod h1:RuJdxo0oI6dClIaMzdl3hewq3a065RH65dofJP03h8I= +github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/moricho/tparallel 
v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= +github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0/go.mod h1:F/7q8/HZz+TXjlsoZQQKVYvXTZaFH4QRa3y+j1p7MS0= +github.com/open-policy-agent/opa v1.12.1/go.mod h1:RnDgm04GA1RjEXJvrsG9uNT/+FyBNmozcPvA2qz60M4= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= +github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg= +github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/library-go v0.0.0-20250129210218-fe56c2cf5d70/go.mod h1:TQx0VEhZ/92qRXIMDu2Wg4bUPmw5HRNE6wpSZ+IsP0Y= 
+github.com/openshift/machine-config-operator v0.0.1-0.20250401081735-9026ff2d802e/go.mod h1:wmBAHvqHXXSFa0yz3scg0RZLxcs5B51ZTeaVlCSPaDk= +github.com/operator-framework/api v0.29.0/go.mod h1:0whQE4mpMDd2zyHkQe+bFa3DLoRs6oGWCbu8dY/3pyc= +github.com/owenrumney/go-sarif v1.1.1 h1:QNObu6YX1igyFKhdzd7vgzmw7XsWN3/6NMGuDzBgXmE= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/polyfloyd/go-errorlint v1.5.2/go.mod h1:sH1QC1pxxi0fFecsVIzBmxtrgd9IF/SkJpA6wqyKAJs= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2/go.mod h1:671/KciyzKiTmvIYTpp7CzWD1/TNXVPgeDLJcGFWrOM= +github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= +github.com/prometheus/prometheus v0.301.0/go.mod h1:BJLjWCKNfRfjp7Q48DrAjARnCi7GhfUVvUFEAWTssZM= +github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU= +github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY= +github.com/pseudomuto/protoc-gen-doc v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= 
+github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/ryancurrah/gomodguard v1.3.2/go.mod h1:LqdemiFomEjcxOqirbQCb3JFvSxH2JUYMerTFd3sF2o= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.26.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.22.0/go.mod h1:sR5n3LzZ/52rn4xxRBJk38iPe/hjiA0CkVcyiAHNCrM= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/segmentio/conf v1.2.0/go.mod h1:Y3B9O/PqqWqjyxyWWseyj/quPEtMu1zDp/kVbSWWaB0= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/sigstore/rekor-tiles v0.1.11 h1:0NAJ2EhD1r6DH95FUuDTqUDd+c31LSKzoXGW5ZCzFq0= +github.com/sigstore/rekor-tiles v0.1.11/go.mod 
h1:eGIeqASh52pgWpmp/j5KZDjmKdVwob7eTYskVVRCu5k= +github.com/sigstore/timestamp-authority v1.2.9 h1:L9Fj070/EbMC8qUk8BchkrYCS1BT5i93Bl6McwydkFs= +github.com/sigstore/timestamp-authority v1.2.9/go.mod h1:QyRnZchz4o+xdHyK5rvCWacCHxWmpX+mgvJwB1OXcLY= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= +github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck 
v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/transparency-dev/tessera v1.0.1-0.20251104110637-ba6c65c4ae73/go.mod h1:hxs+XmMCxM44pskCyfRFhEuUkpETNcfl6fTNOFsh7O8= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= +github.com/uwu-tools/magex v0.10.1/go.mod h1:5uQvmocqEueCbgK4Dm67mIfhjq80o408F17J6867go8= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= +github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= +github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= 
+github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1/go.mod h1:nmuySobZb4kFgFy6BptpXp/BBw+xFSyvVPP6auoJB4k= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= 
+go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= +go-simpler.org/sloglint v0.7.1/go.mod h1:OlaVDRh/FKKd4X4sIMbsz8st97vomydceL146Fthh/c= +go.etcd.io/etcd/api/v3 v3.6.8/go.mod h1:qyQj1HZPUV3B5cbAL8scG62+fyz5dSxxu0w8pn28N6Q= +go.etcd.io/etcd/client/pkg/v3 v3.6.8/go.mod h1:GsiTRUZE2318PggZkAo6sWb6l8JLVrnckTNfbG8PWtw= +go.etcd.io/etcd/client/v3 v3.6.8/go.mod h1:MVG4BpSIuumPi+ELF7wYtySETmoTWBHVcDoHdVupwt8= +go.etcd.io/etcd/etcdctl/v3 v3.6.8/go.mod h1:8X8SvxOc5kPQ0e+jbSx3RgKzTNQ3O8rBuQEoDKuQFX0= +go.etcd.io/etcd/etcdutl/v3 v3.6.8/go.mod h1:HGfpMG6Sjo9S6KWeXctiYcN8LjLbbUBdAjCYb8V977w= +go.etcd.io/etcd/pkg/v3 v3.6.8/go.mod h1:TRibVNe+FqJIe1abOAA1PsuQ4wqO87ZaOoprg09Tn8c= +go.etcd.io/etcd/server/v3 v3.6.8/go.mod h1:88dCtwUnSirkUoJbflQxxWXqtBSZa6lSG0Kuej+dois= +go.etcd.io/etcd/tests/v3 v3.6.8/go.mod h1:U1ioDy7TXzz2UXhSQfbJ3++PsryNwiniHtdbXZPprX0= +go.etcd.io/etcd/v3 v3.6.8/go.mod h1:syLTueu7AV0Pw/TcOTHEeWOtcAD/xFnnXB0gukO92Vc= +go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= +go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/ratelimit v0.3.1/go.mod 
h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +goa.design/goa/v3 v3.23.4/go.mod h1:da3W585WfJe9gT+hJCbP8YFB9yc4gmuCwB0MvkbwhXk= +gocloud.dev v0.45.0/go.mod h1:0kXKmkCLG6d31N7NyLZWzt7jDSQura9zD/mWgiB6THI= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260311181403-84a4fc48630c/go.mod h1:9amqk/8LQWEC4RjyUxMx1DebyQ7hZB9gvl67bHmgZ2E= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1/go.mod h1:YNKnb2OAApgYn2oYY47Rn7alMr1zWjb2U8Q0aoGWiNc= +google.golang.org/grpc/gcp/observability v1.0.1/go.mod h1:yM0UcrYRMe/B+Nu0mDXeTJNDyIMJRJnzuxqnJMz7Ewk= +google.golang.org/grpc/security/advancedtls v1.0.0/go.mod h1:o+s4go+e1PJ2AjuQMY5hU82W7lDlefjJA6FqEHRVHWk= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= 
+gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8= +honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +k8s.io/cri-api v0.32.13/go.mod h1:DCzMuTh2padoinefWME0G678Mc3QFbLMF2vEweGzBAI= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kms v0.35.3/go.mod h1:VT+4ekZAdrZDMgShK37vvlyHUVhwI9t/9tvh0AyCWmQ= +k8s.io/kube-aggregator v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU= +k8s.io/metrics v0.35.3/go.mod h1:/O8UBb5QVyAekR2QvL/WWxskpdV1wVSEl4MSLAy4Ql4= +k8s.io/pod-security-admission v0.32.1/go.mod h1:psSkvN+noAracLrouPjVDID/7TiMWoHQLNoBTVCY/nw= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +open-cluster-management.io/api v0.15.0/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= +oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.14.3/go.mod h1:BgHrVkRmx7iWCumslrUpxE6BX474IrMXc+7R0RpV+E8= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= +sigs.k8s.io/kustomize/kustomize/v5 v5.7.1/go.mod h1:+5/SrBcJ4agx1SJknGuR/c9thwRSKLxnKoI5BzXFaLU= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y= +tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws= diff --git a/scripts/compliance-operator-importer/.gitignore b/scripts/compliance-operator-importer/.gitignore new file mode 100644 index 0000000000000..e660fd93d3196 --- /dev/null +++ b/scripts/compliance-operator-importer/.gitignore @@ -0,0 +1 @@ +bin/ diff --git a/scripts/compliance-operator-importer/DECISIONS.md b/scripts/compliance-operator-importer/DECISIONS.md new file mode 100644 index 0000000000000..ea959f917aa0b --- /dev/null +++ b/scripts/compliance-operator-importer/DECISIONS.md @@ -0,0 +1,56 @@ +# V1 Scope Freeze: CO -> ACS Importer + +## Status + +This document freezes Phase 1 behavior. Any deviation requires updating this file and corresponding specs. + +## Frozen decisions + +1. **Execution model** + - Standalone external importer only. + - No runtime/product code changes in Sensor/Central/ACS backend. + +2. **Importer mode** + - Phase 1 is create-only. + - Importer may create new ACS scan configs. + - Importer must never update existing ACS scan configs. + +3. **Implementation language** + - Use **Go** for Phase 1 implementation. + - Do not implement Phase 1 importer in bash/shell. + - Python is an acceptable future alternative only if explicitly re-decided in this file. + +4. **Existing-name behavior** + - If `scanName` already exists in ACS, skip resource. + - Add one entry to `problems[]` with clear `description` and `fixHint`. + +5. **Error handling model** + - Resource-level issue => skip resource, continue processing, emit `problems[]` entry. + - Fatal preflight/config issue => fail run before resource processing. + +6. 
**Cluster targeting** + - Source cluster selected like `kubectl` (current context by default, optional context override). + - Single destination ACS cluster ID via `--acs-cluster-id`. + +7. **ACS authentication model** + - Default auth mode is token (`ACS_API_TOKEN` via `--acs-token-env`). + - Optional basic-auth mode is allowed for local/dev environments. + - Basic mode uses username/password inputs and the same preflight endpoint checks. + +8. **Profile kind fallback** + - Missing `ScanSettingBinding.spec.profiles[].kind` defaults to `Profile`. + +9. **Schedule conversion** + - Convert valid CO cron to ACS schedule fields. + - Conversion failure => skip resource + `problems[]` entry with remediation hint. + +10. **Provenance marker** + +- Not required in Phase 1 create-only mode. +- Can be revisited in a future update/reconcile phase. + +## Deferred to Phase 2 (out of scope) + +- Update/reconcile mode (`PUT`) for existing configs. +- Ownership/provenance-based update guard. +- Multi-target cluster mapping per binding. diff --git a/scripts/compliance-operator-importer/README.md b/scripts/compliance-operator-importer/README.md new file mode 100644 index 0000000000000..7860b08d75525 --- /dev/null +++ b/scripts/compliance-operator-importer/README.md @@ -0,0 +1,26 @@ +# Compliance Operator -> ACS Importer (Spec Set) + +This directory contains **specifications only** for a standalone importer that reads existing Compliance Operator resources and creates equivalent ACS compliance scan configurations via ACS API. + +No runtime changes to Sensor/Central are in scope for this work item. +Phase 1 mode is **create-only** (no ACS updates). + +## Spec-driven workflow + +Implement in this order: + +1. Read `DECISIONS.md` (frozen v1 scope and non-goals). +2. Read `specs/00-spec-process.md` (process and quality gates). +3. Use `specs/06-implementation-backlog.md` to execute slice-by-slice. +4. Implement CLI contract from `specs/01-cli-and-config-contract.md`. +5. 
Implement behavior scenarios in: + - `specs/02-co-to-acs-mapping.feature` + - `specs/03-idempotency-dry-run-retries.feature` +6. Validate with `specs/04-validation-and-acceptance.md`. + +Definition of done: + +- every MUST statement in spec docs is implemented, +- every `Scenario` in `.feature` files has an automated test, +- resource-level issues are skipped and captured in `problems[]` with fix hints, +- acceptance commands in `specs/04-validation-and-acceptance.md` pass on a real cluster. diff --git a/scripts/compliance-operator-importer/cmd/importer/main.go b/scripts/compliance-operator-importer/cmd/importer/main.go new file mode 100644 index 0000000000000..cd17554751a45 --- /dev/null +++ b/scripts/compliance-operator-importer/cmd/importer/main.go @@ -0,0 +1,58 @@ +// Binary co-acs-scan-importer reads Compliance Operator ScanSettingBinding +// resources from a Kubernetes cluster and creates equivalent ACS compliance +// scan configurations through the ACS v2 API. +// +// Usage: +// +// co-acs-scan-importer \ +// --acs-endpoint https://central.example.com \ +// --co-namespace openshift-compliance \ +// --acs-cluster-id \ +// [--dry-run] [--report-json /tmp/report.json] +package main + +import ( + "context" + "fmt" + "os" + + "github.com/stackrox/co-acs-importer/internal/acs" + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/config" + "github.com/stackrox/co-acs-importer/internal/preflight" + "github.com/stackrox/co-acs-importer/internal/run" +) + +func main() { + os.Exit(mainWithCode()) +} + +func mainWithCode() int { + cfg, err := config.ParseAndValidate(os.Args[1:]) + if err != nil { + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + return run.ExitFatalError + } + + ctx := context.Background() + + // IMP-CLI-015, IMP-CLI-016: preflight check before any resource processing. 
+ if err := preflight.Run(ctx, cfg); err != nil { + fmt.Fprintf(os.Stderr, "FATAL: preflight failed: %v\n", err) + return run.ExitFatalError + } + + acsClient, err := acs.NewClient(cfg) + if err != nil { + fmt.Fprintf(os.Stderr, "FATAL: failed to create ACS client: %v\n", err) + return run.ExitFatalError + } + + coClient, err := cofetch.NewClient(cfg) + if err != nil { + fmt.Fprintf(os.Stderr, "FATAL: failed to create CO client: %v\n", err) + return run.ExitFatalError + } + + return run.NewRunner(cfg, acsClient, coClient).Run(ctx) +} diff --git a/scripts/compliance-operator-importer/go.mod b/scripts/compliance-operator-importer/go.mod new file mode 100644 index 0000000000000..312ad91530093 --- /dev/null +++ b/scripts/compliance-operator-importer/go.mod @@ -0,0 +1,35 @@ +module github.com/stackrox/co-acs-importer + +go 1.25.0 + +require ( + k8s.io/apimachinery v0.35.3 + k8s.io/client-go v0.35.3 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.9.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + 
sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/scripts/compliance-operator-importer/go.sum b/scripts/compliance-operator-importer/go.sum new file mode 100644 index 0000000000000..3dc8c6782020e --- /dev/null +++ b/scripts/compliance-operator-importer/go.sum @@ -0,0 +1,94 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag 
v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.9.0 
h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ= +k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4= +k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8= +k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg= +k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/scripts/compliance-operator-importer/internal/acs/client.go b/scripts/compliance-operator-importer/internal/acs/client.go new file mode 100644 index 0000000000000..1eea9d296eb2f --- /dev/null +++ b/scripts/compliance-operator-importer/internal/acs/client.go @@ -0,0 +1,234 @@ +// Package acs provides an HTTP client for the ACS compliance scan configuration API. +// +// create-only: PUT is never called in Phase 1 +package acs + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// client is the concrete implementation of models.ACSClient. +// It issues only GET and POST requests. No PUT method exists in Phase 1. +type client struct { + httpClient *http.Client + baseURL string + cfg *models.Config +} + +// NewClient creates a models.ACSClient from cfg. +// +// TLS is configured from cfg.CACertFile and cfg.InsecureSkipVerify. +// Timeout is set from cfg.RequestTimeout. 
+// Authentication:
+//   - token mode: "Authorization: Bearer " (token resolved from cfg.TokenEnv)
+//   - basic mode: HTTP Basic auth (cfg.Username + password from cfg.PasswordEnv)
+//
+// create-only: PUT is never called in Phase 1
+func NewClient(cfg *models.Config) (models.ACSClient, error) {
+	tlsConf, err := buildTLSConfig(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("acs: building TLS config: %w", err)
+	}
+
+	// Fall back to a 30s request timeout when none was configured.
+	reqTimeout := cfg.RequestTimeout
+	if reqTimeout == 0 {
+		reqTimeout = 30 * time.Second
+	}
+
+	httpClient := &http.Client{
+		Transport: &http.Transport{TLSClientConfig: tlsConf},
+		Timeout:   reqTimeout,
+	}
+
+	return &client{
+		httpClient: httpClient,
+		baseURL:    cfg.ACSEndpoint,
+		cfg:        cfg,
+	}, nil
+}
+
+// buildTLSConfig derives the tls.Config used by the HTTP transport: TLS 1.2
+// minimum, optional InsecureSkipVerify (explicit CLI opt-in), and an optional
+// custom CA pool loaded from cfg.CACertFile.
+func buildTLSConfig(cfg *models.Config) (*tls.Config, error) {
+	tlsConf := &tls.Config{
+		MinVersion:         tls.VersionTLS12,
+		InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec // controlled by explicit CLI flag
+	}
+	if cfg.CACertFile == "" {
+		return tlsConf, nil
+	}
+
+	caPEM, err := os.ReadFile(cfg.CACertFile)
+	if err != nil {
+		return nil, fmt.Errorf("reading CA cert file %q: %w", cfg.CACertFile, err)
+	}
+	pool := x509.NewCertPool()
+	if !pool.AppendCertsFromPEM(caPEM) {
+		return nil, fmt.Errorf("no valid PEM certificates found in %q", cfg.CACertFile)
+	}
+	tlsConf.RootCAs = pool
+	return tlsConf, nil
+}
+
+// addAuth adds the correct Authorization header to req based on the configured auth mode.
+func (c *client) addAuth(req *http.Request) error {
+	if c.cfg.AuthMode == models.AuthModeBasic {
+		req.SetBasicAuth(c.cfg.Username, os.Getenv(c.cfg.PasswordEnv))
+		return nil
+	}
+
+	// Token mode is the default for any other auth mode value.
+	envName := c.cfg.TokenEnv
+	if envName == "" {
+		envName = "ACS_API_TOKEN"
+	}
+	bearer := os.Getenv(envName)
+	if bearer == "" {
+		return fmt.Errorf("acs: token env var %q is empty", envName)
+	}
+	req.Header.Set("Authorization", "Bearer "+bearer)
+	return nil
+}
+
+// Preflight checks ACS connectivity and auth by calling:
+//
+//	GET /v2/compliance/scan/configurations?pagination.limit=1
+//
+// Only HTTP 200 is treated as success; any other status returns an error.
+//
+// Implements IMP-CLI-015, IMP-CLI-016.
+func (c *client) Preflight(ctx context.Context) error {
+	endpoint := c.baseURL + "/v2/compliance/scan/configurations?pagination.limit=1"
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
+	if err != nil {
+		return fmt.Errorf("acs: preflight request: %w", err)
+	}
+	req.Header.Set("Accept", "application/json")
+	if authErr := c.addAuth(req); authErr != nil {
+		return authErr
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("acs: preflight failed: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	// Map the common auth failures to actionable messages; everything else
+	// besides 200 is reported generically.
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return nil
+	case http.StatusUnauthorized:
+		return errors.New("acs: preflight: HTTP 401 Unauthorized - check token or credentials")
+	case http.StatusForbidden:
+		return errors.New("acs: preflight: HTTP 403 Forbidden - token lacks required permissions")
+	default:
+		return fmt.Errorf("acs: preflight: unexpected HTTP %d", resp.StatusCode)
+	}
+}
+
+// ListScanConfigurations returns all existing scan configuration summaries by calling:
+//
+//	GET /v2/compliance/scan/configurations?pagination.limit=1000
+//
+// Implements IMP-IDEM-001 (used to build the existing-name set).
+func (c *client) ListScanConfigurations(ctx context.Context) ([]models.ACSConfigSummary, error) { + url := c.baseURL + "/v2/compliance/scan/configurations?pagination.limit=1000" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("acs: list request: %w", err) + } + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return nil, err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("acs: list scan configurations: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("acs: list scan configurations: HTTP %d", resp.StatusCode) + } + + var listResp models.ACSListResponse + if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { + return nil, fmt.Errorf("acs: decoding list response: %w", err) + } + return listResp.Configurations, nil +} + +// complianceScanConfigurationResponse is used to parse the id from the POST response. +type complianceScanConfigurationResponse struct { + ID string `json:"id"` +} + +// CreateScanConfiguration sends POST /v2/compliance/scan/configurations and returns +// the ID of the newly created configuration. +// +// IMPORTANT: This method MUST use POST only. No PUT is called anywhere in Phase 1. +// Implements IMP-IDEM-001, IMP-IDEM-003. 
+// +// create-only: PUT is never called in Phase 1 +func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACSCreatePayload) (string, error) { + body, err := json.Marshal(payload) + if err != nil { + return "", fmt.Errorf("acs: marshalling create payload: %w", err) + } + + url := c.baseURL + "/v2/compliance/scan/configurations" + // POST only - never PUT - create-only Phase 1 + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return "", fmt.Errorf("acs: create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return "", err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("acs: create scan configuration: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + return "", &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("POST /v2/compliance/scan/configurations returned HTTP %d", resp.StatusCode)} + } + + var created complianceScanConfigurationResponse + if err := json.NewDecoder(resp.Body).Decode(&created); err != nil { + return "", fmt.Errorf("acs: decoding create response: %w", err) + } + if created.ID == "" { + return "", errors.New("acs: create response contained empty id") + } + return created.ID, nil +} + +// HTTPError is returned by CreateScanConfiguration when the server responds with +// a non-success HTTP status. The reconciler uses StatusCode() to decide whether +// to retry (transient: 429,502,503,504) or abort (non-transient: 400,401,403,404). 
+type HTTPError struct { + Code int + Message string +} + +func (e *HTTPError) Error() string { return e.Message } +func (e *HTTPError) StatusCode() int { return e.Code } diff --git a/scripts/compliance-operator-importer/internal/acs/client_test.go b/scripts/compliance-operator-importer/internal/acs/client_test.go new file mode 100644 index 0000000000000..45177b7cd38bd --- /dev/null +++ b/scripts/compliance-operator-importer/internal/acs/client_test.go @@ -0,0 +1,217 @@ +package acs_test + +import ( + "context" + "crypto/tls" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/acs" + "github.com/stackrox/co-acs-importer/internal/models" +) + +// newTestConfig returns a Config wired to the given TLS test server URL. +// InsecureSkipVerify is always true so the self-signed httptest cert is accepted. +func newTestConfig(serverURL string) *models.Config { + return &models.Config{ + ACSEndpoint: serverURL, + AuthMode: models.AuthModeToken, + TokenEnv: "ACS_API_TOKEN", + RequestTimeout: 5 * time.Second, + MaxRetries: 3, + InsecureSkipVerify: true, + } +} + +// startTLSServer starts an httptest TLS server with the provided handler and +// returns the server plus an http.Client pre-configured with the server's TLS cert. 
+func startTLSServer(handler http.Handler) (*httptest.Server, *http.Client) { + srv := httptest.NewTLSServer(handler) + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // test only + }, + Timeout: 5 * time.Second, + } + return srv, client +} + +// IMP-CLI-015: Preflight 200 => nil error +func TestPreflight_200_ReturnsNil(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + http.NotFound(w, r) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(models.ACSListResponse{}) + })) + defer srv.Close() + + t.Setenv("ACS_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err != nil { + t.Errorf("IMP-CLI-015: Preflight with HTTP 200 should return nil, got: %v", err) + } +} + +// IMP-CLI-016: Preflight 401 => error +func TestPreflight_401_ReturnsError(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + })) + defer srv.Close() + + t.Setenv("ACS_API_TOKEN", "bad-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err == nil { + t.Error("IMP-CLI-016: Preflight with HTTP 401 should return error, got nil") + } +} + +// IMP-CLI-016: Preflight 403 => error +func TestPreflight_403_ReturnsError(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Forbidden", http.StatusForbidden) + })) + defer srv.Close() + + t.Setenv("ACS_API_TOKEN", "bad-token") + cfg := 
newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err == nil { + t.Error("IMP-CLI-016: Preflight with HTTP 403 should return error, got nil") + } +} + +// IMP-IDEM-001: ListScanConfigurations returns parsed list +func TestListScanConfigurations_ReturnsParsedList(t *testing.T) { + want := []models.ACSConfigSummary{ + {ID: "id-1", ScanName: "cis-weekly"}, + {ID: "id-2", ScanName: "pci-daily"}, + } + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + http.NotFound(w, r) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(models.ACSListResponse{ + Configurations: want, + TotalCount: int32(len(want)), + }) + })) + defer srv.Close() + + t.Setenv("ACS_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + got, err := client.ListScanConfigurations(context.Background()) + if err != nil { + t.Fatalf("IMP-IDEM-001: ListScanConfigurations: %v", err) + } + if len(got) != len(want) { + t.Fatalf("IMP-IDEM-001: expected %d configs, got %d", len(want), len(got)) + } + for i, g := range got { + if g.ID != want[i].ID || g.ScanName != want[i].ScanName { + t.Errorf("IMP-IDEM-001: item[%d]: got {%s %s}, want {%s %s}", i, g.ID, g.ScanName, want[i].ID, want[i].ScanName) + } + } +} + +// IMP-IDEM-003: CreateScanConfiguration uses POST method (never PUT) +// IMP-IDEM-001: CreateScanConfiguration returns new config ID +func TestCreateScanConfiguration_UsesPOSTAndReturnsID(t *testing.T) { + const wantID = "new-config-id-123" + var gotMethod string + + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + 
http.NotFound(w, r) + return + } + gotMethod = r.Method + if r.Method != http.MethodPost { + // Fail loudly if any non-POST method is used + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(map[string]string{"id": wantID}) + })) + defer srv.Close() + + t.Setenv("ACS_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + payload := models.ACSCreatePayload{ + ScanName: "cis-weekly", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } + + gotID, err := client.CreateScanConfiguration(context.Background(), payload) + if err != nil { + t.Fatalf("IMP-IDEM-001: CreateScanConfiguration: %v", err) + } + + // IMP-IDEM-003: must use POST, never PUT + if gotMethod != http.MethodPost { + t.Errorf("IMP-IDEM-003: expected method POST, got %s", gotMethod) + } + if gotMethod == http.MethodPut { + t.Errorf("IMP-IDEM-003: VIOLATION - PUT was called, which is forbidden in Phase 1") + } + + // IMP-IDEM-001: must return the ID from the response + if gotID != wantID { + t.Errorf("IMP-IDEM-001: expected ID %q, got %q", wantID, gotID) + } +} + +// IMP-IDEM-003: Compile-time guard - verify the ACSClient interface has no Put method. +// This is a documentation-as-code assertion: if someone adds a Put/Update method to +// ACSClient, it would need to be added here too, making the violation visible. +func TestNoPUTMethodOnInterface(t *testing.T) { + // The models.ACSClient interface must only define: + // Preflight, ListScanConfigurations, CreateScanConfiguration + // If a PUT-based method were added, the reconciler mock in create_only_test.go + // would fail to compile (it only implements the three allowed methods). 
+ // + // IMP-IDEM-003: This test documents the invariant. The real enforcement is in + // create_only_test.go where the mock ACSClient deliberately records every HTTP + // method and the test asserts PUT is never among them. + t.Log("IMP-IDEM-003: ACSClient interface has no PUT method - enforced by interface definition") +} diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go new file mode 100644 index 0000000000000..caf5e8f27a441 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -0,0 +1,181 @@ +package cofetch + +import ( + "context" + "errors" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/models" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" +) + +// GVRs for Compliance Operator resources. +var ( + scanSettingBindingGVR = schema.GroupVersionResource{ + Group: "compliance.openshift.io", + Version: "v1alpha1", + Resource: "scansettingbindings", + } + scanSettingGVR = schema.GroupVersionResource{ + Group: "compliance.openshift.io", + Version: "v1alpha1", + Resource: "scansettings", + } +) + +// k8sClient is the production implementation of COClient backed by a dynamic k8s client. +type k8sClient struct { + dynamic dynamic.Interface + namespace string // empty string means all namespaces +} + +// NewClient creates a COClient using the kube context specified in cfg. +// If cfg.KubeContext is empty the current context is used. +// If cfg.COAllNamespaces is true, resources are listed across all namespaces. 
+func NewClient(cfg *models.Config) (COClient, error) {
+	overrides := &clientcmd.ConfigOverrides{}
+	if cfg.KubeContext != "" {
+		overrides.CurrentContext = cfg.KubeContext
+	}
+	rules := clientcmd.NewDefaultClientConfigLoadingRules()
+
+	restConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
+	if err != nil {
+		return nil, fmt.Errorf("build kubeconfig: %w", err)
+	}
+
+	dynClient, err := dynamic.NewForConfig(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("create dynamic client: %w", err)
+	}
+
+	// An empty namespace tells the dynamic client to list across all namespaces.
+	namespace := cfg.CONamespace
+	if cfg.COAllNamespaces {
+		namespace = ""
+	}
+
+	return &k8sClient{dynamic: dynClient, namespace: namespace}, nil
+}
+
+// ListScanSettingBindings returns all ScanSettingBindings from the configured namespace(s).
+func (c *k8sClient) ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) {
+	list, err := c.dynamic.Resource(scanSettingBindingGVR).Namespace(c.namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("list ScanSettingBindings in namespace %q: %w", c.namespace, err)
+	}
+
+	bindings := make([]ScanSettingBinding, 0, len(list.Items))
+	for i := range list.Items {
+		ssb, parseErr := parseScanSettingBinding(list.Items[i].Object)
+		if parseErr != nil {
+			// Malformed resources are skipped so one bad object cannot abort the whole list.
+			continue
+		}
+		bindings = append(bindings, ssb)
+	}
+	return bindings, nil
+}
+
+// GetScanSetting fetches a named ScanSetting from the given namespace.
+func (c *k8sClient) GetScanSetting(ctx context.Context, namespace, name string) (*ScanSetting, error) { + obj, err := c.dynamic.Resource(scanSettingGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("get ScanSetting %q in namespace %q: %w", name, namespace, err) + } + + ss, err := parseScanSetting(obj.Object) + if err != nil { + return nil, fmt.Errorf("parse ScanSetting %q: %w", name, err) + } + return ss, nil +} + +// parseScanSettingBinding converts an unstructured map into a ScanSettingBinding. +func parseScanSettingBinding(obj map[string]interface{}) (ScanSettingBinding, error) { + meta, _ := obj["metadata"].(map[string]interface{}) + name, _ := meta["name"].(string) + namespace, _ := meta["namespace"].(string) + + spec, _ := obj["spec"].(map[string]interface{}) + + // Parse profiles list into []NamedObjectReference. + var profiles []NamedObjectReference + if rawProfiles, ok := spec["profiles"].([]interface{}); ok { + for _, rp := range rawProfiles { + pm, ok := rp.(map[string]interface{}) + if !ok { + continue + } + profiles = append(profiles, NamedObjectReference{ + Name: stringField(pm, "name"), + Kind: stringField(pm, "kind"), + APIGroup: stringField(pm, "apiGroup"), + }) + } + } + + // Parse settingsRef as a NamedObjectReference. + var settingsRef *NamedObjectReference + if sr, ok := spec["settingsRef"].(map[string]interface{}); ok { + settingsRef = &NamedObjectReference{ + Name: stringField(sr, "name"), + Kind: stringField(sr, "kind"), + APIGroup: stringField(sr, "apiGroup"), + } + } + + if name == "" { + return ScanSettingBinding{}, errors.New("ScanSettingBinding has no name") + } + + // Populate ScanSettingName from settingsRef.Name for backward compatibility + // with callers that read the flat field (e.g. mapping package). 
+ scanSettingName := "" + if settingsRef != nil { + scanSettingName = settingsRef.Name + } + + return ScanSettingBinding{ + Namespace: namespace, + Name: name, + ScanSettingName: scanSettingName, + SettingsRef: settingsRef, + Profiles: profiles, + }, nil +} + +// parseScanSetting converts an unstructured map into a ScanSetting. +func parseScanSetting(obj map[string]interface{}) (*ScanSetting, error) { + meta, _ := obj["metadata"].(map[string]interface{}) + name, _ := meta["name"].(string) + namespace, _ := meta["namespace"].(string) + + // Schedule is nested under complianceSuiteSettings.schedule. + schedule := "" + if css, ok := obj["complianceSuiteSettings"].(map[string]interface{}); ok { + schedule, _ = css["schedule"].(string) + } + + if name == "" { + return nil, errors.New("ScanSetting has no name") + } + + return &ScanSetting{ + Namespace: namespace, + Name: name, + Schedule: schedule, + }, nil +} + +// stringField safely extracts a string value from an unstructured map. +func stringField(m map[string]interface{}, key string) string { + v, _ := m[key].(string) + return v +} diff --git a/scripts/compliance-operator-importer/internal/cofetch/types.go b/scripts/compliance-operator-importer/internal/cofetch/types.go new file mode 100644 index 0000000000000..bb04563b71332 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/cofetch/types.go @@ -0,0 +1,54 @@ +// Package cofetch defines types for Compliance Operator resource discovery. +package cofetch + +import "context" + +// NamedObjectReference is a lightweight reference to a named Kubernetes object. +// It mirrors the CO NamedObjectReference type without importing the CO library. +type NamedObjectReference struct { + Name string + Kind string // "Profile" or "TailoredProfile"; empty defaults to "Profile" (IMP-MAP-002) + APIGroup string +} + +// ResolvedKind returns the kind, defaulting to "Profile" when empty (IMP-MAP-002). 
+func (r NamedObjectReference) ResolvedKind() string { + if r.Kind == "" { + return "Profile" + } + return r.Kind +} + +// ProfileRef is an alias for NamedObjectReference used in profile reference lists. +// It is a type alias (not a new type) so []ProfileRef and []NamedObjectReference are +// interchangeable, allowing both client.go and mapping_test.go to construct profiles. +type ProfileRef = NamedObjectReference + +// ScanSettingBinding is a simplified representation of the Compliance Operator +// ScanSettingBinding resource (compliance.openshift.io/v1alpha1). +// Fields are extracted from unstructured Kubernetes API responses. +type ScanSettingBinding struct { + Namespace string + Name string + ScanSettingName string // name of the referenced ScanSetting (flattened from SettingsRef.Name) + SettingsRef *NamedObjectReference // full structured settings reference + Profiles []NamedObjectReference +} + +// ScanSetting is a simplified representation of the Compliance Operator ScanSetting +// resource (compliance.openshift.io/v1alpha1). +type ScanSetting struct { + Namespace string + Name string + // Schedule is the cron expression from complianceSuiteSettings.schedule. + Schedule string +} + +// COClient abstracts Compliance Operator resource discovery. +// All methods are context-aware and must not mutate cluster state. +type COClient interface { + // ListScanSettingBindings returns all ScanSettingBindings in the configured namespace(s). + ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) + // GetScanSetting fetches a named ScanSetting from the given namespace. 
+ GetScanSetting(ctx context.Context, namespace, name string) (*ScanSetting, error) +} diff --git a/scripts/compliance-operator-importer/internal/config/config.go b/scripts/compliance-operator-importer/internal/config/config.go new file mode 100644 index 0000000000000..18d9867ff2a23 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config.go @@ -0,0 +1,176 @@ +// Package config parses and validates all CLI flags and environment variables +// for the CO -> ACS importer tool. +package config + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +const ( + defaultTokenEnv = "ACS_API_TOKEN" + defaultPasswordEnv = "ACS_PASSWORD" + defaultTimeout = 30 * time.Second + defaultMaxRetries = 5 +) + +// ParseAndValidate parses flags from args (typically os.Args[1:]), resolves +// environment variables, and validates the resulting Config. +// It uses a dedicated FlagSet so it is safe to call from tests. +func ParseAndValidate(args []string) (*models.Config, error) { + fs := flag.NewFlagSet("co-acs-importer", flag.ContinueOnError) + + // IMP-CLI-001 + acsEndpoint := fs.String("acs-endpoint", os.Getenv("ACS_ENDPOINT"), "ACS endpoint URL (https://). Also read from ACS_ENDPOINT env var.") + + // IMP-CLI-023 / IMP-CLI-026 + acsAuthMode := fs.String("acs-auth-mode", "", "Auth mode: token (default) or basic. (IMP-CLI-023, IMP-CLI-026)") + + // IMP-CLI-002 / token mode + acsTokenEnv := fs.String("acs-token-env", defaultTokenEnv, "Env var name that holds the ACS API token (token mode).") + + // IMP-CLI-024 / basic mode + acsUsername := fs.String("acs-username", os.Getenv("ACS_USERNAME"), "ACS username for basic auth. 
Also read from ACS_USERNAME env var.") + acsPasswordEnv := fs.String("acs-password-env", defaultPasswordEnv, "Env var name that holds the ACS password (basic mode).") + + // IMP-CLI-003 + kubeContext := fs.String("source-kubecontext", "", "Kubernetes context to use as source cluster (default: current context).") + + // IMP-CLI-004 + coNamespace := fs.String("co-namespace", "", "Namespace to read Compliance Operator resources from.") + coAllNamespaces := fs.Bool("co-all-namespaces", false, "Read Compliance Operator resources from all namespaces.") + + // IMP-CLI-005 + acsClusterID := fs.String("acs-cluster-id", "", "ACS cluster ID that all imported scan configs target.") + + // IMP-CLI-007 + dryRun := fs.Bool("dry-run", false, "Disable all ACS write operations.") + + // IMP-CLI-008 + reportJSON := fs.String("report-json", "", "Write structured JSON report to this file path.") + + // IMP-CLI-009 + requestTimeout := fs.Duration("request-timeout", defaultTimeout, "HTTP request timeout (e.g. 
30s).") + + // IMP-CLI-010 + maxRetries := fs.Int("max-retries", defaultMaxRetries, "Maximum number of retries for ACS API calls (min 0).") + + // IMP-CLI-011 + caCertFile := fs.String("ca-cert-file", "", "Path to CA certificate file for TLS verification.") + + // IMP-CLI-012 + insecureSkipVerify := fs.Bool("insecure-skip-verify", false, "Skip TLS certificate verification (not recommended for production).") + + if err := fs.Parse(args); err != nil { + return nil, fmt.Errorf("flag parse error: %w", err) + } + + cfg := &models.Config{ + ACSEndpoint: *acsEndpoint, + TokenEnv: *acsTokenEnv, + Username: *acsUsername, + PasswordEnv: *acsPasswordEnv, + KubeContext: *kubeContext, + CONamespace: *coNamespace, + COAllNamespaces: *coAllNamespaces, + ACSClusterID: *acsClusterID, + DryRun: *dryRun, + ReportJSON: *reportJSON, + RequestTimeout: *requestTimeout, + MaxRetries: *maxRetries, + CACertFile: *caCertFile, + InsecureSkipVerify: *insecureSkipVerify, + } + + // IMP-CLI-026: default auth mode to token when not explicitly set. + switch models.AuthMode(*acsAuthMode) { + case "": + cfg.AuthMode = models.AuthModeToken + case models.AuthModeToken, models.AuthModeBasic: + cfg.AuthMode = models.AuthMode(*acsAuthMode) + default: + return nil, fmt.Errorf( + "invalid --acs-auth-mode %q: must be %q or %q (IMP-CLI-023)", + *acsAuthMode, models.AuthModeToken, models.AuthModeBasic, + ) + } + + if err := validate(cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// validate checks all cross-field invariants after flags and env vars are resolved. +func validate(cfg *models.Config) error { + // IMP-CLI-001: endpoint required. + if cfg.ACSEndpoint == "" { + return errors.New("--acs-endpoint (or ACS_ENDPOINT env var) is required (IMP-CLI-001)") + } + + // IMP-CLI-013: endpoint must be https://. 
+ if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + return fmt.Errorf("--acs-endpoint must start with https:// (got %q) (IMP-CLI-013)", cfg.ACSEndpoint) + } + + // Strip trailing slash for consistency. + cfg.ACSEndpoint = strings.TrimRight(cfg.ACSEndpoint, "/") + + // IMP-CLI-014 / IMP-CLI-025: validate auth material for the chosen mode. + switch cfg.AuthMode { + case models.AuthModeToken: + token := os.Getenv(cfg.TokenEnv) + if token == "" { + return fmt.Errorf( + "token auth mode requires a non-empty token in env var %q (IMP-CLI-014, IMP-CLI-025)\n"+ + "Fix: set %s= before running", + cfg.TokenEnv, cfg.TokenEnv, + ) + } + case models.AuthModeBasic: + if cfg.Username == "" { + return errors.New( + "basic auth mode requires --acs-username (or ACS_USERNAME env var) to be non-empty (IMP-CLI-025)\n" + + "Fix: pass --acs-username= or set ACS_USERNAME=", + ) + } + password := os.Getenv(cfg.PasswordEnv) + if password == "" { + return fmt.Errorf( + "basic auth mode requires a non-empty password in env var %q (IMP-CLI-025)\n"+ + "Fix: set %s= before running", + cfg.PasswordEnv, cfg.PasswordEnv, + ) + } + } + + // IMP-CLI-004: must have exactly one of --co-namespace or --co-all-namespaces. + if cfg.CONamespace == "" && !cfg.COAllNamespaces { + return errors.New( + "one of --co-namespace or --co-all-namespaces is required (IMP-CLI-004)", + ) + } + if cfg.CONamespace != "" && cfg.COAllNamespaces { + return errors.New( + "--co-namespace and --co-all-namespaces are mutually exclusive (IMP-CLI-004)", + ) + } + + // IMP-CLI-005: cluster ID required. + if cfg.ACSClusterID == "" { + return errors.New("--acs-cluster-id is required (IMP-CLI-005)") + } + + // IMP-CLI-010: max retries must be non-negative. 
+ if cfg.MaxRetries < 0 { + return fmt.Errorf("--max-retries must be >= 0, got %d (IMP-CLI-010)", cfg.MaxRetries) + } + + return nil +} diff --git a/scripts/compliance-operator-importer/internal/config/config_test.go b/scripts/compliance-operator-importer/internal/config/config_test.go new file mode 100644 index 0000000000000..99aea9caa2ff1 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config_test.go @@ -0,0 +1,304 @@ +package config + +import ( + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// minimalValidArgs returns a set of args that always satisfies all required +// flags when token env var is pre-set by the caller. +func minimalValidArgs(overrides ...string) []string { + base := []string{ + "--acs-endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--acs-cluster-id", "cluster-abc", + } + return append(base, overrides...) +} + +// setenv is a test helper that sets an env var and returns a cleanup func. +func setenv(t *testing.T, key, value string) { + t.Helper() + t.Setenv(key, value) +} + +// TestIMP_CLI_001_EndpointRequired verifies that omitting --acs-endpoint +// (with no ACS_ENDPOINT env var) produces an error. +func TestIMP_CLI_001_EndpointRequired(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + _, err := ParseAndValidate([]string{ + "--co-namespace", "openshift-compliance", + "--acs-cluster-id", "cluster-abc", + }) + if err == nil { + t.Fatal("expected error for missing --acs-endpoint, got nil") + } +} + +// TestIMP_CLI_001_EndpointFromEnv verifies that ACS_ENDPOINT env var is +// accepted in place of --acs-endpoint. 
+func TestIMP_CLI_001_EndpointFromEnv(t *testing.T) { + setenv(t, "ACS_ENDPOINT", "https://central.example.com") + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate([]string{ + "--co-namespace", "openshift-compliance", + "--acs-cluster-id", "cluster-abc", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from env, got %q", cfg.ACSEndpoint) + } +} + +// TestIMP_CLI_013_HTTPSEnforced verifies that non-https endpoints are rejected. +func TestIMP_CLI_013_HTTPSEnforced(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cases := []string{ + "http://central.example.com", + "central.example.com", + "ftp://central.example.com", + } + for _, endpoint := range cases { + t.Run(endpoint, func(t *testing.T) { + _, err := ParseAndValidate([]string{ + "--acs-endpoint", endpoint, + "--co-namespace", "openshift-compliance", + "--acs-cluster-id", "cluster-abc", + }) + if err == nil { + t.Fatalf("expected error for non-https endpoint %q, got nil", endpoint) + } + }) + } +} + +// TestIMP_CLI_023_AuthModeEnum verifies that invalid auth modes are rejected. +func TestIMP_CLI_023_AuthModeEnum(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "oauth")) + if err == nil { + t.Fatal("expected error for invalid auth mode 'oauth', got nil") + } +} + +// TestIMP_CLI_023_AuthModeTokenAccepted verifies that "token" is accepted. +func TestIMP_CLI_023_AuthModeTokenAccepted(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode, got %q", cfg.AuthMode) + } +} + +// TestIMP_CLI_023_AuthModeBasicAccepted verifies that "basic" is accepted. 
+func TestIMP_CLI_023_AuthModeBasicAccepted(t *testing.T) { + setenv(t, defaultPasswordEnv, "secret") + + cfg, err := ParseAndValidate(minimalValidArgs( + "--acs-auth-mode", "basic", + "--acs-username", "admin", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeBasic { + t.Errorf("expected basic mode, got %q", cfg.AuthMode) + } +} + +// TestIMP_CLI_024_BasicModeFields verifies that basic mode reads username and +// password from the expected sources. +func TestIMP_CLI_024_BasicModeFields(t *testing.T) { + setenv(t, "ACS_PASSWORD", "s3cr3t") + setenv(t, "ACS_USERNAME", "alice") + + cfg, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "basic")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "alice" { + t.Errorf("expected username alice, got %q", cfg.Username) + } + if cfg.PasswordEnv != defaultPasswordEnv { + t.Errorf("expected password env %q, got %q", defaultPasswordEnv, cfg.PasswordEnv) + } +} + +// TestIMP_CLI_025_AmbiguousAuthMissingPassword verifies that basic mode without +// a password is rejected. +func TestIMP_CLI_025_AmbiguousAuthMissingPassword(t *testing.T) { + // Ensure the password env var is absent. + t.Setenv("ACS_PASSWORD", "") + + _, err := ParseAndValidate(minimalValidArgs( + "--acs-auth-mode", "basic", + "--acs-username", "admin", + )) + if err == nil { + t.Fatal("expected error for basic mode without password, got nil") + } +} + +// TestIMP_CLI_025_AmbiguousAuthMissingUsername verifies that basic mode without +// a username is rejected. +func TestIMP_CLI_025_AmbiguousAuthMissingUsername(t *testing.T) { + setenv(t, "ACS_PASSWORD", "secret") + // Ensure username env is absent. 
+ t.Setenv("ACS_USERNAME", "") + + _, err := ParseAndValidate(minimalValidArgs( + "--acs-auth-mode", "basic", + // No --acs-username + )) + if err == nil { + t.Fatal("expected error for basic mode without username, got nil") + } +} + +// TestIMP_CLI_025_AmbiguousAuthMissingToken verifies that token mode without a +// token is rejected. +func TestIMP_CLI_025_AmbiguousAuthMissingToken(t *testing.T) { + t.Setenv(defaultTokenEnv, "") + + _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) + if err == nil { + t.Fatal("expected error for token mode without token, got nil") + } +} + +// TestIMP_CLI_026_DefaultAuthModeIsToken verifies that when --acs-auth-mode is +// not set, the importer defaults to token mode. +func TestIMP_CLI_026_DefaultAuthModeIsToken(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected default auth mode to be %q, got %q", models.AuthModeToken, cfg.AuthMode) + } +} + +// TestDefaultTimeout verifies the default request timeout is 30s. +func TestDefaultTimeout(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.RequestTimeout != 30*time.Second { + t.Errorf("expected 30s timeout, got %v", cfg.RequestTimeout) + } +} + +// TestDefaultMaxRetries verifies the default max retries is 5. +func TestDefaultMaxRetries(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.MaxRetries != defaultMaxRetries { + t.Errorf("expected max retries %d, got %d", defaultMaxRetries, cfg.MaxRetries) + } +} + +// TestMissingACSClusterID verifies that omitting --acs-cluster-id is an error. 
+func TestMissingACSClusterID(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + _, err := ParseAndValidate([]string{ + "--acs-endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + // No --acs-cluster-id + }) + if err == nil { + t.Fatal("expected error for missing --acs-cluster-id, got nil") + } +} + +// TestMissingNamespaceScope verifies that providing neither --co-namespace nor +// --co-all-namespaces is an error. +func TestMissingNamespaceScope(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + _, err := ParseAndValidate([]string{ + "--acs-endpoint", "https://central.example.com", + "--acs-cluster-id", "cluster-abc", + }) + if err == nil { + t.Fatal("expected error for missing namespace scope, got nil") + } +} + +// TestMutuallyExclusiveNamespaceFlags verifies that --co-namespace and +// --co-all-namespaces are mutually exclusive. +func TestMutuallyExclusiveNamespaceFlags(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + _, err := ParseAndValidate(minimalValidArgs("--co-all-namespaces")) + if err == nil { + t.Fatal("expected error for both --co-namespace and --co-all-namespaces, got nil") + } +} + +// TestAllNamespacesFlag verifies that --co-all-namespaces works without --co-namespace. +func TestAllNamespacesFlag(t *testing.T) { + setenv(t, defaultTokenEnv, "tok") + + cfg, err := ParseAndValidate([]string{ + "--acs-endpoint", "https://central.example.com", + "--acs-cluster-id", "cluster-abc", + "--co-all-namespaces", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.COAllNamespaces { + t.Error("expected COAllNamespaces=true") + } + if cfg.CONamespace != "" { + t.Errorf("expected empty CONamespace, got %q", cfg.CONamespace) + } +} + +// TestNegativeMaxRetriesRejected verifies that --max-retries < 0 is rejected. 
func TestNegativeMaxRetriesRejected(t *testing.T) {
	setenv(t, defaultTokenEnv, "tok")

	_, err := ParseAndValidate(minimalValidArgs("--max-retries", "-1"))
	if err == nil {
		t.Fatal("expected error for negative max-retries, got nil")
	}
}

// TestTrailingSlashStripped verifies that a trailing slash on the endpoint is
// stripped for consistency.
func TestTrailingSlashStripped(t *testing.T) {
	setenv(t, defaultTokenEnv, "tok")

	cfg, err := ParseAndValidate(minimalValidArgs(
		"--acs-endpoint", "https://central.example.com/",
	))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if cfg.ACSEndpoint != "https://central.example.com" {
		t.Errorf("expected trailing slash stripped, got %q", cfg.ACSEndpoint)
	}
}
diff --git a/scripts/compliance-operator-importer/internal/mapping/mapping.go b/scripts/compliance-operator-importer/internal/mapping/mapping.go
new file mode 100644
index 0000000000000..2c16ada02cefb
--- /dev/null
+++ b/scripts/compliance-operator-importer/internal/mapping/mapping.go
@@ -0,0 +1,114 @@
package mapping

import (
	"fmt"
	"slices"

	"github.com/stackrox/co-acs-importer/internal/cofetch"
	"github.com/stackrox/co-acs-importer/internal/models"
)

// MappingResult is returned per ScanSettingBinding.
// Exactly one of Payload or Problem will be non-nil.
type MappingResult struct {
	// Payload is non-nil on success and contains the ACS create payload.
	Payload *models.ACSCreatePayload
	// Problem is non-nil when the binding should be skipped, with details about why.
	Problem *models.Problem
}

// MapBinding converts one ScanSettingBinding and its referenced ScanSetting into an
// ACS create payload, or returns a Problem if the binding should be skipped.
//
// Rules applied:
// - IMP-MAP-001: scanName = binding.Name; profiles = sorted+deduped list of profile names.
// - IMP-MAP-002: missing profile kind defaults to "Profile" (ProfileRef.Kind is "" => Profile).
// - IMP-MAP-003: oneTimeScan=false when a schedule is present.
// - IMP-MAP-004: scanSchedule set from ConvertCronToACSSchedule.
// - IMP-MAP-005: description contains "Imported from CO ScanSettingBinding /".
// - IMP-MAP-006: description includes the ScanSetting name.
// - IMP-MAP-007: clusters = [cfg.ACSClusterID].
// - IMP-MAP-008..011: nil ScanSetting => Problem{category:mapping, skipped:true}.
// - IMP-MAP-012..015: invalid cron => Problem{category:mapping, skipped:true}.
func MapBinding(binding cofetch.ScanSettingBinding, ss *cofetch.ScanSetting, cfg *models.Config) MappingResult {
	// ref identifies the binding in problem reports as "namespace/name".
	ref := fmt.Sprintf("%s/%s", binding.Namespace, binding.Name)

	// IMP-MAP-008, IMP-MAP-009, IMP-MAP-010: missing ScanSetting.
	if ss == nil {
		return MappingResult{
			Problem: &models.Problem{
				Severity:    models.SeverityError,
				Category:    models.CategoryMapping,
				ResourceRef: ref,
				Description: fmt.Sprintf(
					"ScanSettingBinding %q references ScanSetting %q which could not be found",
					ref, binding.ScanSettingName,
				),
				FixHint: fmt.Sprintf(
					"Ensure ScanSetting %q exists in namespace %q and is readable by the importer. "+
						"Verify with: kubectl get scansetting %s -n %s",
					binding.ScanSettingName, binding.Namespace,
					binding.ScanSettingName, binding.Namespace,
				),
				Skipped: true,
			},
		}
	}

	// IMP-MAP-004, IMP-MAP-012..015: convert cron schedule.
	// NOTE(review): an empty ss.Schedule ("") also flows through this
	// conversion; presumably ConvertCronToACSSchedule rejects it and the
	// binding is skipped — confirm whether unscheduled ScanSettings should
	// instead map to OneTimeScan=true.
	schedule, err := ConvertCronToACSSchedule(ss.Schedule)
	if err != nil {
		return MappingResult{
			Problem: &models.Problem{
				Severity:    models.SeverityError,
				Category:    models.CategoryMapping,
				ResourceRef: ref,
				Description: fmt.Sprintf(
					"schedule conversion failed for ScanSettingBinding %q (ScanSetting %q, schedule %q): %v",
					ref, ss.Name, ss.Schedule, err,
				),
				FixHint: fmt.Sprintf(
					"Update ScanSetting %q to use a supported 5-field cron expression, for example: "+
						"\"0 2 * * *\" (daily at 02:00), \"0 2 * * 0\" (weekly on Sunday), "+
						"\"0 2 1 * *\" (monthly on the 1st). "+
						"Step and range notation in the cron expression are not supported.",
					ss.Name,
				),
				Skipped: true,
			},
		}
	}

	// IMP-MAP-001, IMP-MAP-002: collect profiles, dedup, sort.
	// ProfileRef.Kind being empty is equivalent to "Profile" (IMP-MAP-002).
	// Only the profile name is used in the ACS payload; kind determines lookup but
	// both Profile and TailoredProfile names go into the same ACS profiles list.
	profileSet := make(map[string]struct{}, len(binding.Profiles))
	for _, p := range binding.Profiles {
		profileSet[p.Name] = struct{}{}
	}
	profiles := make([]string, 0, len(profileSet))
	for name := range profileSet {
		profiles = append(profiles, name)
	}
	slices.Sort(profiles) // IMP-MAP-001: deterministic sorted order

	// IMP-MAP-005, IMP-MAP-006: build description.
	description := fmt.Sprintf(
		"Imported from CO ScanSettingBinding %s/%s (ScanSetting: %s)",
		binding.Namespace, binding.Name, ss.Name,
	)

	return MappingResult{
		Payload: &models.ACSCreatePayload{
			ScanName: binding.Name, // IMP-MAP-001
			ScanConfig: models.ACSBaseScanConfig{
				OneTimeScan:  false,       // IMP-MAP-003
				Profiles:     profiles,    // IMP-MAP-001
				ScanSchedule: schedule,    // IMP-MAP-004
				Description:  description, // IMP-MAP-005, IMP-MAP-006
			},
			Clusters: []string{cfg.ACSClusterID}, // IMP-MAP-007
		},
	}
}
diff --git a/scripts/compliance-operator-importer/internal/mapping/mapping_test.go b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go
new file mode 100644
index 0000000000000..64136c337bf71
--- /dev/null
+++ b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go
@@ -0,0 +1,307 @@
package mapping

import (
	"strings"
	"testing"

	"github.com/stackrox/co-acs-importer/internal/cofetch"
	"github.com/stackrox/co-acs-importer/internal/models"
)

// baseBinding returns a minimal valid ScanSettingBinding for tests.
+func baseBinding() cofetch.ScanSettingBinding { + return cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: "cis-weekly", + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{ + {Name: "ocp4-cis-node", Kind: "Profile"}, + {Name: "ocp4-cis-master", Kind: "Profile"}, + {Name: "my-tailored-profile", Kind: "TailoredProfile"}, + }, + } +} + +// baseScanSetting returns a minimal valid ScanSetting for tests. +func baseScanSetting() *cofetch.ScanSetting { + return &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "default-auto-apply", + Schedule: "0 0 * * *", + } +} + +// baseConfig returns a minimal Config for tests. +func baseConfig() *models.Config { + return &models.Config{ + ACSClusterID: "cluster-a", + } +} + +// TestIMP_MAP_001_ScanName verifies the ACS payload scanName equals the binding name. +func TestIMP_MAP_001_ScanName(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload == nil { + t.Fatal("expected non-nil payload") + } + if result.Payload.ScanName != "cis-weekly" { + t.Errorf("ScanName: want %q, got %q", "cis-weekly", result.Payload.ScanName) + } +} + +// TestIMP_MAP_001_ProfilesSortedDeduped verifies profiles are sorted and deduplicated. +func TestIMP_MAP_001_ProfilesSortedDeduped(t *testing.T) { + binding := baseBinding() + // Add a duplicate entry. 
+ binding.Profiles = append(binding.Profiles, cofetch.ProfileRef{Name: "ocp4-cis-node", Kind: "Profile"}) + + result := MapBinding(binding, baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + want := []string{"my-tailored-profile", "ocp4-cis-master", "ocp4-cis-node"} + got := result.Payload.ScanConfig.Profiles + if len(got) != len(want) { + t.Fatalf("Profiles len: want %d, got %d: %v", len(want), len(got), got) + } + for i, w := range want { + if got[i] != w { + t.Errorf("Profiles[%d]: want %q, got %q", i, w, got[i]) + } + } +} + +// TestIMP_MAP_002_MissingKindDefaultsToProfile verifies that a ProfileRef with empty +// Kind is accepted and the profile name is included in ACS profiles (IMP-MAP-002). +// The kind=="" semantics mean "treat as Profile" — no lookup difference for the importer. +func TestIMP_MAP_002_MissingKindDefaultsToProfile(t *testing.T) { + binding := baseBinding() + binding.Profiles = []cofetch.ProfileRef{ + {Name: "custom-x"}, // Kind is empty => defaults to "Profile" + } + + result := MapBinding(binding, baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if len(result.Payload.ScanConfig.Profiles) != 1 { + t.Fatalf("expected 1 profile, got %v", result.Payload.ScanConfig.Profiles) + } + if result.Payload.ScanConfig.Profiles[0] != "custom-x" { + t.Errorf("profile name: want %q, got %q", "custom-x", result.Payload.ScanConfig.Profiles[0]) + } +} + +// TestIMP_MAP_003_OneTimeScanFalseWhenScheduleSet verifies oneTimeScan is false +// when the ScanSetting has a cron schedule. 
+func TestIMP_MAP_003_OneTimeScanFalseWhenScheduleSet(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload.ScanConfig.OneTimeScan { + t.Error("OneTimeScan: want false when schedule is set") + } +} + +// TestIMP_MAP_004_ScanSchedulePresentWhenScheduleSet verifies scanSchedule is non-nil +// when the ScanSetting has a cron schedule. +func TestIMP_MAP_004_ScanSchedulePresentWhenScheduleSet(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload.ScanConfig.ScanSchedule == nil { + t.Error("ScanSchedule: want non-nil when schedule is set") + } +} + +// TestIMP_MAP_005_DescriptionContainsBindingRef verifies the description contains +// "Imported from CO ScanSettingBinding /". +func TestIMP_MAP_005_DescriptionContainsBindingRef(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + want := "Imported from CO ScanSettingBinding openshift-compliance/cis-weekly" + if !strings.Contains(result.Payload.ScanConfig.Description, want) { + t.Errorf("Description: want it to contain %q, got %q", want, result.Payload.ScanConfig.Description) + } +} + +// TestIMP_MAP_006_DescriptionIncludesScanSettingName verifies the description +// includes a reference to the ScanSetting name. 
+func TestIMP_MAP_006_DescriptionIncludesScanSettingName(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if !strings.Contains(result.Payload.ScanConfig.Description, "default-auto-apply") { + t.Errorf("Description: want ScanSetting name %q included, got %q", + "default-auto-apply", result.Payload.ScanConfig.Description) + } +} + +// TestIMP_MAP_007_ClustersContainsACSClusterID verifies clusters contains the +// configured ACS cluster ID. +func TestIMP_MAP_007_ClustersContainsACSClusterID(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if len(result.Payload.Clusters) != 1 { + t.Fatalf("Clusters: want 1 entry, got %v", result.Payload.Clusters) + } + if result.Payload.Clusters[0] != "cluster-a" { + t.Errorf("Clusters[0]: want %q, got %q", "cluster-a", result.Payload.Clusters[0]) + } +} + +// TestIMP_MAP_008_MissingScanSettingSkipsBinding verifies that a nil ScanSetting +// results in a MappingResult with nil Payload and non-nil Problem (IMP-MAP-008). +func TestIMP_MAP_008_MissingScanSettingSkipsBinding(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Payload != nil { + t.Errorf("Payload: want nil when ScanSetting is missing, got %+v", result.Payload) + } + if result.Problem == nil { + t.Fatal("Problem: want non-nil when ScanSetting is missing") + } +} + +// TestIMP_MAP_008_MissingScanSettingCategoryMapping verifies the problem category +// is "mapping" for a missing ScanSetting (IMP-MAP-008). 
+func TestIMP_MAP_008_MissingScanSettingCategoryMapping(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.Category != models.CategoryMapping { + t.Errorf("Problem.Category: want %q, got %q", models.CategoryMapping, result.Problem.Category) + } +} + +// TestIMP_MAP_009_MissingScanSettingProblemsEntry verifies the problem entry has +// a populated ResourceRef (IMP-MAP-009). +func TestIMP_MAP_009_MissingScanSettingProblemsEntry(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.ResourceRef == "" { + t.Error("Problem.ResourceRef: want non-empty") + } +} + +// TestIMP_MAP_010_MissingScanSettingFixHint verifies the problem entry has a +// non-empty fix hint (IMP-MAP-010). +func TestIMP_MAP_010_MissingScanSettingFixHint(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.FixHint == "" { + t.Error("Problem.FixHint: want non-empty fix hint for missing ScanSetting") + } +} + +// TestIMP_MAP_011_OtherValidBindingsStillProcessed verifies that a missing ScanSetting +// only affects that binding; independent MapBinding calls for valid bindings succeed (IMP-MAP-011). +func TestIMP_MAP_011_OtherValidBindingsStillProcessed(t *testing.T) { + // Broken binding (nil ScanSetting). + broken := MapBinding(baseBinding(), nil, baseConfig()) + if broken.Problem == nil { + t.Fatal("broken binding: want Problem set") + } + + // Valid binding processed independently and must succeed. 
+ validBinding := cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: "another-binding", + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{{Name: "ocp4-cis", Kind: "Profile"}}, + } + valid := MapBinding(validBinding, baseScanSetting(), baseConfig()) + if valid.Problem != nil { + t.Fatalf("valid binding: unexpected problem: %+v", valid.Problem) + } + if valid.Payload == nil { + t.Fatal("valid binding: expected non-nil payload") + } +} + +// TestIMP_MAP_012_InvalidCronSkipsBinding verifies that an invalid cron expression +// causes the binding to be skipped (Payload=nil, Problem set, Skipped=true) (IMP-MAP-012). +func TestIMP_MAP_012_InvalidCronSkipsBinding(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Payload != nil { + t.Errorf("Payload: want nil for invalid cron, got %+v", result.Payload) + } + if result.Problem == nil { + t.Fatal("Problem: want non-nil for invalid cron") + } + if !result.Problem.Skipped { + t.Error("Problem.Skipped: want true") + } +} + +// TestIMP_MAP_013_InvalidCronProblemCategoryMapping verifies the problem category +// is "mapping" for an invalid schedule (IMP-MAP-013). +func TestIMP_MAP_013_InvalidCronProblemCategoryMapping(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.Category != models.CategoryMapping { + t.Errorf("Problem.Category: want %q, got %q", models.CategoryMapping, result.Problem.Category) + } +} + +// TestIMP_MAP_014_InvalidCronDescriptionMentionsSchedule verifies the problem +// description mentions schedule conversion failure (IMP-MAP-014). 
+func TestIMP_MAP_014_InvalidCronDescriptionMentionsSchedule(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + desc := strings.ToLower(result.Problem.Description) + if !strings.Contains(desc, "schedule") { + t.Errorf("Problem.Description: want it to mention %q, got %q", "schedule", result.Problem.Description) + } +} + +// TestIMP_MAP_015_InvalidCronFixHintMentionsCron verifies the problem fix hint +// suggests using a valid cron expression (IMP-MAP-015). +func TestIMP_MAP_015_InvalidCronFixHintMentionsCron(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + hint := strings.ToLower(result.Problem.FixHint) + if !strings.Contains(hint, "cron") { + t.Errorf("Problem.FixHint: want it to mention %q, got %q", "cron", result.Problem.FixHint) + } +} diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule.go b/scripts/compliance-operator-importer/internal/mapping/schedule.go new file mode 100644 index 0000000000000..58a2e5211f312 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/schedule.go @@ -0,0 +1,126 @@ +package mapping + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// ConvertCronToACSSchedule converts a standard 5-field cron expression to an +// ACS Schedule object. 
+// +// Supported cases: +// +// "minute hour * * *" -> DAILY, hour=H, minute=M +// "minute hour * * dayOfWeek" -> WEEKLY, hour=H, minute=M, day=DOW +// "minute hour dayOfMonth * *" -> MONTHLY, hour=H, minute=M, days=[DOM] +// +// Returns an error for: +// - non-5-field expressions +// - step notation (*/n or n/m) +// - range notation (n-m) +// - both day-of-month and day-of-week set (ambiguous) +// - out-of-range values +// - any other unsupported syntax +// +// The error message is suitable for inclusion in a Problem.FixHint. +func ConvertCronToACSSchedule(cron string) (*models.ACSSchedule, error) { + cron = strings.TrimSpace(cron) + if cron == "" { + return nil, errors.New("cron expression is empty; provide a valid 5-field cron expression (e.g. \"0 2 * * *\" for daily at 02:00)") + } + + fields := strings.Fields(cron) + if len(fields) != 5 { + return nil, fmt.Errorf("cron expression %q has %d field(s); a standard cron requires exactly 5 fields: minute hour day-of-month month day-of-week", cron, len(fields)) + } + + minute, hour, dom, month, dow := fields[0], fields[1], fields[2], fields[3], fields[4] + + // Reject unsupported syntax in any field. + for _, f := range fields { + if strings.Contains(f, "/") { + return nil, fmt.Errorf("step notation %q is not supported; use a simple numeric cron expression (e.g. \"0 2 * * *\")", f) + } + if strings.Contains(f, "-") { + return nil, fmt.Errorf("range notation %q is not supported; use a simple numeric cron expression (e.g. \"0 2 * * *\")", f) + } + } + + // Month must always be wildcard; we don't support specific-month scheduling. + if month != "*" { + return nil, fmt.Errorf("specific month field %q is not supported; set month to \"*\" and use day-of-month or day-of-week instead", month) + } + + // Parse minute. + minVal, err := parseField(minute, "minute", 0, 59) + if err != nil { + return nil, err + } + + // Parse hour. 
+ hourVal, err := parseField(hour, "hour", 0, 23) + if err != nil { + return nil, err + } + + // Determine schedule type by which positional fields are wildcards. + domIsWild := dom == "*" + dowIsWild := dow == "*" + + switch { + case !domIsWild && !dowIsWild: + // Both set — ambiguous. + return nil, fmt.Errorf("cron expression %q sets both day-of-month (%s) and day-of-week (%s), which is ambiguous; set exactly one to \"*\"", cron, dom, dow) + + case domIsWild && dowIsWild: + // DAILY: "minute hour * * *" + return &models.ACSSchedule{ + IntervalType: "DAILY", + Hour: hourVal, + Minute: minVal, + }, nil + + case domIsWild && !dowIsWild: + // WEEKLY: "minute hour * * dayOfWeek" + dowVal, err := parseField(dow, "day-of-week", 0, 6) + if err != nil { + return nil, err + } + return &models.ACSSchedule{ + IntervalType: "WEEKLY", + Hour: hourVal, + Minute: minVal, + Weekly: &models.ACSWeekly{Day: dowVal}, + }, nil + + default: + // MONTHLY: "minute hour dayOfMonth * *" + domVal, err := parseField(dom, "day-of-month", 1, 31) + if err != nil { + return nil, err + } + return &models.ACSSchedule{ + IntervalType: "MONTHLY", + Hour: hourVal, + Minute: minVal, + DaysOfMonth: &models.ACSDaysOfMonth{Days: []int32{domVal}}, + }, nil + } +} + +// parseField parses a single cron field that must be a plain integer (no wildcards +// allowed at this point) within [min, max]. 
+func parseField(val, name string, min, max int) (int32, error) { + n, err := strconv.Atoi(val) + if err != nil { + return 0, fmt.Errorf("cron field %q (value %q) is not a valid integer; use a plain number or \"*\" for %s", name, val, name) + } + if n < min || n > max { + return 0, fmt.Errorf("cron field %q value %d is out of range [%d, %d]", name, n, min, max) + } + return int32(n), nil +} diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule_test.go b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go new file mode 100644 index 0000000000000..0dd28f8f17938 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go @@ -0,0 +1,225 @@ +package mapping + +import ( + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// TestIMP_MAP_003_IMP_MAP_004_DailySchedule verifies that a daily cron expression +// produces oneTimeScan=false (IMP-MAP-003) and a present DAILY schedule (IMP-MAP-004). +func TestIMP_MAP_003_IMP_MAP_004_DailySchedule(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got == nil { + t.Fatal("expected non-nil schedule") + } + if got.IntervalType != "DAILY" { + t.Errorf("IntervalType: want DAILY, got %q", got.IntervalType) + } + if got.Hour != 0 { + t.Errorf("Hour: want 0, got %d", got.Hour) + } + if got.Minute != 0 { + t.Errorf("Minute: want 0, got %d", got.Minute) + } + if got.Weekly != nil { + t.Errorf("Weekly: want nil for DAILY, got %+v", got.Weekly) + } + if got.DaysOfMonth != nil { + t.Errorf("DaysOfMonth: want nil for DAILY, got %+v", got.DaysOfMonth) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_DailyScheduleNonMidnight verifies non-midnight daily. 
+func TestIMP_MAP_003_IMP_MAP_004_DailyScheduleNonMidnight(t *testing.T) { + got, err := ConvertCronToACSSchedule("30 14 * * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "DAILY" { + t.Errorf("IntervalType: want DAILY, got %q", got.IntervalType) + } + if got.Hour != 14 { + t.Errorf("Hour: want 14, got %d", got.Hour) + } + if got.Minute != 30 { + t.Errorf("Minute: want 30, got %d", got.Minute) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_WeeklySchedule verifies that a weekly cron expression +// produces a WEEKLY schedule with the correct day (IMP-MAP-003, IMP-MAP-004). +func TestIMP_MAP_003_IMP_MAP_004_WeeklySchedule(t *testing.T) { + // "0 2 * * 0" means Sunday at 02:00 + got, err := ConvertCronToACSSchedule("0 2 * * 0") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "WEEKLY" { + t.Errorf("IntervalType: want WEEKLY, got %q", got.IntervalType) + } + if got.Hour != 2 { + t.Errorf("Hour: want 2, got %d", got.Hour) + } + if got.Minute != 0 { + t.Errorf("Minute: want 0, got %d", got.Minute) + } + if got.Weekly == nil { + t.Fatal("Weekly: want non-nil for WEEKLY schedule") + } + if got.Weekly.Day != 0 { + t.Errorf("Weekly.Day: want 0 (Sunday), got %d", got.Weekly.Day) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_WeeklyScheduleSaturday verifies Saturday weekly. +func TestIMP_MAP_003_IMP_MAP_004_WeeklyScheduleSaturday(t *testing.T) { + got, err := ConvertCronToACSSchedule("15 3 * * 6") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "WEEKLY" { + t.Errorf("IntervalType: want WEEKLY, got %q", got.IntervalType) + } + if got.Weekly == nil { + t.Fatal("Weekly: want non-nil") + } + if got.Weekly.Day != 6 { + t.Errorf("Weekly.Day: want 6 (Saturday), got %d", got.Weekly.Day) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_MonthlySchedule verifies that a monthly cron expression +// produces a MONTHLY schedule with the correct day-of-month (IMP-MAP-003, IMP-MAP-004). 
+func TestIMP_MAP_003_IMP_MAP_004_MonthlySchedule(t *testing.T) { + // "30 6 1 * *" means 1st of every month at 06:30 + got, err := ConvertCronToACSSchedule("30 6 1 * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "MONTHLY" { + t.Errorf("IntervalType: want MONTHLY, got %q", got.IntervalType) + } + if got.Hour != 6 { + t.Errorf("Hour: want 6, got %d", got.Hour) + } + if got.Minute != 30 { + t.Errorf("Minute: want 30, got %d", got.Minute) + } + if got.DaysOfMonth == nil { + t.Fatal("DaysOfMonth: want non-nil for MONTHLY schedule") + } + if len(got.DaysOfMonth.Days) != 1 || got.DaysOfMonth.Days[0] != 1 { + t.Errorf("DaysOfMonth.Days: want [1], got %v", got.DaysOfMonth.Days) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronNaturalLanguage verifies that a human-readable +// schedule string is rejected with an error that mentions cron (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronNaturalLanguage(t *testing.T) { + got, err := ConvertCronToACSSchedule("every day at noon") + if err == nil { + t.Fatalf("expected error for natural-language expression, got %+v", got) + } + errStr := err.Error() + if len(errStr) == 0 { + t.Error("error message must not be empty") + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronStepNotation verifies that step notation +// (*/n) is rejected as unsupported (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronStepNotation(t *testing.T) { + got, err := ConvertCronToACSSchedule("*/6 * * * *") + if err == nil { + t.Fatalf("expected error for step notation, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronRange verifies that range notation (n-m) +// is rejected (IMP-MAP-012, IMP-MAP-015). 
+func TestIMP_MAP_012_IMP_MAP_015_InvalidCronRange(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * 1-5") + if err == nil { + t.Fatalf("expected error for range notation, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronEmpty verifies that an empty string +// is rejected (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronEmpty(t *testing.T) { + got, err := ConvertCronToACSSchedule("") + if err == nil { + t.Fatalf("expected error for empty cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooFewFields verifies that a cron with +// fewer than 5 fields is rejected. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooFewFields(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * *") + if err == nil { + t.Fatalf("expected error for 4-field cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooManyFields verifies that a cron with +// more than 5 fields is rejected. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooManyFields(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * * *") + if err == nil { + t.Fatalf("expected error for 6-field cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronBothDOMAndDOW verifies that a cron +// with both day-of-month and day-of-week set is rejected as ambiguous. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronBothDOMAndDOW(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 1 * 0") + if err == nil { + t.Fatalf("expected error for both DOM and DOW set, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeHour verifies out-of-range hour. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeHour(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 25 * * *") + if err == nil { + t.Fatalf("expected error for hour=25, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeMinute verifies out-of-range minute. 
+func TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeMinute(t *testing.T) {
+	got, err := ConvertCronToACSSchedule("60 0 * * *")
+	if err == nil {
+		t.Fatalf("expected error for minute=60, got %+v", got)
+	}
+}
+
+// TestIMP_MAP_003_IMP_MAP_004_MultiValueDOMMonthly verifies a single mid-month
+// day-of-month ("0 0 15 * *") maps to MONTHLY; comma lists such as "1,15" are rejected by parseField.
+func TestIMP_MAP_003_IMP_MAP_004_MultiValueDOMMonthly(t *testing.T) {
+	got, err := ConvertCronToACSSchedule("0 0 15 * *")
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if got.IntervalType != "MONTHLY" {
+		t.Errorf("IntervalType: want MONTHLY, got %q", got.IntervalType)
+	}
+	if got.DaysOfMonth == nil || len(got.DaysOfMonth.Days) == 0 {
+		t.Fatal("DaysOfMonth: want non-nil with days")
+	}
+	if got.DaysOfMonth.Days[0] != 15 {
+		t.Errorf("DaysOfMonth.Days[0]: want 15, got %d", got.DaysOfMonth.Days[0])
+	}
+}
+
+// Type assertion that ConvertCronToACSSchedule returns *models.ACSSchedule; note the call runs at test-binary init.
+var _ *models.ACSSchedule = func() *models.ACSSchedule {
+	s, _ := ConvertCronToACSSchedule("0 0 * * *")
+	return s
+}()
diff --git a/scripts/compliance-operator-importer/internal/models/models.go b/scripts/compliance-operator-importer/internal/models/models.go
new file mode 100644
index 0000000000000..8105d2972344e
--- /dev/null
+++ b/scripts/compliance-operator-importer/internal/models/models.go
@@ -0,0 +1,166 @@
+package models
+
+import (
+	"context"
+	"time"
+)
+
+// AuthMode controls which ACS authentication scheme the importer uses.
+type AuthMode string
+
+const (
+	AuthModeToken AuthMode = "token"
+	AuthModeBasic AuthMode = "basic"
+)
+
+// Config holds all resolved configuration for a single importer run.
+type Config struct { + ACSEndpoint string + AuthMode AuthMode + TokenEnv string // env var name, default "ACS_API_TOKEN" + Username string + PasswordEnv string // env var name, default "ACS_PASSWORD" + KubeContext string // empty = use current context + CONamespace string // empty when COAllNamespaces=true + COAllNamespaces bool + ACSClusterID string + DryRun bool + ReportJSON string + RequestTimeout time.Duration + MaxRetries int + CACertFile string + InsecureSkipVerify bool +} + +// Severity classifies how severe a Problem is. +type Severity string + +const ( + SeverityError Severity = "error" + SeverityWarning Severity = "warning" +) + +// Category classifies what kind of issue a Problem represents. +type Category string + +const ( + CategoryInput Category = "input" + CategoryMapping Category = "mapping" + CategoryConflict Category = "conflict" + CategoryAuth Category = "auth" + CategoryAPI Category = "api" + CategoryRetry Category = "retry" + CategoryValidation Category = "validation" +) + +// Problem is a structured issue entry recorded during an importer run. +type Problem struct { + Severity Severity `json:"severity"` + Category Category `json:"category"` + ResourceRef string `json:"resourceRef"` // "namespace/name" or synthetic + Description string `json:"description"` + FixHint string `json:"fixHint"` + Skipped bool `json:"skipped"` +} + +// ACSSchedule is the schedule portion of an ACS scan configuration. +type ACSSchedule struct { + IntervalType string `json:"intervalType,omitempty"` + Hour int32 `json:"hour"` + Minute int32 `json:"minute"` + Weekly *ACSWeekly `json:"weekly,omitempty"` + DaysOfWeek *ACSDaysOfWeek `json:"daysOfWeek,omitempty"` + DaysOfMonth *ACSDaysOfMonth `json:"daysOfMonth,omitempty"` +} + +// ACSWeekly holds the day-of-week for a weekly ACS schedule. +type ACSWeekly struct { + Day int32 `json:"day"` +} + +// ACSDaysOfWeek holds multiple days for a multi-day-of-week ACS schedule. 
+type ACSDaysOfWeek struct { + Days []int32 `json:"days"` +} + +// ACSDaysOfMonth holds days for a monthly ACS schedule. +type ACSDaysOfMonth struct { + Days []int32 `json:"days"` +} + +// ACSBaseScanConfig is the scanConfig sub-object in an ACS create payload. +type ACSBaseScanConfig struct { + OneTimeScan bool `json:"oneTimeScan"` + Profiles []string `json:"profiles"` + ScanSchedule *ACSSchedule `json:"scanSchedule,omitempty"` + Description string `json:"description"` +} + +// ACSCreatePayload is the request body for POST /v2/compliance/scan/configurations. +// Phase 1 is create-only; no PUT is ever issued. +type ACSCreatePayload struct { + ScanName string `json:"scanName"` + ScanConfig ACSBaseScanConfig `json:"scanConfig"` + Clusters []string `json:"clusters"` +} + +// ACSConfigSummary is a single entry from the ACS list response. +type ACSConfigSummary struct { + ID string `json:"id"` + ScanName string `json:"scanName"` +} + +// ACSListResponse matches the JSON from GET /v2/compliance/scan/configurations. +type ACSListResponse struct { + Configurations []ACSConfigSummary `json:"configurations"` + TotalCount int32 `json:"totalCount"` +} + +// ReportMeta is metadata written at the top of the JSON report. +type ReportMeta struct { + Timestamp string `json:"timestamp"` + DryRun bool `json:"dryRun"` + NamespaceScope string `json:"namespaceScope"` + Mode string `json:"mode"` // always "create-only" +} + +// ReportCounts summarises action totals for the JSON report. +type ReportCounts struct { + Discovered int `json:"discovered"` + Create int `json:"create"` + Skip int `json:"skip"` + Failed int `json:"failed"` +} + +// ReportItemSource identifies the CO source for one report item. +type ReportItemSource struct { + Namespace string `json:"namespace"` + BindingName string `json:"bindingName"` + ScanSettingName string `json:"scanSettingName"` +} + +// ReportItem records the outcome for one ScanSettingBinding. 
+type ReportItem struct { + Source ReportItemSource `json:"source"` + Action string `json:"action"` // create|skip|fail + Reason string `json:"reason"` + Attempts int `json:"attempts"` + ACSScanConfigID string `json:"acsScanConfigId,omitempty"` + Error string `json:"error,omitempty"` +} + +// Report is the top-level structure written to --report-json. +type Report struct { + Meta ReportMeta `json:"meta"` + Counts ReportCounts `json:"counts"` + Items []ReportItem `json:"items"` + Problems []Problem `json:"problems"` +} + +// ACSClient is the interface for ACS API operations. +// Phase 1 is create-only; no PUT method is defined. +type ACSClient interface { + Preflight(ctx context.Context) error + ListScanConfigurations(ctx context.Context) ([]ACSConfigSummary, error) + CreateScanConfiguration(ctx context.Context, payload ACSCreatePayload) (string, error) +} diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight.go b/scripts/compliance-operator-importer/internal/preflight/preflight.go new file mode 100644 index 0000000000000..944ea7c28f85d --- /dev/null +++ b/scripts/compliance-operator-importer/internal/preflight/preflight.go @@ -0,0 +1,158 @@ +// Package preflight verifies that the ACS endpoint is reachable and the +// supplied credentials are accepted before any resource processing begins. +package preflight + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +const preflightPath = "/v2/compliance/scan/configurations?pagination.limit=1" + +// Run performs preflight checks in order: +// 1. Verify endpoint uses https:// (IMP-CLI-013). +// 2. Verify auth material is non-empty for the configured mode (IMP-CLI-014). +// 3. Probe GET /v2/compliance/scan/configurations?pagination.limit=1 (IMP-CLI-015). +// 4. HTTP 401/403 => fail-fast with a remediation message (IMP-CLI-016). 
+// +// Returns nil on success, or an error with a remediation hint on failure. +func Run(ctx context.Context, cfg *models.Config) error { + // IMP-CLI-013: endpoint must be https://. + if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + return fmt.Errorf( + "preflight failed: endpoint %q must start with https:// (IMP-CLI-013)\n"+ + "Fix: use --acs-endpoint https://", + cfg.ACSEndpoint, + ) + } + + // IMP-CLI-014: auth material must be non-empty. + if err := checkAuthMaterial(cfg); err != nil { + return err + } + + client, err := buildHTTPClient(cfg) + if err != nil { + return fmt.Errorf("preflight failed: could not build HTTP client: %w", err) + } + + url := cfg.ACSEndpoint + preflightPath + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("preflight failed: could not build request: %w", err) + } + + addAuthHeader(req, cfg) + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf( + "preflight failed: could not reach ACS at %s: %w\n"+ + "Fix: check network connectivity and that --acs-endpoint is correct", + cfg.ACSEndpoint, err, + ) + } + defer resp.Body.Close() + + // IMP-CLI-015: success only on HTTP 200. + // IMP-CLI-016: 401/403 => fail-fast with remediation message. 
+ switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusUnauthorized: + return errors.New( + "preflight failed: ACS returned 401 Unauthorized (IMP-CLI-016)\n" + + "Fix: verify your ACS API token or credentials are correct and not expired", + ) + case http.StatusForbidden: + return errors.New( + "preflight failed: ACS returned 403 Forbidden (IMP-CLI-016)\n" + + "Fix: ensure your ACS user has the 'Compliance' permission set with at least read access", + ) + default: + return fmt.Errorf( + "preflight failed: ACS returned unexpected status %d from %s\n"+ + "Fix: verify the ACS endpoint is correct and the service is healthy", + resp.StatusCode, url, + ) + } +} + +// checkAuthMaterial validates that the auth credentials for the configured +// mode are non-empty (IMP-CLI-014). +func checkAuthMaterial(cfg *models.Config) error { + switch cfg.AuthMode { + case models.AuthModeToken: + token := os.Getenv(cfg.TokenEnv) + if token == "" { + return fmt.Errorf( + "preflight failed: token auth mode requires a non-empty token in env var %q (IMP-CLI-014)\n"+ + "Fix: set %s=", + cfg.TokenEnv, cfg.TokenEnv, + ) + } + case models.AuthModeBasic: + if cfg.Username == "" { + return errors.New( + "preflight failed: basic auth mode requires a non-empty username (IMP-CLI-014)\n" + + "Fix: pass --acs-username= or set ACS_USERNAME=", + ) + } + password := os.Getenv(cfg.PasswordEnv) + if password == "" { + return fmt.Errorf( + "preflight failed: basic auth mode requires a non-empty password in env var %q (IMP-CLI-014)\n"+ + "Fix: set %s=", + cfg.PasswordEnv, cfg.PasswordEnv, + ) + } + } + return nil +} + +// buildHTTPClient constructs an HTTP client with the TLS settings from cfg. 
+func buildHTTPClient(cfg *models.Config) (*http.Client, error) { + tlsCfg := &tls.Config{ + InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec // controlled by explicit flag + } + + if cfg.CACertFile != "" { + pem, err := os.ReadFile(cfg.CACertFile) + if err != nil { + return nil, fmt.Errorf("reading CA cert file %q: %w", cfg.CACertFile, err) + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("CA cert file %q contains no valid PEM certificates", cfg.CACertFile) + } + tlsCfg.RootCAs = pool + } + + transport := &http.Transport{TLSClientConfig: tlsCfg} + return &http.Client{ + Transport: transport, + Timeout: cfg.RequestTimeout, + }, nil +} + +// addAuthHeader sets the Authorization header on req according to cfg.AuthMode. +func addAuthHeader(req *http.Request, cfg *models.Config) { + switch cfg.AuthMode { + case models.AuthModeToken: + token := os.Getenv(cfg.TokenEnv) + req.Header.Set("Authorization", "Bearer "+token) + case models.AuthModeBasic: + password := os.Getenv(cfg.PasswordEnv) + creds := base64.StdEncoding.EncodeToString([]byte(cfg.Username + ":" + password)) + req.Header.Set("Authorization", "Basic "+creds) + } +} diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go new file mode 100644 index 0000000000000..5dbac5803c0d4 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go @@ -0,0 +1,210 @@ +package preflight + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// minimalTokenConfig returns a Config wired to the given server URL and token. 
+func minimalTokenConfig(serverURL, token string) *models.Config { // NOTE(review): token is unused — tests set TEST_ACS_TOKEN via t.Setenv; consider dropping the parameter.
+	return &models.Config{
+		ACSEndpoint:    serverURL,
+		AuthMode:       models.AuthModeToken,
+		TokenEnv:       "TEST_ACS_TOKEN",
+		RequestTimeout: 5 * time.Second,
+	}
+}
+
+// TestIMP_CLI_015_200ResponseNoError verifies that a 200 response from the
+// preflight probe returns nil (IMP-CLI-015).
+func TestIMP_CLI_015_200ResponseNoError(t *testing.T) {
+	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if !strings.HasPrefix(r.URL.Path, "/v2/compliance/scan/configurations") {
+			http.NotFound(w, r)
+			return
+		}
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer srv.Close()
+
+	t.Setenv("TEST_ACS_TOKEN", "validtoken")
+
+	cfg := minimalTokenConfig(srv.URL, "validtoken")
+	// Run builds its own TLS client; skip verification of the test server's self-signed certificate.
+	cfg.InsecureSkipVerify = true
+
+	err := Run(context.Background(), cfg)
+	if err != nil {
+		t.Fatalf("expected nil error for 200 response, got: %v", err)
+	}
+}
+
+// TestIMP_CLI_016_401ReturnsRemediationError verifies that a 401 response
+// causes a fail-fast error with remediation text (IMP-CLI-016).
+func TestIMP_CLI_016_401ReturnsRemediationError(t *testing.T) {
+	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.WriteHeader(http.StatusUnauthorized)
+	}))
+	defer srv.Close()
+
+	t.Setenv("TEST_ACS_TOKEN", "badtoken")
+
+	cfg := minimalTokenConfig(srv.URL, "badtoken")
+	cfg.InsecureSkipVerify = true
+
+	err := Run(context.Background(), cfg)
+	if err == nil {
+		t.Fatal("expected error for 401 response, got nil")
+	}
+	msg := err.Error()
+	if !strings.Contains(strings.ToLower(msg), "unauthorized") && !strings.Contains(strings.ToLower(msg), "401") {
+		t.Errorf("expected 'unauthorized' or '401' in error message, got: %q", msg)
+	}
+	// Must include a remediation hint.
+ if !strings.Contains(strings.ToLower(msg), "fix:") { + t.Errorf("expected remediation hint (Fix:) in error message, got: %q", msg) + } +} + +// TestIMP_CLI_016_403ReturnsRemediationError verifies that a 403 response +// causes a fail-fast error with remediation text (IMP-CLI-016). +func TestIMP_CLI_016_403ReturnsRemediationError(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + })) + defer srv.Close() + + t.Setenv("TEST_ACS_TOKEN", "insufficienttoken") + + cfg := minimalTokenConfig(srv.URL, "insufficienttoken") + cfg.InsecureSkipVerify = true + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for 403 response, got nil") + } + msg := err.Error() + if !strings.Contains(strings.ToLower(msg), "forbidden") && !strings.Contains(strings.ToLower(msg), "403") { + t.Errorf("expected 'forbidden' or '403' in error message, got: %q", msg) + } + if !strings.Contains(strings.ToLower(msg), "fix:") { + t.Errorf("expected remediation hint (Fix:) in error message, got: %q", msg) + } +} + +// TestIMP_CLI_013_NonHTTPSEndpointRejected verifies that a non-https endpoint +// is rejected before any network call is made (IMP-CLI-013). +func TestIMP_CLI_013_NonHTTPSEndpointRejected(t *testing.T) { + t.Setenv("TEST_ACS_TOKEN", "tok") + + cfg := &models.Config{ + ACSEndpoint: "http://central.example.com", + AuthMode: models.AuthModeToken, + TokenEnv: "TEST_ACS_TOKEN", + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for non-https endpoint, got nil") + } + if !strings.Contains(err.Error(), "https://") { + t.Errorf("expected error to mention https://, got: %q", err.Error()) + } +} + +// TestIMP_CLI_014_EmptyTokenRejected verifies that an empty token in token +// mode is caught before any HTTP request (IMP-CLI-014). 
+func TestIMP_CLI_014_EmptyTokenRejected(t *testing.T) { + // Do not set the token env var. + t.Setenv("TEST_ACS_TOKEN", "") + + cfg := &models.Config{ + ACSEndpoint: "https://central.example.com", + AuthMode: models.AuthModeToken, + TokenEnv: "TEST_ACS_TOKEN", + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for empty token, got nil") + } + if !strings.Contains(strings.ToLower(err.Error()), "token") { + t.Errorf("expected error message to mention token, got: %q", err.Error()) + } +} + +// TestIMP_CLI_014_BasicModeEmptyPasswordRejected verifies that basic mode with +// an empty password is rejected before any HTTP request (IMP-CLI-014). +func TestIMP_CLI_014_BasicModeEmptyPasswordRejected(t *testing.T) { + t.Setenv("ACS_PASSWORD", "") + + cfg := &models.Config{ + ACSEndpoint: "https://central.example.com", + AuthMode: models.AuthModeBasic, + Username: "admin", + PasswordEnv: "ACS_PASSWORD", + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for empty password in basic mode, got nil") + } +} + +// TestIMP_CLI_015_ProbesCorrectPath verifies that the preflight probe sends +// a request to the expected API path (IMP-CLI-015). 
+func TestIMP_CLI_015_ProbesCorrectPath(t *testing.T) { + var capturedPath string + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedPath = r.URL.RequestURI() + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("TEST_ACS_TOKEN", "tok") + + cfg := minimalTokenConfig(srv.URL, "tok") + cfg.InsecureSkipVerify = true + + if err := Run(context.Background(), cfg); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectedPath := "/v2/compliance/scan/configurations?pagination.limit=1" + if capturedPath != expectedPath { + t.Errorf("expected probe path %q, got %q", expectedPath, capturedPath) + } +} + +// TestIMP_CLI_015_BearerTokenSentInHeader verifies that the Authorization +// header is set to "Bearer " in token mode. +func TestIMP_CLI_015_BearerTokenSentInHeader(t *testing.T) { + var capturedAuth string + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedAuth = r.Header.Get("Authorization") + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("TEST_ACS_TOKEN", "my-secret-token") + + cfg := minimalTokenConfig(srv.URL, "my-secret-token") + cfg.InsecureSkipVerify = true + + if err := Run(context.Background(), cfg); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if capturedAuth != "Bearer my-secret-token" { + t.Errorf("expected Authorization 'Bearer my-secret-token', got %q", capturedAuth) + } +} diff --git a/scripts/compliance-operator-importer/internal/problems/problems.go b/scripts/compliance-operator-importer/internal/problems/problems.go new file mode 100644 index 0000000000000..4d016ebd625bb --- /dev/null +++ b/scripts/compliance-operator-importer/internal/problems/problems.go @@ -0,0 +1,44 @@ +// Package problems provides a Collector that accumulates Problem entries +// during an importer run. All collected problems are included in the final +// JSON report and used to determine the process exit code. 
+package problems + +import "github.com/stackrox/co-acs-importer/internal/models" + +// Collector accumulates problems during a run. +// It is not safe for concurrent use; callers must synchronise externally if needed. +type Collector struct { + problems []models.Problem +} + +// NewCollector returns an empty Collector ready for use. +func NewCollector() *Collector { + return &Collector{} +} + +// Add appends p to the collected problem list. +// Both Description and FixHint must be non-empty to satisfy IMP-CLI-022. +func (c *Collector) Add(p models.Problem) { + c.problems = append(c.problems, p) +} + +// All returns a copy of all collected problems in insertion order. +func (c *Collector) All() []models.Problem { + if len(c.problems) == 0 { + return []models.Problem{} + } + out := make([]models.Problem, len(c.problems)) + copy(out, c.problems) + return out +} + +// HasErrors returns true if at least one collected problem has severity "error". +// Used to determine whether the run should exit with code 2 (IMP-CLI-019). +func (c *Collector) HasErrors() bool { + for _, p := range c.problems { + if p.Severity == models.SeverityError { + return true + } + } + return false +} diff --git a/scripts/compliance-operator-importer/internal/problems/problems_test.go b/scripts/compliance-operator-importer/internal/problems/problems_test.go new file mode 100644 index 0000000000000..ad0f4a02257a0 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/problems/problems_test.go @@ -0,0 +1,136 @@ +package problems_test + +import ( + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/problems" +) + +// TestIMP_CLI_022_AddAndAllRoundtrip verifies that problems added to the Collector +// are returned verbatim by All(), preserving insertion order. +// Requirement: IMP-CLI-022 (problems[] entry appended for every problem). 
+func TestIMP_CLI_022_AddAndAllRoundtrip(t *testing.T) { + c := problems.NewCollector() + + p1 := models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-a", + Description: "API returned 503", + FixHint: "Retry later or check ACS endpoint.", + Skipped: true, + } + p2 := models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-b", + Description: "Scan config already exists", + FixHint: "Delete or rename the existing config.", + Skipped: true, + } + + c.Add(p1) + c.Add(p2) + + got := c.All() + if len(got) != 2 { + t.Fatalf("expected 2 problems, got %d", len(got)) + } + if got[0] != p1 { + t.Errorf("first problem mismatch: got %+v, want %+v", got[0], p1) + } + if got[1] != p2 { + t.Errorf("second problem mismatch: got %+v, want %+v", got[1], p2) + } +} + +// TestIMP_CLI_022_EmptyCollectorAllReturnsEmptySlice verifies that a fresh +// Collector returns an empty (non-nil) slice from All(). +func TestIMP_CLI_022_EmptyCollectorAllReturnsEmptySlice(t *testing.T) { + c := problems.NewCollector() + got := c.All() + if got == nil { + t.Fatal("All() returned nil; want empty non-nil slice") + } + if len(got) != 0 { + t.Fatalf("expected 0 problems, got %d", len(got)) + } +} + +// TestIMP_CLI_022_HasErrorsFalseWhenOnlyWarnings verifies that HasErrors returns +// false when only warning-severity problems are present. +// Requirement: IMP-CLI-022 (severity classification). 
+func TestIMP_CLI_022_HasErrorsFalseWhenOnlyWarnings(t *testing.T) { + c := problems.NewCollector() + c.Add(models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-c", + Description: "Scan config already exists", + FixHint: "Delete the existing config and re-run.", + Skipped: true, + }) + if c.HasErrors() { + t.Error("HasErrors() returned true; expected false when only warnings present") + } +} + +// TestIMP_CLI_022_HasErrorsTrueWhenAnyErrorSeverity verifies that HasErrors +// returns true as soon as any error-severity problem is added. +// Requirement: IMP-CLI-022 (severity classification drives exit code logic). +func TestIMP_CLI_022_HasErrorsTrueWhenAnyErrorSeverity(t *testing.T) { + c := problems.NewCollector() + // Add a warning first to ensure we check all entries, not just the last. + c.Add(models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryMapping, + ResourceRef: "ns/binding-d", + Description: "Schedule conversion warning", + FixHint: "Use a standard cron expression.", + Skipped: false, + }) + c.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-e", + Description: "API returned 400 Bad Request", + FixHint: "Check that the payload is valid and the cluster ID exists.", + Skipped: true, + }) + if !c.HasErrors() { + t.Error("HasErrors() returned false; expected true when error-severity problem is present") + } +} + +// TestIMP_CLI_022_HasErrorsFalseOnEmptyCollector verifies that an empty +// Collector reports no errors. +func TestIMP_CLI_022_HasErrorsFalseOnEmptyCollector(t *testing.T) { + c := problems.NewCollector() + if c.HasErrors() { + t.Error("HasErrors() returned true on empty collector; expected false") + } +} + +// TestIMP_CLI_022_AllReturnsCopy verifies that mutations to the returned slice +// do not affect the Collector's internal state. 
+func TestIMP_CLI_022_AllReturnsCopy(t *testing.T) { + c := problems.NewCollector() + c.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-f", + Description: "Transient API failure", + FixHint: "Increase --max-retries or check ACS health.", + Skipped: true, + }) + + got := c.All() + got[0].Description = "mutated" + + // Second call must return the original value. + fresh := c.All() + if fresh[0].Description == "mutated" { + t.Error("All() returned a reference to internal state; expected an independent copy") + } +} diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only.go b/scripts/compliance-operator-importer/internal/reconcile/create_only.go new file mode 100644 index 0000000000000..bcd9ac9f49c84 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only.go @@ -0,0 +1,198 @@ +// Package reconcile implements the create-only reconciliation loop. +// +// create-only: PUT is never called in Phase 1 +package reconcile + +import ( + "context" + "fmt" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// transientStatusCodes is the set of HTTP status codes that should trigger a retry. +// Non-transient codes (400, 401, 403, 404) are NOT in this set and cause immediate failure. +// +// Implements IMP-ERR-001 (retry) and IMP-ERR-002 (no retry). +var transientStatusCodes = map[int]bool{ + 429: true, + 502: true, + 503: true, + 504: true, +} + +// statusCoder is the interface satisfied by acs.HTTPError (and the test statusError). +// It lets the reconciler inspect the HTTP status without importing the acs package, +// avoiding an import cycle. +type statusCoder interface { + StatusCode() int +} + +// Action records the outcome of a single Apply call. 
+type Action struct { + Source models.ReportItemSource + ActionType string // "create" | "skip" | "fail" + Reason string + Attempts int + ACSScanConfigID string + Err error + Problem *models.Problem +} + +// Reconciler implements the create-only reconciliation loop. +// It never calls PUT. Existing scan names are skipped with a conflict problem. +// +// create-only: PUT is never called in Phase 1 +type Reconciler struct { + client models.ACSClient + maxRetries int + dryRun bool +} + +// NewReconciler creates a Reconciler. +// +// - client: ACS API client (POST-only; no PUT anywhere) +// - maxRetries: maximum total attempts for a single create (must be >= 1) +// - dryRun: when true, no POST is issued; planned actions are still recorded +func NewReconciler(client models.ACSClient, maxRetries int, dryRun bool) *Reconciler { + if maxRetries < 1 { + maxRetries = 1 + } + return &Reconciler{ + client: client, + maxRetries: maxRetries, + dryRun: dryRun, + } +} + +// Apply tries to create the scan config if scanName is not already in existingNames. +// +// Behaviour: +// - If dryRun=true: records planned action, no POST is issued. (IMP-IDEM-004, IMP-IDEM-006) +// - If scanName exists: skip + conflict problem. (IMP-IDEM-002, IMP-IDEM-003) +// - Transient failures (429,502,503,504): retry with exponential backoff. (IMP-ERR-001) +// - Non-transient failures (400,401,403,404): record as fail immediately. (IMP-ERR-002) +// +// Exponential backoff: base=500ms, doubles each retry; up to maxRetries total attempts. +// Attempts count is always recorded in the returned Action. 
+// +// create-only: PUT is never called in Phase 1 +func (r *Reconciler) Apply( + ctx context.Context, + payload models.ACSCreatePayload, + source models.ReportItemSource, + existingNames map[string]bool, +) Action { + action := Action{Source: source} + + // IMP-IDEM-002: existing name => skip with conflict problem + // IMP-IDEM-003: no PUT is attempted for existing configs + if existingNames[payload.ScanName] { + problem := &models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: resourceRef(source), + Description: fmt.Sprintf("scan configuration %q already exists in ACS and will not be updated (create-only mode)", payload.ScanName), + FixHint: fmt.Sprintf("Remove the existing ACS scan configuration named %q before re-running, or rename the ScanSettingBinding to use a different name.", payload.ScanName), + Skipped: true, + } + action.ActionType = "skip" + action.Reason = fmt.Sprintf("scan configuration %q already exists in ACS", payload.ScanName) + action.Problem = problem + return action + } + + // IMP-IDEM-004: dry-run => record planned action, do not POST + // IMP-IDEM-006: planned action "create" is still recorded + if r.dryRun { + action.ActionType = "create" + action.Reason = "dry-run: would POST /v2/compliance/scan/configurations" + action.Attempts = 0 + return action + } + + // IMP-IDEM-001: POST /v2/compliance/scan/configurations when name not found + // IMP-ERR-001: retry on transient errors with exponential backoff + // IMP-ERR-002: no retry on non-transient errors + var ( + lastErr error + id string + delay = 500 * time.Millisecond + ) + + for attempt := 1; attempt <= r.maxRetries; attempt++ { + action.Attempts = attempt + + id, lastErr = r.client.CreateScanConfiguration(ctx, payload) + if lastErr == nil { + action.ActionType = "create" + action.ACSScanConfigID = id + action.Reason = "scan configuration created successfully" + return action + } + + // Check if the error is transient (eligible for retry) 
+		if sc, ok := asStatusCoder(lastErr); ok {
+			code := sc.StatusCode()
+			if !transientStatusCodes[code] {
+				// Non-transient: fail immediately, no more attempts
+				action.ActionType = "fail"
+				action.Reason = fmt.Sprintf("non-transient HTTP %d error creating scan configuration", code)
+				action.Err = lastErr
+				return action
+			}
+		}
+		// Errors without an HTTP status (e.g. network errors) carry no status
+		// code, so they fall through here and are retried as transient.
+
+		// Do not sleep after the last attempt
+		if attempt < r.maxRetries {
+			select {
+			case <-ctx.Done():
+				action.ActionType = "fail"
+				action.Reason = "context cancelled during retry backoff"
+				action.Err = ctx.Err()
+				return action
+			case <-time.After(delay):
+			}
+			delay *= 2
+		}
+	}
+
+	// Exhausted all retries
+	action.ActionType = "fail"
+	action.Reason = fmt.Sprintf("failed after %d attempt(s): %v", action.Attempts, lastErr)
+	action.Err = lastErr
+	return action
+}
+
+// resourceRef formats the source as "namespace/bindingName" for use in Problem.ResourceRef.
+func resourceRef(source models.ReportItemSource) string {
+	if source.Namespace == "" {
+		return source.BindingName
+	}
+	return source.Namespace + "/" + source.BindingName
+}
+
+// asStatusCoder attempts to extract a statusCoder from err using errors.As-style
+// type assertion. It handles both direct and wrapped errors.
+func asStatusCoder(err error) (statusCoder, bool) { + // Direct type assertion first (most common path) + if sc, ok := err.(statusCoder); ok { + return sc, true + } + // Unwrap chain + type unwrapper interface{ Unwrap() error } + for err != nil { + if sc, ok := err.(statusCoder); ok { + return sc, true + } + uw, ok := err.(unwrapper) + if !ok { + break + } + err = uw.Unwrap() + } + return nil, false +} diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go new file mode 100644 index 0000000000000..3be75e7b6e847 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go @@ -0,0 +1,324 @@ +package reconcile_test + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/reconcile" +) + +// --------------------------------------------------------------------------- +// Mock ACS client +// --------------------------------------------------------------------------- + +// mockACSClient is a test double that records every call and allows the caller +// to inject per-call responses via the nextResponses queue. +// +// IMP-IDEM-003: The mock only implements POST (via CreateScanConfiguration). +// There is no Put/Update method. If one were added to ACSClient, this struct +// would fail to compile unless the method were added here too, making the +// violation immediately visible. +type mockACSClient struct { + // createResponses is consumed in order; each entry is either nil (success) + // or an error. Use statusError to encode HTTP status codes. + createResponses []error + + // callCount tracks total calls to CreateScanConfiguration. + callCount atomic.Int32 + + // recordedIDCounter is used to return unique IDs on success. 
+ idCounter atomic.Int32 + + // listConfigs is the fixed list returned by ListScanConfigurations. + listConfigs []models.ACSConfigSummary +} + +// statusError wraps an HTTP status code so the reconciler can distinguish +// transient (429/502/503/504) from non-transient (400/401/403/404) failures. +type statusError struct { + code int +} + +func (e *statusError) Error() string { return fmt.Sprintf("HTTP %d", e.code) } +func (e *statusError) StatusCode() int { return e.code } + +func (m *mockACSClient) Preflight(_ context.Context) error { return nil } + +func (m *mockACSClient) ListScanConfigurations(_ context.Context) ([]models.ACSConfigSummary, error) { + return m.listConfigs, nil +} + +func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSCreatePayload) (string, error) { + idx := int(m.callCount.Add(1)) - 1 + if idx < len(m.createResponses) { + if err := m.createResponses[idx]; err != nil { + return "", err + } + } + id := fmt.Sprintf("created-id-%d", m.idCounter.Add(1)) + return id, nil +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +func defaultSource() models.ReportItemSource { + return models.ReportItemSource{ + Namespace: "openshift-compliance", + BindingName: "cis-weekly", + ScanSettingName: "default-auto-apply", + } +} + +func defaultPayload(scanName string) models.ACSCreatePayload { + return models.ACSCreatePayload{ + ScanName: scanName, + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +// IMP-IDEM-001: non-existing name => POST called, action="create" +func TestApply_IMP_IDEM_001_NewName_CreatesConfig(t *testing.T) { + mock := 
&mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-001: expected action 'create', got %q", action.ActionType) + } + if action.ACSScanConfigID == "" { + t.Error("IMP-IDEM-001: expected non-empty ACSScanConfigID after create") + } + if action.Err != nil { + t.Errorf("IMP-IDEM-001: unexpected error: %v", action.Err) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-IDEM-001: expected 1 POST call, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-002: existing name => action="skip", Problem.Category=conflict, FixHint non-empty +func TestApply_IMP_IDEM_002_ExistingName_SkipsWithConflictProblem(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false) + + existing := map[string]bool{"cis-weekly": true} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.ActionType != "skip" { + t.Errorf("IMP-IDEM-002: expected action 'skip', got %q", action.ActionType) + } + if action.Problem == nil { + t.Fatal("IMP-IDEM-002: expected Problem to be non-nil for skipped-existing") + } + if action.Problem.Category != models.CategoryConflict { + t.Errorf("IMP-IDEM-002: expected Problem.Category 'conflict', got %q", action.Problem.Category) + } + if action.Problem.FixHint == "" { + t.Error("IMP-IDEM-002: expected non-empty Problem.FixHint") + } + if action.Reason == "" { + t.Error("IMP-IDEM-002: expected non-empty Reason") + } + // "already exists" must appear in the reason (per spec) + if !containsSubstring(action.Reason, "already exists") { + t.Errorf("IMP-IDEM-002: Reason must include 'already exists', got %q", action.Reason) + } +} + +// IMP-IDEM-003: verify no PUT ever called (mock records method; ACSClient has no Put) +func TestApply_IMP_IDEM_003_NeverCallsPUT(t *testing.T) { + // The mockACSClient 
deliberately has no Put/Update method. + // It only satisfies models.ACSClient which defines: + // Preflight, ListScanConfigurations, CreateScanConfiguration (POST only). + // If a PUT-based method existed in the interface, the mock would fail to compile. + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false) + + // Run multiple scenarios - none should trigger a PUT + for _, scanName := range []string{"new-scan-1", "new-scan-2"} { + _ = r.Apply(context.Background(), defaultPayload(scanName), defaultSource(), map[string]bool{}) + } + // existing name - should skip, not PUT + _ = r.Apply(context.Background(), defaultPayload("existing"), defaultSource(), map[string]bool{"existing": true}) + + // The mock only has CreateScanConfiguration (POST). callCount reflects POST calls only. + // 2 creates + 1 skip = 2 POST calls total (no PUT possible). + if mock.callCount.Load() != 2 { + t.Errorf("IMP-IDEM-003: expected exactly 2 POST calls (2 creates, 0 PUT), got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-004: dryRun=true => no POST +func TestApply_IMP_IDEM_004_DryRun_NoPost(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + + _ = r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-004: expected 0 POST calls in dry-run mode, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-006: dryRun => action="create" still recorded as planned +func TestApply_IMP_IDEM_006_DryRun_PlannedCreateRecorded(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-006: dry-run planned action should be 'create', got %q", action.ActionType) + } +} + +// IMP-IDEM-007: dryRun => problems still populated for 
problematic resources +func TestApply_IMP_IDEM_007_DryRun_ProblemsStillPopulated(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + + existing := map[string]bool{"cis-weekly": true} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.Problem == nil { + t.Fatal("IMP-IDEM-007: expected Problem to be populated even in dry-run mode") + } + if action.Problem.Category != models.CategoryConflict { + t.Errorf("IMP-IDEM-007: expected conflict problem in dry-run, got %q", action.Problem.Category) + } +} + +// IMP-ERR-001: 429 first 2 times then 200 => 3 total attempts +func TestApply_IMP_ERR_001_Retry429_ThenSuccess(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: 429}, + &statusError{code: 429}, + nil, // 3rd attempt succeeds + }, + } + r := reconcile.NewReconciler(mock, 5, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if action.ActionType != "create" { + t.Errorf("IMP-ERR-001: expected action 'create' after retry success, got %q", action.ActionType) + } + if action.Attempts != 3 { + t.Errorf("IMP-ERR-001: expected 3 total attempts, got %d", action.Attempts) + } + if action.Err != nil { + t.Errorf("IMP-ERR-001: expected nil error after eventual success, got %v", action.Err) + } +} + +// IMP-ERR-001: Retry on transient errors 502, 503, 504 +func TestApply_IMP_ERR_001_Retry5xx_ThenSuccess(t *testing.T) { + for _, code := range []int{502, 503, 504} { + code := code + t.Run(fmt.Sprintf("HTTP%d", code), func(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: code}, + &statusError{code: code}, + nil, // 3rd succeeds + }, + } + r := reconcile.NewReconciler(mock, 5, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if action.ActionType != "create" 
{ + t.Errorf("IMP-ERR-001: HTTP %d - expected 'create', got %q", code, action.ActionType) + } + if action.Attempts != 3 { + t.Errorf("IMP-ERR-001: HTTP %d - expected 3 attempts, got %d", code, action.Attempts) + } + }) + } +} + +// IMP-ERR-002: 400 => 1 attempt only, action="fail" +func TestApply_IMP_ERR_002_NonTransient400_NoRetry(t *testing.T) { + for _, code := range []int{400, 401, 403, 404} { + code := code + t.Run(fmt.Sprintf("HTTP%d", code), func(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: code}, + }, + } + r := reconcile.NewReconciler(mock, 5, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + + if action.ActionType != "fail" { + t.Errorf("IMP-ERR-002: HTTP %d - expected action 'fail', got %q", code, action.ActionType) + } + if action.Attempts != 1 { + t.Errorf("IMP-ERR-002: HTTP %d - expected exactly 1 attempt (no retry), got %d", code, action.Attempts) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-ERR-002: HTTP %d - expected 1 POST call, got %d", code, mock.callCount.Load()) + } + if action.Err == nil { + t.Errorf("IMP-ERR-002: HTTP %d - expected non-nil error", code) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Compile-time check: mockACSClient satisfies models.ACSClient +// This fails to compile if models.ACSClient gains any method not implemented +// by the mock, making interface drift immediately visible. 
+// --------------------------------------------------------------------------- +var _ models.ACSClient = (*mockACSClient)(nil) + +// --------------------------------------------------------------------------- +// Utility +// --------------------------------------------------------------------------- + +func containsSubstring(s, sub string) bool { + return len(s) >= len(sub) && (s == sub || len(sub) == 0 || + func() bool { + for i := 0; i <= len(s)-len(sub); i++ { + if s[i:i+len(sub)] == sub { + return true + } + } + return false + }()) +} + +// Verify containsSubstring works correctly +var _ = func() bool { + if !containsSubstring("scan already exists in ACS", "already exists") { + panic("containsSubstring broken") + } + return true +}() + +// errorIs is a helper for unwrapping statusError from wrapped errors. +func errorIs(err error, code int) bool { + var se *statusError + return errors.As(err, &se) && se.code == code +} + +// keep errorIs in use +var _ = errorIs diff --git a/scripts/compliance-operator-importer/internal/report/report.go b/scripts/compliance-operator-importer/internal/report/report.go new file mode 100644 index 0000000000000..e9907241f34e5 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/report/report.go @@ -0,0 +1,97 @@ +// Package report assembles the final Report from accumulated run items and writes +// it to disk as indented JSON when --report-json is set. +package report + +import ( + "encoding/json" + "fmt" + "os" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// Builder accumulates per-binding ReportItems during a run and produces the +// final Report once all bindings have been processed. +type Builder struct { + cfg *models.Config + items []models.ReportItem +} + +// NewBuilder returns a Builder configured from cfg. +func NewBuilder(cfg *models.Config) *Builder { + return &Builder{cfg: cfg} +} + +// RecordItem appends a single binding outcome to the builder. 
+func (b *Builder) RecordItem(item models.ReportItem) { + b.items = append(b.items, item) +} + +// Build constructs the final Report from all recorded items and the supplied +// problems list. +// +// IMP-CLI-021: sets meta.mode = "create-only", meta.timestamp to current UTC +// RFC3339, meta.dryRun from cfg, meta.namespaceScope from cfg. +// IMP-CLI-021: computes counts from items actions. +func (b *Builder) Build(problems []models.Problem) models.Report { + meta := models.ReportMeta{ + Timestamp: time.Now().UTC().Format(time.RFC3339), + DryRun: b.cfg.DryRun, + NamespaceScope: namespaceScope(b.cfg), + Mode: "create-only", + } + + counts := models.ReportCounts{ + Discovered: len(b.items), + } + for _, it := range b.items { + switch it.Action { + case "create": + counts.Create++ + case "skip": + counts.Skip++ + case "fail": + counts.Failed++ + } + } + + items := b.items + if items == nil { + items = []models.ReportItem{} + } + if problems == nil { + problems = []models.Problem{} + } + + return models.Report{ + Meta: meta, + Counts: counts, + Items: items, + Problems: problems, + } +} + +// WriteJSON writes report as indented JSON to path. +// Returns an error if the file cannot be created or written. +// IMP-CLI-021: output must be valid, parseable JSON. +func (b *Builder) WriteJSON(path string, report models.Report) error { + data, err := json.MarshalIndent(report, "", " ") + if err != nil { + return fmt.Errorf("marshal report to JSON: %w", err) + } + // Append a trailing newline for POSIX text-file compliance. + data = append(data, '\n') + if err := os.WriteFile(path, data, 0o600); err != nil { + return fmt.Errorf("write report JSON to %q: %w", path, err) + } + return nil +} + +// namespaceScope derives the namespaceScope string from cfg. 
+func namespaceScope(cfg *models.Config) string { + if cfg.COAllNamespaces { + return "all-namespaces" + } + return cfg.CONamespace +} diff --git a/scripts/compliance-operator-importer/internal/report/report_test.go b/scripts/compliance-operator-importer/internal/report/report_test.go new file mode 100644 index 0000000000000..ca8ca12ae39dc --- /dev/null +++ b/scripts/compliance-operator-importer/internal/report/report_test.go @@ -0,0 +1,217 @@ +package report_test + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/report" +) + +// baseConfig returns a minimal Config suitable for most report tests. +func baseConfig() *models.Config { + return &models.Config{ + DryRun: false, + CONamespace: "openshift-compliance", + } +} + +// TestIMP_CLI_021_BuildSetsModeTocreateOnly verifies that Build always sets +// meta.mode to "create-only" regardless of other configuration. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildSetsModeTocreateOnly(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + if r.Meta.Mode != "create-only" { + t.Errorf("meta.mode = %q; want %q", r.Meta.Mode, "create-only") + } +} + +// TestIMP_CLI_021_BuildCountsFromItemActions verifies that Build correctly derives +// discovered/create/skip/failed counts from the recorded items. +// Requirement: IMP-CLI-021. 
+func TestIMP_CLI_021_BuildCountsFromItemActions(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + + b.RecordItem(models.ReportItem{Action: "create"}) + b.RecordItem(models.ReportItem{Action: "create"}) + b.RecordItem(models.ReportItem{Action: "skip"}) + b.RecordItem(models.ReportItem{Action: "fail"}) + + r := b.Build(nil) + + if r.Counts.Discovered != 4 { + t.Errorf("counts.discovered = %d; want 4", r.Counts.Discovered) + } + if r.Counts.Create != 2 { + t.Errorf("counts.create = %d; want 2", r.Counts.Create) + } + if r.Counts.Skip != 1 { + t.Errorf("counts.skip = %d; want 1", r.Counts.Skip) + } + if r.Counts.Failed != 1 { + t.Errorf("counts.failed = %d; want 1", r.Counts.Failed) + } +} + +// TestIMP_CLI_021_BuildMetaNamespaceScopeAllNamespaces verifies that when +// COAllNamespaces is set, meta.namespaceScope is "all-namespaces". +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildMetaNamespaceScopeAllNamespaces(t *testing.T) { + cfg := &models.Config{ + COAllNamespaces: true, + } + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.NamespaceScope != "all-namespaces" { + t.Errorf("meta.namespaceScope = %q; want %q", r.Meta.NamespaceScope, "all-namespaces") + } +} + +// TestIMP_CLI_021_BuildMetaNamespaceScopeSingleNamespace verifies that when +// COAllNamespaces is false, meta.namespaceScope equals cfg.CONamespace. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildMetaNamespaceScopeSingleNamespace(t *testing.T) { + cfg := &models.Config{ + CONamespace: "openshift-compliance", + COAllNamespaces: false, + } + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.NamespaceScope != "openshift-compliance" { + t.Errorf("meta.namespaceScope = %q; want %q", r.Meta.NamespaceScope, "openshift-compliance") + } +} + +// TestIMP_CLI_021_BuildMetaDryRunReflectsCfg verifies that meta.dryRun mirrors +// the cfg.DryRun field. +// Requirement: IMP-CLI-021. 
+func TestIMP_CLI_021_BuildMetaDryRunReflectsCfg(t *testing.T) { + for _, dryRun := range []bool{true, false} { + cfg := &models.Config{DryRun: dryRun, CONamespace: "ns"} + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.DryRun != dryRun { + t.Errorf("dryRun=%v: meta.dryRun = %v; want %v", dryRun, r.Meta.DryRun, dryRun) + } + } +} + +// TestIMP_CLI_021_BuildTimestampIsRFC3339 verifies that meta.timestamp is a +// non-empty, valid RFC3339 string. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildTimestampIsRFC3339(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + if r.Meta.Timestamp == "" { + t.Fatal("meta.timestamp is empty") + } + // time.Parse with RFC3339 format validates the string. + // We use strings.Contains as a lightweight check; a full parse would need + // importing "time" and would be equally valid. + if !strings.Contains(r.Meta.Timestamp, "T") || !strings.Contains(r.Meta.Timestamp, "Z") { + t.Errorf("meta.timestamp %q does not look like UTC RFC3339", r.Meta.Timestamp) + } +} + +// TestIMP_CLI_021_WriteJSONProducesValidJSON verifies that WriteJSON writes +// parseable JSON to disk. +// Requirement: IMP-CLI-021. 
+func TestIMP_CLI_021_WriteJSONProducesValidJSON(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + b.RecordItem(models.ReportItem{Action: "create", Reason: "created successfully"}) + + r := b.Build(nil) + + dir := t.TempDir() + path := filepath.Join(dir, "report.json") + if err := b.WriteJSON(path, r); err != nil { + t.Fatalf("WriteJSON returned error: %v", err) + } + + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("reading written report: %v", err) + } + + var parsed models.Report + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("written JSON is not parseable: %v\ncontent:\n%s", err, string(data)) + } + + if parsed.Meta.Mode != "create-only" { + t.Errorf("parsed meta.mode = %q; want %q", parsed.Meta.Mode, "create-only") + } + if parsed.Counts.Discovered != 1 { + t.Errorf("parsed counts.discovered = %d; want 1", parsed.Counts.Discovered) + } +} + +// TestIMP_CLI_022_ProblemsInReportMatchInput verifies that problems passed to +// Build() appear unchanged in the report's Problems field. +// Requirement: IMP-CLI-022. 
+func TestIMP_CLI_022_ProblemsInReportMatchInput(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + + probs := []models.Problem{ + { + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-a", + Description: "ACS API returned 503", + FixHint: "Check ACS endpoint health and retry.", + Skipped: true, + }, + { + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-b", + Description: "Scan config already exists", + FixHint: "Delete the existing ACS config and re-run.", + Skipped: true, + }, + } + + r := b.Build(probs) + + if len(r.Problems) != 2 { + t.Fatalf("expected 2 problems in report, got %d", len(r.Problems)) + } + for i, want := range probs { + got := r.Problems[i] + if got != want { + t.Errorf("problem[%d] mismatch: got %+v, want %+v", i, got, want) + } + } +} + +// TestIMP_CLI_021_WriteJSONErrorOnBadPath verifies WriteJSON returns an error +// when the target directory does not exist. +func TestIMP_CLI_021_WriteJSONErrorOnBadPath(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + err := b.WriteJSON("/nonexistent/dir/report.json", r) + if err == nil { + t.Error("expected error writing to non-existent path, got nil") + } +} + +// TestIMP_CLI_021_BuildEmptyItemsProducesNonNilSlices verifies that Build +// returns non-nil Items and Problems slices even when nothing was recorded. +// This ensures JSON output is "items": [] not "items": null. 
func TestIMP_CLI_021_BuildEmptyItemsProducesNonNilSlices(t *testing.T) {
	b := report.NewBuilder(baseConfig())
	r := b.Build(nil)
	// A nil slice marshals to JSON null, not []; consumers of the report
	// expect empty arrays (IMP-CLI-021).
	if r.Items == nil {
		t.Error("Items is nil; want empty non-nil slice so JSON marshals as []")
	}
	if r.Problems == nil {
		t.Error("Problems is nil; want empty non-nil slice so JSON marshals as []")
	}
}
diff --git a/scripts/compliance-operator-importer/internal/run/run.go b/scripts/compliance-operator-importer/internal/run/run.go
new file mode 100644
index 0000000000000..6dd98b5397869
--- /dev/null
+++ b/scripts/compliance-operator-importer/internal/run/run.go
@@ -0,0 +1,213 @@
// Package run orchestrates a full importer execution: CO discovery, ACS
// reconciliation, problem collection, report generation, and exit-code
// determination.
package run

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/stackrox/co-acs-importer/internal/cofetch"
	"github.com/stackrox/co-acs-importer/internal/mapping"
	"github.com/stackrox/co-acs-importer/internal/models"
	"github.com/stackrox/co-acs-importer/internal/problems"
	"github.com/stackrox/co-acs-importer/internal/reconcile"
	"github.com/stackrox/co-acs-importer/internal/report"
)

// Exit code constants (IMP-CLI-017..019, IMP-ERR-003).
const (
	ExitSuccess      = 0 // all bindings processed without failures
	ExitFatalError   = 1 // preflight/config failure; no import attempted
	ExitPartialError = 2 // at least one binding failed
)

// Runner orchestrates the full import run.
// All fields are assigned by NewRunner and not mutated afterwards; WithOutput
// returns a modified copy rather than changing the receiver.
type Runner struct {
	cfg       *models.Config
	acsClient models.ACSClient
	coClient  cofetch.COClient
	out       io.Writer // injectable; defaults to os.Stdout
}

// NewRunner creates a Runner ready to execute, writing console output to os.Stdout.
func NewRunner(cfg *models.Config, acsClient models.ACSClient, coClient cofetch.COClient) *Runner {
	return &Runner{
		cfg:       cfg,
		acsClient: acsClient,
		coClient:  coClient,
		out:       os.Stdout,
	}
}

// WithOutput returns a shallow copy of the Runner writing console output to w.
// Intended for tests that need to capture or suppress printed output.
// The copy shares cfg and both clients with the receiver; only out differs.
func (r *Runner) WithOutput(w io.Writer) *Runner {
	cp := *r
	cp.out = w
	return &cp
}

// printf is a convenience wrapper so callers don't need to handle format errors.
func (r *Runner) printf(format string, args ...interface{}) {
	fmt.Fprintf(r.out, format, args...) //nolint:errcheck // best-effort console output
}

// Run executes the full import and returns the appropriate exit code.
//
// Execution steps:
//  1. List existing ACS scan config names to build the existingNames set.
//  2. List ScanSettingBindings from the CO source cluster.
//  3. For each binding: fetch its ScanSetting, build the ACS payload, reconcile.
//  4. Collect all problems and build the final Report.
//  5. Optionally write the JSON report to --report-json path.
//  6. Print the console summary (IMP-CLI-020).
//  7. Return exit code 0, 1, or 2 (IMP-CLI-017..019, IMP-ERR-003).
func (r *Runner) Run(ctx context.Context) int {
	collector := problems.NewCollector()
	builder := report.NewBuilder(r.cfg)

	// Step 1: list existing ACS scan configs to populate the deduplication set.
	// Failure here is fatal (IMP-CLI-018): we cannot safely proceed without
	// knowing which names already exist.
	summaries, err := r.acsClient.ListScanConfigurations(ctx)
	if err != nil {
		r.printf("FATAL: failed to list ACS scan configurations: %v\n", err)
		return ExitFatalError
	}
	existingNames := make(map[string]bool, len(summaries))
	for _, s := range summaries {
		existingNames[s.ScanName] = true
	}
	// NOTE(review): existingNames is built once and handed to the reconciler;
	// nothing in this file adds freshly created names back into it. If two
	// bindings map to the same scanName in one run, dedup depends on
	// reconcile.Apply mutating the map — confirm in the reconcile package.

	// Step 2: discover CO ScanSettingBindings.
	// Failure here is also fatal (IMP-CLI-018).
	bindings, err := r.coClient.ListScanSettingBindings(ctx)
	if err != nil {
		r.printf("FATAL: failed to list ScanSettingBindings: %v\n", err)
		return ExitFatalError
	}

	// maxRetries defaults to 1 (single attempt) when cfg.MaxRetries is zero.
	maxRetries := r.cfg.MaxRetries
	if maxRetries < 1 {
		maxRetries = 1
	}
	rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun)

	// Step 3: process each binding independently.
	// Per-binding failures skip that binding and record a problem; other bindings
	// continue processing (IMP-CLI-022, IMP-MAP-011).
	for _, binding := range bindings {
		r.processBinding(ctx, binding, existingNames, rec, collector, builder)
	}

	// Step 4: build the final report.
	finalReport := builder.Build(collector.All())

	// Step 5: write JSON report when requested (IMP-CLI-021).
	// A write failure is deliberately non-fatal: the import itself already
	// happened, so we warn and fall through to the console summary.
	if r.cfg.ReportJSON != "" {
		if err := builder.WriteJSON(r.cfg.ReportJSON, finalReport); err != nil {
			r.printf("WARNING: failed to write JSON report to %q: %v\n", r.cfg.ReportJSON, err)
		}
	}

	// Step 6: print console summary (IMP-CLI-020).
	r.printSummary(finalReport)

	// Step 7: determine exit code (IMP-CLI-017..019, IMP-ERR-003).
	if finalReport.Counts.Failed > 0 || collector.HasErrors() {
		return ExitPartialError // IMP-CLI-019
	}
	return ExitSuccess // IMP-CLI-017
}

// processBinding handles a single ScanSettingBinding: fetches its ScanSetting,
// maps it to an ACS payload, and calls the reconciler. All failures are recorded
// as problems and do not abort processing of remaining bindings.
func (r *Runner) processBinding(
	ctx context.Context,
	binding cofetch.ScanSettingBinding,
	existingNames map[string]bool,
	rec *reconcile.Reconciler,
	collector *problems.Collector,
	builder *report.Builder,
) {
	// Derive a stable resource reference for problem entries.
	resourceRef := fmt.Sprintf("%s/%s", binding.Namespace, binding.Name)

	// Build the source for ReportItem entries.
	source := models.ReportItemSource{
		Namespace:       binding.Namespace,
		BindingName:     binding.Name,
		ScanSettingName: binding.ScanSettingName,
	}

	// Fetch the referenced ScanSetting (IMP-MAP-008..010).
	ss, err := r.coClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName)
	if err != nil {
		collector.Add(models.Problem{
			Severity:    models.SeverityError,
			Category:    models.CategoryInput,
			ResourceRef: resourceRef,
			Description: fmt.Sprintf("ScanSetting %q referenced by binding %q could not be fetched: %v", binding.ScanSettingName, binding.Name, err),
			FixHint:     fmt.Sprintf("Ensure ScanSetting %q exists in namespace %q and the importer service account has read access.", binding.ScanSettingName, binding.Namespace),
			Skipped:     true,
		})
		builder.RecordItem(models.ReportItem{
			Source: source,
			Action: "fail",
			Reason: "ScanSetting not found",
			Error:  err.Error(),
		})
		return
	}

	// Map the CO resources to an ACS create payload (IMP-MAP-001..015).
	result := mapping.MapBinding(binding, ss, r.cfg)
	if result.Problem != nil {
		// IMP-MAP-012..015: mapping problem => skip + record.
		collector.Add(*result.Problem)
		builder.RecordItem(models.ReportItem{
			Source: source,
			Action: "fail",
			Reason: "mapping error",
			Error:  result.Problem.Description,
		})
		return
	}

	// Reconcile: create or skip (IMP-IDEM-001..007, IMP-ERR-001..004).
	action := rec.Apply(ctx, *result.Payload, source, existingNames)

	// Translate the reconcile action into one report line item.
	item := models.ReportItem{
		Source:          action.Source,
		Action:          action.ActionType,
		Reason:          action.Reason,
		Attempts:        action.Attempts,
		ACSScanConfigID: action.ACSScanConfigID,
	}
	if action.Err != nil {
		item.Error = action.Err.Error()
	}
	builder.RecordItem(item)

	if action.Problem != nil {
		collector.Add(*action.Problem)
	}
}

// printSummary writes the console summary to the configured output (IMP-CLI-020).
func (r *Runner) printSummary(rep models.Report) {
	dryRunLabel := "no"
	if r.cfg.DryRun {
		dryRunLabel = "yes"
	}
	r.printf("CO->ACS importer summary [dry-run: %s]:\n", dryRunLabel)
	r.printf(" Discovered: %d bindings\n", rep.Counts.Discovered)
	r.printf(" Created: %d\n", rep.Counts.Create)
	r.printf(" Skipped: %d\n", rep.Counts.Skip)
	r.printf(" Failed: %d\n", rep.Counts.Failed)
}
diff --git a/scripts/compliance-operator-importer/internal/run/run_test.go b/scripts/compliance-operator-importer/internal/run/run_test.go
new file mode 100644
index 0000000000000..e10c0d83e74ad
--- /dev/null
+++ b/scripts/compliance-operator-importer/internal/run/run_test.go
@@ -0,0 +1,521 @@
package run_test

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strings"
	"testing"

	"github.com/stackrox/co-acs-importer/internal/cofetch"
	"github.com/stackrox/co-acs-importer/internal/models"
	"github.com/stackrox/co-acs-importer/internal/run"
)

// ---------------------------------------------------------------------------
// Mock: models.ACSClient
// ---------------------------------------------------------------------------

// mockACSClient is a canned-response ACS client: list and create behavior is
// driven entirely by the struct fields set up by each test.
type mockACSClient struct {
	listErr     error                    // returned by ListScanConfigurations when set
	listResult  []models.ACSConfigSummary // returned when listErr is nil
	createErr   error                    // returned by CreateScanConfiguration when set
	createID    string                   // ID to return on successful create; "" => generated
	createCalls int                      // number of CreateScanConfiguration invocations
}

func (m *mockACSClient) Preflight(_ context.Context) error { return nil }

func (m *mockACSClient) ListScanConfigurations(_ context.Context) ([]models.ACSConfigSummary, error) {
	if m.listErr != nil {
		return nil, m.listErr
	}
	return m.listResult, nil
}

func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSCreatePayload) (string, error) {
	m.createCalls++
	if m.createErr != nil {
		return "", m.createErr
	}
	// Generate a distinct ID per call when the test did not pin one.
	id := m.createID
	if id == "" {
		id = fmt.Sprintf("new-id-%d", m.createCalls)
	}
	return id, nil
}

// Compile-time check: mockACSClient satisfies models.ACSClient.
+var _ models.ACSClient = (*mockACSClient)(nil) + +// --------------------------------------------------------------------------- +// Mock: cofetch.COClient +// --------------------------------------------------------------------------- + +type mockCOClient struct { + bindings []cofetch.ScanSettingBinding + listErr error + scanSetting *cofetch.ScanSetting + settingErr error +} + +func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.ScanSettingBinding, error) { + if m.listErr != nil { + return nil, m.listErr + } + return m.bindings, nil +} + +func (m *mockCOClient) GetScanSetting(_ context.Context, _, _ string) (*cofetch.ScanSetting, error) { + if m.settingErr != nil { + return nil, m.settingErr + } + return m.scanSetting, nil +} + +// Compile-time check: mockCOClient satisfies cofetch.COClient. +var _ cofetch.COClient = (*mockCOClient)(nil) + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +// httpStatusError lets the reconciler identify transient vs. non-transient codes. +type httpStatusError struct { + code int +} + +func (e *httpStatusError) Error() string { return fmt.Sprintf("HTTP %d", e.code) } +func (e *httpStatusError) StatusCode() int { return e.code } + +// baseConfig returns a valid Config for most tests. +func baseConfig() *models.Config { + return &models.Config{ + ACSEndpoint: "https://acs.example.com", + ACSClusterID: "cluster-a", + CONamespace: "openshift-compliance", + MaxRetries: 1, + } +} + +// goodBinding returns a ScanSettingBinding that maps cleanly to an ACS payload. 
+func goodBinding(name string) cofetch.ScanSettingBinding { + return cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: name, + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{ + {Name: "ocp4-cis", Kind: "Profile"}, + }, + } +} + +// goodScanSetting returns a ScanSetting with a valid daily cron schedule. +func goodScanSetting() *cofetch.ScanSetting { + return &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "default-auto-apply", + Schedule: "0 1 * * *", + } +} + +// runWithCapture executes Run and captures all printed output. +func runWithCapture(t *testing.T, cfg *models.Config, acs models.ACSClient, co cofetch.COClient) (int, string) { + t.Helper() + var buf bytes.Buffer + r := run.NewRunner(cfg, acs, co).WithOutput(&buf) + code := r.Run(context.Background()) + return code, buf.String() +} + +// --------------------------------------------------------------------------- +// Tests: exit codes (IMP-CLI-017, IMP-CLI-018, IMP-CLI-019, IMP-ERR-003) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_017_AllSuccessExitZero verifies that when all bindings are +// created successfully the runner returns exit code 0. +// Requirements: IMP-CLI-017, IMP-ERR-003. +func TestIMP_CLI_017_AllSuccessExitZero(t *testing.T) { + acsClient := &mockACSClient{} // no existing configs, create succeeds + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitSuccess { + t.Errorf("IMP-CLI-017: expected exit code %d (success), got %d", run.ExitSuccess, code) + } +} + +// TestIMP_CLI_017_EmptyBindingListExitZero verifies that an empty binding +// list (nothing to import) also produces exit code 0. +// Requirement: IMP-CLI-017. 
+func TestIMP_CLI_017_EmptyBindingListExitZero(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{bindings: []cofetch.ScanSettingBinding{}} + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitSuccess { + t.Errorf("IMP-CLI-017: expected exit code %d for empty run, got %d", run.ExitSuccess, code) + } +} + +// TestIMP_CLI_018_ListACSConfigsFatalExitOne verifies that a fatal failure +// when listing ACS scan configurations returns exit code 1. +// Requirements: IMP-CLI-018, IMP-ERR-003. +func TestIMP_CLI_018_ListACSConfigsFatalExitOne(t *testing.T) { + acsClient := &mockACSClient{listErr: errors.New("ACS unreachable")} + coClient := &mockCOClient{} + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitFatalError { + t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) + } + if !strings.Contains(output, "FATAL") { + t.Errorf("IMP-CLI-018: expected FATAL message in output, got: %q", output) + } +} + +// TestIMP_CLI_018_ListBindingsFatalExitOne verifies that a fatal failure +// when listing CO ScanSettingBindings returns exit code 1. +// Requirements: IMP-CLI-018, IMP-ERR-003. +func TestIMP_CLI_018_ListBindingsFatalExitOne(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{listErr: errors.New("k8s unreachable")} + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitFatalError { + t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) + } + if !strings.Contains(output, "FATAL") { + t.Errorf("IMP-CLI-018: expected FATAL message in output, got: %q", output) + } +} + +// TestIMP_CLI_019_SomeFailedExitTwo verifies that when at least one binding +// fails the runner returns exit code 2. +// Requirements: IMP-CLI-019, IMP-ERR-003. 
+func TestIMP_CLI_019_SomeFailedExitTwo(t *testing.T) { + // Inject a non-transient 400 error so the binding fails without retry. + acsClient := &mockACSClient{ + createErr: &httpStatusError{code: 400}, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitPartialError { + t.Errorf("IMP-CLI-019: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } +} + +// TestIMP_CLI_019_MissingScanSettingExitTwo verifies that a missing ScanSetting +// causes a binding-level failure that results in exit code 2. +// Requirements: IMP-CLI-019, IMP-ERR-003. +func TestIMP_CLI_019_MissingScanSettingExitTwo(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("broken")}, + settingErr: errors.New("ScanSetting not found"), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitPartialError { + t.Errorf("IMP-CLI-019: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } +} + +// TestIMP_ERR_003_ExitCodesMapCorrectly exercises all three exit code paths in +// a single test to confirm the mapping is exact. +// Requirement: IMP-ERR-003. 
+func TestIMP_ERR_003_ExitCodesMapCorrectly(t *testing.T) { + tests := []struct { + name string + wantCode int + acs *mockACSClient + co *mockCOClient + }{ + { + name: "all_successful", + wantCode: run.ExitSuccess, + acs: &mockACSClient{}, + co: &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("ok")}, + scanSetting: goodScanSetting(), + }, + }, + { + name: "fatal_acs_list", + wantCode: run.ExitFatalError, + acs: &mockACSClient{listErr: errors.New("down")}, + co: &mockCOClient{}, + }, + { + name: "partial_binding_failure", + wantCode: run.ExitPartialError, + acs: &mockACSClient{createErr: &httpStatusError{code: 400}}, + co: &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("fail")}, + scanSetting: goodScanSetting(), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + code, _ := runWithCapture(t, baseConfig(), tc.acs, tc.co) + if code != tc.wantCode { + t.Errorf("IMP-ERR-003: %s: expected exit code %d, got %d", tc.name, tc.wantCode, code) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Tests: console output (IMP-CLI-020) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_020_ConsoleSummaryIncludesAllCounters verifies that the console +// summary contains discovered, created, skipped, and failed counts. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_ConsoleSummaryIncludesAllCounters(t *testing.T) { + // Two bindings: one creates, one is skipped because it already exists. 
+ acsClient := &mockACSClient{ + listResult: []models.ACSConfigSummary{ + {ID: "existing-id", ScanName: "existing-scan"}, + }, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("new-scan"), + goodBinding("existing-scan"), // will be skipped + }, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + requiredPhrases := []string{ + "Discovered:", + "Created:", + "Skipped:", + "Failed:", + } + for _, phrase := range requiredPhrases { + if !strings.Contains(output, phrase) { + t.Errorf("IMP-CLI-020: output missing %q\nGot:\n%s", phrase, output) + } + } +} + +// TestIMP_CLI_020_DryRunLabelInSummary verifies that the summary includes +// the dry-run indicator. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_DryRunLabelInSummary(t *testing.T) { + cfg := baseConfig() + cfg.DryRun = true + + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) + + if !strings.Contains(output, "dry-run: yes") { + t.Errorf("IMP-CLI-020: expected 'dry-run: yes' in output, got:\n%s", output) + } +} + +// TestIMP_CLI_020_NonDryRunLabelInSummary verifies the non-dry-run label. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_NonDryRunLabelInSummary(t *testing.T) { + cfg := baseConfig() + cfg.DryRun = false + + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) + + if !strings.Contains(output, "dry-run: no") { + t.Errorf("IMP-CLI-020: expected 'dry-run: no' in output, got:\n%s", output) + } +} + +// TestIMP_CLI_020_CorrectCountsInSummary verifies that counts reported in the +// console summary are numerically correct. +// Requirement: IMP-CLI-020. 
+func TestIMP_CLI_020_CorrectCountsInSummary(t *testing.T) { + // Arrange: 3 bindings discovered, 2 create, 1 skipped (existing). + acsClient := &mockACSClient{ + listResult: []models.ACSConfigSummary{ + {ID: "id-existing", ScanName: "scan-c"}, + }, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("scan-a"), + goodBinding("scan-b"), + goodBinding("scan-c"), // exists => skip + }, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if !strings.Contains(output, "Discovered: 3") { + t.Errorf("IMP-CLI-020: expected 'Discovered: 3' in output, got:\n%s", output) + } + if !strings.Contains(output, "Created: 2") { + t.Errorf("IMP-CLI-020: expected 'Created: 2' in output, got:\n%s", output) + } + if !strings.Contains(output, "Skipped: 1") { + t.Errorf("IMP-CLI-020: expected 'Skipped: 1' in output, got:\n%s", output) + } + if !strings.Contains(output, "Failed: 0") { + t.Errorf("IMP-CLI-020: expected 'Failed: 0' in output, got:\n%s", output) + } +} + +// --------------------------------------------------------------------------- +// Tests: API error => problem recorded (IMP-ERR-004) +// --------------------------------------------------------------------------- + +// TestIMP_ERR_004_APIErrorRecordedAsProblem verifies that a non-transient API +// error causes the binding to be skipped and recorded as a problem in the report. +// The report's failed count must reflect the failure. +// Requirements: IMP-ERR-004, IMP-CLI-022. +func TestIMP_ERR_004_APIErrorRecordedAsProblem(t *testing.T) { + acsClient := &mockACSClient{ + createErr: &httpStatusError{code: 400}, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("bad-scan")}, + scanSetting: goodScanSetting(), + } + cfg := baseConfig() + cfg.MaxRetries = 1 + + code, output := runWithCapture(t, cfg, acsClient, coClient) + + // Exit code must be partial failure (IMP-ERR-003). 
+ if code != run.ExitPartialError { + t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } + // Console summary must show 1 failed (IMP-CLI-020). + if !strings.Contains(output, "Failed: 1") { + t.Errorf("IMP-ERR-004: expected 'Failed: 1' in output, got:\n%s", output) + } +} + +// TestIMP_ERR_004_MissingScanSettingRecordedAsProblem verifies that a missing +// ScanSetting is treated as a binding-level failure and recorded. +// Requirements: IMP-ERR-004, IMP-CLI-022. +func TestIMP_ERR_004_MissingScanSettingRecordedAsProblem(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("broken"), + goodBinding("ok"), + }, + scanSetting: goodScanSetting(), + } + + // Fail GetScanSetting on the first call (for "broken"), succeed on the second ("ok"). + coClient2 := &selectiveCOClientByOrder{ + base: coClient, + failAtCall: 1, + failErr: errors.New("ScanSetting not found"), + } + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient2) + + // Partial failure: one succeeded, one failed. + if code != run.ExitPartialError { + t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } + if !strings.Contains(output, "Failed: 1") { + t.Errorf("IMP-ERR-004: expected 'Failed: 1' in output, got:\n%s", output) + } + if !strings.Contains(output, "Created: 1") { + t.Errorf("IMP-ERR-004: expected 'Created: 1' in output, got:\n%s", output) + } +} + +// selectiveCOClientByBinding wraps COClient to fail GetScanSetting for a +// specific binding name by inspecting which binding is being processed. +// Since GetScanSetting doesn't receive the binding name, we use a counter-based +// approach: the first call goes to the first binding, etc. 
+type selectiveCOClientByOrder struct { + base *mockCOClient + failAtCall int // 1-based; call index that should fail + callCount int + failErr error +} + +func (s *selectiveCOClientByOrder) ListScanSettingBindings(ctx context.Context) ([]cofetch.ScanSettingBinding, error) { + return s.base.ListScanSettingBindings(ctx) +} + +func (s *selectiveCOClientByOrder) GetScanSetting(ctx context.Context, namespace, name string) (*cofetch.ScanSetting, error) { + s.callCount++ + if s.callCount == s.failAtCall { + return nil, s.failErr + } + return s.base.GetScanSetting(ctx, namespace, name) +} + +// --------------------------------------------------------------------------- +// Tests: dry-run mode (IMP-IDEM-004..007) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_007_DryRunNoCreates verifies that no ACS create calls are made +// in dry-run mode. +// Requirement: IMP-CLI-007. +func TestIMP_CLI_007_DryRunNoCreates(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + cfg := baseConfig() + cfg.DryRun = true + + runWithCapture(t, cfg, acsClient, coClient) + + if acsClient.createCalls != 0 { + t.Errorf("IMP-CLI-007: expected 0 create calls in dry-run mode, got %d", acsClient.createCalls) + } +} + +// TestIMP_CLI_007_DryRunReportedAsCreate verifies that dry-run planned creates +// appear as "create" actions in the console summary. +// Requirement: IMP-CLI-007. 
func TestIMP_CLI_007_DryRunReportedAsCreate(t *testing.T) {
	acsClient := &mockACSClient{}
	coClient := &mockCOClient{
		bindings:    []cofetch.ScanSettingBinding{goodBinding("cis-weekly")},
		scanSetting: goodScanSetting(),
	}
	cfg := baseConfig()
	cfg.DryRun = true

	_, output := runWithCapture(t, cfg, acsClient, coClient)

	// Planned (not executed) creates must still be counted as "Created" in
	// the summary so the operator can preview the run's effect.
	if !strings.Contains(output, "Created: 1") {
		t.Errorf("IMP-CLI-007: expected 'Created: 1' (planned) in dry-run output, got:\n%s", output)
	}
}
diff --git a/scripts/compliance-operator-importer/specs/00-spec-process.md b/scripts/compliance-operator-importer/specs/00-spec-process.md
new file mode 100644
index 0000000000000..beb6beaf159ef
--- /dev/null
+++ b/scripts/compliance-operator-importer/specs/00-spec-process.md
@@ -0,0 +1,57 @@
# 00 - Spec Process and Quality Gates

## Purpose

Translate product intent into executable behavior and contract specs before writing implementation code.

## Community best-practice principles applied

- **Behavior over implementation:** specs describe externally observable outcomes, not internal algorithms.
- **Single source of truth:** these specs replace ad-hoc task notes; code and tests must trace back to them.
- **Executable examples:** each important rule is captured as concrete scenario(s), preferably data-driven.
- **Contract-first boundaries:** external interfaces (CLI, ACS API payload shape, report output) are specified explicitly.
- **Low brittleness assertions:** tests assert fields that matter to consumers, avoid incidental details.

## Requirement key words

- `MUST`: mandatory behavior.
- `SHOULD`: strongly recommended unless justified deviation.
- `MAY`: optional.
+ +## Traceability model + +Every requirement gets an ID: +- `IMP-CLI-*` for CLI/config contract +- `IMP-MAP-*` for CO -> ACS mapping +- `IMP-IDEM-*` for idempotency/conflicts +- `IMP-ERR-*` for errors/retries/reporting +- `IMP-ACC-*` for acceptance/runtime checks + +Implementation and tests MUST annotate requirement IDs in comments or test names. + +## Spec execution strategy + +### Unit-level specs +- Parsing/validation (flags, env, config file). +- Mapping translation (CO objects -> ACS payload). +- Diff/idempotency logic. +- Retry classification. + +### Integration-level specs +- Kubernetes read path for CO resources. +- ACS API client interactions (`GET/POST/PUT`). +- Dry-run no-write guarantees. + +### Acceptance-level specs +- End-to-end execution against real cluster and ACS endpoint. +- Idempotency second-run no-op behavior. + +## Quality gates + +Before merging implementation: + +1. `MUST` requirements implemented. +2. All mapped scenarios have tests. +3. Dry-run validated as side-effect free. +4. Real-cluster acceptance checks pass. +5. No product runtime code path changes in Sensor/Central. diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md new file mode 100644 index 0000000000000..ad48be126d8cf --- /dev/null +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -0,0 +1,119 @@ +# 01 - CLI and Config Contract + +## Goal + +Define the importer interface so it can be implemented and tested predictably. + +## Inputs contract + +### Required inputs + +- **IMP-CLI-001**: importer MUST accept ACS endpoint (`--acs-endpoint` or `ACS_ENDPOINT`). +- **IMP-CLI-002**: importer MUST support ACS auth modes: + - token mode (default): bearer token from env var (`--acs-token-env`, default `ACS_API_TOKEN`), + - basic mode (optional): username/password. 
- **IMP-CLI-003**: importer MUST support source cluster selection like `kubectl`:
  - by default, use current kube context,
  - optionally accept `--source-kubecontext <context>` to pick a specific context.
- **IMP-CLI-004**: importer MUST support namespace scope:
  - `--co-namespace <namespace>` for single namespace, or
  - `--co-all-namespaces` for cluster-wide scan.
- **IMP-CLI-005**: importer MUST accept one destination ACS cluster ID:
  - `--acs-cluster-id <cluster-id>`.
  - all imported scan configs target this ACS cluster ID.

### Optional inputs

- **IMP-CLI-006**: importer mode is create-only for phase 1.
- **IMP-CLI-007**: `--dry-run` MUST disable all ACS write operations.
- **IMP-CLI-008**: `--report-json <path>` for structured report output.
- **IMP-CLI-009**: `--request-timeout <duration>` default `30s`.
- **IMP-CLI-010**: `--max-retries <n>` default `5`, min `0`.
- **IMP-CLI-011**: `--ca-cert-file <path>` optional.
- **IMP-CLI-012**: `--insecure-skip-verify` default false; MUST require explicit flag.
- **IMP-CLI-023**: importer MUST accept `--acs-auth-mode` enum:
  - `token` (default)
  - `basic`
- **IMP-CLI-024**: for basic mode, importer MUST accept:
  - `--acs-username` or `ACS_USERNAME`
  - `--acs-password-env` (default `ACS_PASSWORD`) to read password from env var.
- **IMP-CLI-025**: importer MUST reject ambiguous auth config (for example, missing required values for chosen mode).

## Preflight checks

- **IMP-CLI-013**: endpoint MUST be `https://`.
- **IMP-CLI-014**: auth material for selected mode MUST be non-empty:
  - token mode: resolved token is non-empty,
  - basic mode: username and password are non-empty.
- **IMP-CLI-015**: importer MUST probe ACS auth with:
  - `GET /v2/compliance/scan/configurations?pagination.limit=1`
  - using selected auth mode,
  - success only on HTTP 200.
- **IMP-CLI-016**: HTTP 401/403 at preflight MUST fail-fast with remediation message.
- **IMP-CLI-026**: when auth mode is not explicitly set, importer MUST default to `token`.
+ +## Output contract + +### Exit codes + +- **IMP-CLI-017**: `0` when run completed with no failed bindings. +- **IMP-CLI-018**: `1` for fatal preflight/config errors (no import attempted). +- **IMP-CLI-019**: `2` for partial success (some bindings failed). + +### Console summary + +- **IMP-CLI-020**: print totals: + - bindings discovered + - creates/skips/failures + - dry-run indicator + +### JSON report shape + +- **IMP-CLI-021**: when `--report-json` is set, write valid JSON with: + - `meta` (timestamp, dryRun, namespaceScope, mode=`create-only`) + - `counts` (discovered, create, skip, failed) + - `items[]`: + - `source` (`namespace`, `bindingName`, `scanSettingName`) + - `action` (`create|skip|fail`) + - `reason` + - `attempts` + - `acsScanConfigId` (if known) + - `error` (if failed) + - `problems[]`: + - `severity` (`error|warning`) + - `category` (`input|mapping|conflict|auth|api|retry|validation`) + - `resourceRef` (`namespace/name` or synthetic ref for non-resource errors) + - `description` (what happened) + - `fixHint` (how to fix) + - `skipped` (boolean; true when resource was skipped) + +- **IMP-CLI-022**: whenever any problem occurs for a resource, importer MUST: + - skip that resource, + - append one `problems[]` entry with `description` and `fixHint`, + - continue processing other resources. + +## Existing ACS config behavior (create-only) + +- **IMP-IDEM-001**: if `scanName` already exists in ACS, importer MUST skip that source resource. +- **IMP-IDEM-002**: skipped-existing resources MUST be added to `problems[]` with category `conflict` and a fix hint. +- **IMP-IDEM-003**: create-only phase MUST NOT send `PUT` updates. 
+ +Example minimal report skeleton: + +```json +{ + "meta": { + "dryRun": true, + "namespaceScope": "openshift-compliance", + "mode": "create-only" + }, + "counts": { + "discovered": 2, + "create": 1, + "skip": 1, + "failed": 0 + }, + "items": [], + "problems": [] +} +``` diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature new file mode 100644 index 0000000000000..09c780462f299 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -0,0 +1,74 @@ +Feature: Map Compliance Operator scheduled scan resources to ACS scan configurations + As an operator + I want importer behavior defined by examples + So implementation can be verified against stable expected outcomes + + Background: + Given ACS endpoint and token preflight succeeded + And the importer can read compliance.openshift.io resources + + @mapping @name + Scenario: Use ScanSettingBinding name as scanName + Given a ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the binding references ScanSetting "default-auto-apply" + And the binding references profiles: + | name | kind | + | ocp4-cis-node | Profile | + | ocp4-cis-master | Profile | + | my-tailored-profile | TailoredProfile | + When the importer builds the ACS payload + Then payload.scanName MUST equal "cis-weekly" # IMP-MAP-001 + And payload.scanConfig.profiles MUST equal: + | value | + | my-tailored-profile | + | ocp4-cis-master | + | ocp4-cis-node | # sorted, deduped + + @mapping @profiles + Scenario: Default missing profile kind to Profile + Given a ScanSettingBinding profile reference "custom-x" with no kind + When the importer resolves profile references + Then the profile reference kind MUST be treated as "Profile" # IMP-MAP-002 + And the resulting ACS profile name list MUST include "custom-x" + + @mapping @schedule + Scenario: Convert ScanSetting schedule into ACS schedule + Given 
ScanSetting "daily-scan" has complianceSuiteSettings.schedule "0 0 * * *" + And ScanSettingBinding "daily-cis" references "daily-scan" + When the importer maps schedule fields + Then payload.scanConfig.oneTimeScan MUST be false # IMP-MAP-003 + And payload.scanConfig.scanSchedule MUST be present # IMP-MAP-004 + + @mapping @description + Scenario: Build helpful description without ownership marker + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + When the importer builds payload description + Then payload.scanConfig.description MUST contain "Imported from CO ScanSettingBinding openshift-compliance/cis-weekly" # IMP-MAP-005 + And payload.scanConfig.description SHOULD include settings reference context # IMP-MAP-006 + + @mapping @clusters + Scenario: Use single destination ACS cluster ID + Given importer flag --acs-cluster-id is "cluster-a" + When the importer builds the destination payload + Then payload.clusters MUST equal: + | value | + | cluster-a | # IMP-MAP-007 + + @validation @mapping + Scenario: Missing ScanSetting reference fails only that binding + Given ScanSettingBinding "broken-binding" references ScanSetting "does-not-exist" + When the importer processes all discovered bindings + Then "broken-binding" MUST be marked failed # IMP-MAP-008 + And problems list MUST include an entry for "broken-binding" # IMP-MAP-009 + And that problem entry MUST include a fix hint # IMP-MAP-010 + And other valid bindings MUST still be processed # IMP-MAP-011 + + @mapping @schedule @problems + Scenario: Invalid schedule is collected as problem and skipped + Given ScanSetting "bad-schedule" has complianceSuiteSettings.schedule "every day at noon" + And ScanSettingBinding "broken-schedule-binding" references "bad-schedule" + When the importer maps schedule fields + Then "broken-schedule-binding" MUST be skipped # IMP-MAP-012 + And problems list MUST include category "mapping" # IMP-MAP-013 + And problem description MUST mention schedule conversion 
failed # IMP-MAP-014
+    And problem fix hint MUST suggest using a valid cron expression # IMP-MAP-015
diff --git a/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature
new file mode 100644
index 0000000000000..abbec98d476fe
--- /dev/null
+++ b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature
@@ -0,0 +1,74 @@
+Feature: Create-only idempotency dry-run behavior and retry policy
+  As an operator
+  I want safe reruns and predictable failure handling
+  So importer usage is low risk in production environments
+
+  Background:
+    Given ACS endpoint and token preflight succeeded
+    And desired payload for source "openshift-compliance/cis-weekly" is computed
+
+  @idempotency
+  Scenario: Create when scanName does not exist
+    Given ACS has no scan configuration with scanName "cis-weekly"
+    When importer executes in apply mode
+    Then importer MUST send POST /v2/compliance/scan/configurations # IMP-IDEM-001
+    And action MUST be "create"
+
+  @idempotency
+  Scenario: Skip when scanName already exists
+    Given ACS has scan configuration with scanName "cis-weekly"
+    When importer executes in apply mode
+    Then importer MUST NOT send PUT # IMP-IDEM-003
+    And action MUST be "skip"
+    And reason MUST include "already exists"
+    And problems list MUST include conflict category # IMP-IDEM-002
+
+  @dryrun
+  Scenario: Dry-run performs no writes
+    Given importer is started with --dry-run
+    And at least one action would be create in apply mode
+    When importer completes
+    Then importer MUST NOT send POST # IMP-IDEM-004
+    And importer MUST NOT send PUT # IMP-IDEM-005
+    And planned actions MUST be included in report # IMP-IDEM-006
+    And problems list MUST still be populated for problematic resources # IMP-IDEM-007
+
+  @retry @transient
+  Scenario Outline: Retry transient ACS write failures
+    Given an ACS create operation returns HTTP <status> for first 2 attempts
+    
And the 3rd attempt succeeds
+    When importer executes in apply mode
+    Then operation MUST be retried with backoff # IMP-ERR-001
+    And total attempts MUST be 3
+
+    Examples:
+      | status |
+      | 429 |
+      | 502 |
+      | 503 |
+      | 504 |
+
+  @retry @nontransient
+  Scenario Outline: Do not retry non-transient errors
+    Given an ACS create operation returns HTTP <status>
+    When importer executes in apply mode
+    Then operation MUST NOT be retried # IMP-ERR-002
+    And the item MUST be skipped and recorded as a problem # IMP-ERR-004
+
+    Examples:
+      | status |
+      | 400 |
+      | 401 |
+      | 403 |
+      | 404 |
+
+  @exitcodes
+  Scenario Outline: Exit code reflects outcome category
+    Given importer run ends with outcome "<outcome>"
+    Then process exit code MUST be <code> # IMP-ERR-003
+
+    Examples:
+      | outcome | code |
+      | all successful | 0 |
+      | fatal preflight failure | 1 |
+      | partial binding failures | 2 |
diff --git a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md
new file mode 100644
index 0000000000000..4adeaa012d51c
--- /dev/null
+++ b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md
@@ -0,0 +1,160 @@
+# 04 - Validation and Acceptance Spec
+
+This document is the acceptance test contract for real-cluster validation.
+
+## Preconditions
+
+- `kubectl`, `curl`, `jq` installed.
+- Logged into target cluster containing Compliance Operator resources.
+- ACS endpoint reachable from runner.
+- Importer binary built locally.
+
+Set environment:
+
+```bash
+export ACS_ENDPOINT="https://central.stackrox.example.com:443"
+export ACS_API_TOKEN="<api token>"
+export ACS_USERNAME="<username>"
+export ACS_PASSWORD="<password>"
+export CO_NAMESPACE="openshift-compliance"
+export IMPORTER_BIN="./bin/co-acs-scan-importer"
+export ACS_CLUSTER_ID="<acs cluster uuid>"
+```
+
+## Acceptance checks
+
+### A1 - CO resource discovery
+
+- **IMP-ACC-001**: importer test run MUST begin only if required CO resource types are listable.
+ +Commands: + +```bash +kubectl get scansettingbindings.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get scansettings.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get profiles.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get tailoredprofiles.compliance.openshift.io -n "${CO_NAMESPACE}" || true +``` + +Pass condition: + +- first 3 commands succeed (exit 0). + +### A2 - ACS auth preflight + +- **IMP-ACC-002**: ACS token and endpoint MUST pass read probe. +- **IMP-ACC-013**: optional basic-auth mode MUST pass read probe in local/dev environments. + +Command: + +```bash +curl -ksS \ + -H "Authorization: Bearer ${ACS_API_TOKEN}" \ + "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . +``` + +Pass condition: + +- command returns valid JSON and does not contain auth error. + +Optional local/dev basic-auth probe: + +```bash +curl -ksS \ + -u "${ACS_USERNAME}:${ACS_PASSWORD}" \ + "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . +``` + +### A3 - Dry-run side-effect safety + +- **IMP-ACC-003**: dry-run MUST produce no writes. + +Command: + +```bash +"${IMPORTER_BIN}" \ + --acs-endpoint "${ACS_ENDPOINT}" \ + --acs-token-env ACS_API_TOKEN \ + --co-namespace "${CO_NAMESPACE}" \ + --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --dry-run \ + --report-json "/tmp/co-acs-import-dryrun.json" +``` + +Pass conditions: + +- exit code is `0` or `2`, +- `/tmp/co-acs-import-dryrun.json` exists and is valid JSON, +- actions listed as planned only (no applied create/update markers), +- `problems[]` is present and contains `description` + `fixHint` for each problematic resource. + +### A4 - Apply creates expected configs (create-only) + +- **IMP-ACC-004**: apply mode MUST create missing target ACS configs. 
+ +Command: + +```bash +"${IMPORTER_BIN}" \ + --acs-endpoint "${ACS_ENDPOINT}" \ + --acs-token-env ACS_API_TOKEN \ + --co-namespace "${CO_NAMESPACE}" \ + --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --report-json "/tmp/co-acs-import-apply.json" +``` + +Verify: + +```bash +curl -ksS \ + -H "Authorization: Bearer ${ACS_API_TOKEN}" \ + "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=200" | \ + jq '.configurations[] | {id, scanName, profiles: .scanConfig.profiles, description: .scanConfig.description}' +``` + +Pass conditions: + +- expected imported scan names exist, +- profile lists match expected binding mappings. + +### A5 - Idempotency on second run + +- **IMP-ACC-005**: second run with same inputs MUST be no-op. + +Command: + +```bash +"${IMPORTER_BIN}" \ + --acs-endpoint "${ACS_ENDPOINT}" \ + --acs-token-env ACS_API_TOKEN \ + --co-namespace "${CO_NAMESPACE}" \ + --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --report-json "/tmp/co-acs-import-second-run.json" +``` + +Pass conditions: + +- report shows skip actions for already-existing scan names, +- no net changes in ACS list output. + +### A6 - Existing config behavior (create-only) + +- **IMP-ACC-006**: existing scan names MUST be skipped and recorded in `problems[]`. + +Procedure: + +1. Manually modify one imported ACS scan config (name unchanged). +2. Re-run importer. +3. Verify that modified existing config is not updated and is captured as skipped conflict. + +### A7 - Failure paths + +- **IMP-ACC-007**: invalid token MUST fail-fast with exit code `1`. +- **IMP-ACC-008**: missing referenced ScanSetting MUST fail only that binding (partial run exit code `2` when others succeed). +- **IMP-ACC-009**: transient ACS failures MUST follow retry policy and record attempt counts. +- **IMP-ACC-012**: all per-resource problems MUST be emitted in `problems[]` with remediation hint. 
+ +## Non-goal compliance checks + +- **IMP-ACC-010**: no code changes in Sensor/Central runtime paths are required to run importer. +- **IMP-ACC-011**: importer MUST not mutate Compliance Operator resources. diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md new file mode 100644 index 0000000000000..6b32f278b34f1 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -0,0 +1,18 @@ +# 05 - Traceability Matrix + +Use this matrix to ensure complete implementation coverage. + +|Requirement ID|Spec source|Test level|Notes| +|---|---|---|---| +|IMP-CLI-001..026|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, token/basic auth modes, create-only report + problems list| +|IMP-MAP-001..015|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule handling, skip+problem behavior| +|IMP-IDEM-001..007|`03-idempotency-dry-run-retries.feature`|Unit + integration|Create-only idempotency and dry-run reporting| +|IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| +|IMP-ACC-001..013|`04-validation-and-acceptance.md`|Acceptance|Real cluster and ACS verification| + +## Coverage rule + +For each requirement ID, implementation PR MUST include: + +- at least one test case name containing that ID, and +- one short note in PR description summarizing pass evidence for that ID family. 
diff --git a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md new file mode 100644 index 0000000000000..bb058d4e086fe --- /dev/null +++ b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md @@ -0,0 +1,187 @@ +# 06 - Implementation Backlog (Spec + Agentic Execution) + +This backlog translates specs into delivery slices with strict requirement traceability. + +## Working rules + +- Implement slices in order. +- Implement production code in Go for Phase 1 (no bash/shell implementation). +- For each slice: + 1. write/enable failing tests first, + 2. implement minimum code to pass, + 3. run tests and capture evidence, + 4. list fulfilled requirement IDs in PR notes. +- Keep each slice in its own PR when possible. + +## Slice A - CLI, config, and preflight + +### A Goal + +Provide a reliable entrypoint with strict validation and preflight checks. + +### A Requirement IDs + +- `IMP-CLI-001..016` +- `IMP-CLI-023..026` + +### A Implementation targets (suggested) + +- `scripts/compliance-operator-importer/cmd/importer/main.go` +- `scripts/compliance-operator-importer/internal/config/config.go` +- `scripts/compliance-operator-importer/internal/preflight/preflight.go` + +### A Tests to add + +- `internal/config/config_test.go` +- `internal/preflight/preflight_test.go` + +### A Acceptance signal + +- Valid flags/env parse and preflight probe behavior with correct exit pathing. +- Both auth modes behave correctly: + - token mode default path, + - basic mode local/dev path. + +### A Agent prompt seed + +- "Implement Slice A for create-only importer. Start with tests for IMP-CLI-001..016 and IMP-CLI-023..026, then implement CLI/config/preflight with HTTPS and both token/basic auth mode support." + +## Slice B - CO discovery and mapping core + +### B Goal + +Discover CO resources and map into ACS create payloads. 
+ +### B Requirement IDs + +- `IMP-MAP-001..015` + +### B Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/cofetch/client.go` +- `scripts/compliance-operator-importer/internal/mapping/mapping.go` +- `scripts/compliance-operator-importer/internal/mapping/schedule.go` + +### B Tests to add + +- `internal/mapping/mapping_test.go` +- `internal/mapping/schedule_test.go` + +### B Acceptance signal + +- Deterministic payload creation from SSB/ScanSetting/Profile inputs. +- Invalid schedule path produces skip-worthy error with fix hint text. + +### B Agent prompt seed + +- "Implement Slice B with tests first for IMP-MAP-001..015. Ensure missing profile kind defaults to Profile and invalid schedule becomes skip+problem." + +## Slice C - ACS create-only writer and idempotency + +### C Goal + +Create missing configs, skip existing names, never update. + +### C Requirement IDs + +- `IMP-IDEM-001..007` + +### C Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/acs/client.go` +- `scripts/compliance-operator-importer/internal/reconcile/create_only.go` + +### C Tests to add + +- `internal/reconcile/create_only_test.go` +- `internal/acs/client_test.go` + +### C Acceptance signal + +- Existing `scanName` always skipped with conflict problem. +- No code path emits `PUT`. + +### C Agent prompt seed + +- "Implement Slice C as strict create-only. Test IMP-IDEM-001..007 first, especially: existing scanName => skip + conflict problem; never call PUT." + +## Slice D - Problem list, reporting, and exit codes + +### D Goal + +Centralize error handling/reporting and enforce run outcomes. 
+ +### D Requirement IDs + +- `IMP-CLI-017..022` +- `IMP-ERR-001..004` + +### D Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/problems/problems.go` +- `scripts/compliance-operator-importer/internal/report/report.go` +- `scripts/compliance-operator-importer/internal/run/run.go` + +### D Tests to add + +- `internal/problems/problems_test.go` +- `internal/report/report_test.go` +- `internal/run/run_test.go` + +### D Acceptance signal + +- `problems[]` always emitted for problematic resources with `description` + `fixHint`. +- exit codes map correctly to all-success/fatal/partial outcomes. + +### D Agent prompt seed + +- "Implement Slice D with tests first for IMP-CLI-017..022 and IMP-ERR-001..004. Ensure problem list and exit code semantics exactly match spec." + +## Slice E - End-to-end acceptance and tooling + +### E Goal + +Make real-cluster validation repeatable and scriptable. + +### E Requirement IDs + +- `IMP-ACC-001..012` + +### E Implementation targets (suggested) + +- `scripts/compliance-operator-importer/hack/acceptance-run.sh` +- `scripts/compliance-operator-importer/hack/check-report.sh` + +### E Tests/checks to add + +- lightweight script tests where practical. +- documented manual acceptance evidence for cluster runs. + +### E Acceptance signal + +- all commands/checks in `specs/04-validation-and-acceptance.md` are reproducible. +- include at least one real-cluster proof run against a live ACS endpoint (for example localhost:8443) with artifact output. + +### E Agent prompt seed + +- "Implement Slice E automation helpers for IMP-ACC-001..012 and produce run artifacts paths for dry-run/apply/second-run checks." + +## Cross-slice conventions + +- Requirement IDs must appear in test names or comments. +- Keep mapping logic side-effect free where possible. +- Wrap external clients (k8s/ACS) behind interfaces for deterministic tests. +- Never mutate CO resources. 
+- Keep create-only invariant explicit (guard rail test that fails on any `PUT` path). +- Verify behavior with real-world examples early and often, not only mocked tests. +- Capture smoke-test commands and outputs in PR notes for traceability. + +## Suggested execution order and ownership + +1. Slice A (platform/entrypoint) +2. Slice B (domain mapping) +3. Slice C (ACS reconciliation) +4. Slice D (reporting + run orchestration) +5. Slice E (acceptance automation) + +One agent per slice is ideal; if sequential, complete one slice fully before next. diff --git a/tools/roxvet/analyzers/validateimports/analyzer.go b/tools/roxvet/analyzers/validateimports/analyzer.go index f13d45fb6a1c7..75c69f3ac2a91 100644 --- a/tools/roxvet/analyzers/validateimports/analyzer.go +++ b/tools/roxvet/analyzers/validateimports/analyzer.go @@ -386,6 +386,12 @@ func verifyImportsFromAllowedPackagesOnly(pass *analysis.Pass, imports []*ast.Im } func run(pass *analysis.Pass) (interface{}, error) { + // Skip packages that belong to a different Go module entirely (e.g. sub-modules + // in the repository whose import path does not start with the rox module prefix). + // validateimports only enforces cross-package import rules within the rox module. + if !strings.HasPrefix(pass.Pkg.Path(), roxPrefix) { + return nil, nil + } root, valid, err := getRoot(pass.Pkg.Path()) if err != nil { pass.Reportf(token.NoPos, "couldn't find valid root: %v", err) From 5305bb520b79479e10ef0d31d143235ce011e741 Mon Sep 17 00:00:00 2001 From: Guzman Date: Tue, 24 Mar 2026 23:47:37 +0100 Subject: [PATCH 02/24] fix(co-importer): correct CO resource field paths in parser ScanSettingBinding: profiles and settingsRef are top-level fields, not nested under spec (spec is always empty in the actual CR). ScanSetting: schedule is a top-level field, not nested under complianceSuiteSettings.schedule as the spec document assumed. Found during live smoke-test against the cluster. 
Co-Authored-By: Claude Sonnet 4.6 --- .../internal/cofetch/client.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go index caf5e8f27a441..5cd1cc7761ea0 100644 --- a/scripts/compliance-operator-importer/internal/cofetch/client.go +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -103,11 +103,12 @@ func parseScanSettingBinding(obj map[string]interface{}) (ScanSettingBinding, er name, _ := meta["name"].(string) namespace, _ := meta["namespace"].(string) - spec, _ := obj["spec"].(map[string]interface{}) + // profiles and settingsRef are top-level fields in the ScanSettingBinding + // resource (not nested under spec). spec is always empty in practice. // Parse profiles list into []NamedObjectReference. var profiles []NamedObjectReference - if rawProfiles, ok := spec["profiles"].([]interface{}); ok { + if rawProfiles, ok := obj["profiles"].([]interface{}); ok { for _, rp := range rawProfiles { pm, ok := rp.(map[string]interface{}) if !ok { @@ -123,7 +124,7 @@ func parseScanSettingBinding(obj map[string]interface{}) (ScanSettingBinding, er // Parse settingsRef as a NamedObjectReference. var settingsRef *NamedObjectReference - if sr, ok := spec["settingsRef"].(map[string]interface{}); ok { + if sr, ok := obj["settingsRef"].(map[string]interface{}); ok { settingsRef = &NamedObjectReference{ Name: stringField(sr, "name"), Kind: stringField(sr, "kind"), @@ -157,11 +158,8 @@ func parseScanSetting(obj map[string]interface{}) (*ScanSetting, error) { name, _ := meta["name"].(string) namespace, _ := meta["namespace"].(string) - // Schedule is nested under complianceSuiteSettings.schedule. - schedule := "" - if css, ok := obj["complianceSuiteSettings"].(map[string]interface{}); ok { - schedule, _ = css["schedule"].(string) - } + // schedule is a top-level field in the ScanSetting resource. 
+ schedule, _ := obj["schedule"].(string) if name == "" { return nil, errors.New("ScanSetting has no name") From c484ac9a90537ced2689e4450fb6915485dceab0 Mon Sep 17 00:00:00 2001 From: Guzman Date: Wed, 25 Mar 2026 09:34:23 +0100 Subject: [PATCH 03/24] fix(co-importer): correct CO resource field paths in specs ScanSetting.schedule is a top-level field, not nested under complianceSuiteSettings.schedule as the spec originally assumed. ScanSettingBinding.profiles is also top-level, not under spec. Discovered during live smoke-test against the cluster. Co-Authored-By: Claude Sonnet 4.6 --- scripts/compliance-operator-importer/DECISIONS.md | 2 +- .../specs/02-co-to-acs-mapping.feature | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/compliance-operator-importer/DECISIONS.md b/scripts/compliance-operator-importer/DECISIONS.md index ea959f917aa0b..cd23fb693a5c1 100644 --- a/scripts/compliance-operator-importer/DECISIONS.md +++ b/scripts/compliance-operator-importer/DECISIONS.md @@ -38,7 +38,7 @@ This document freezes Phase 1 behavior. Any deviation requires updating this fil - Basic mode uses username/password inputs and the same preflight endpoint checks. 8. **Profile kind fallback** - - Missing `ScanSettingBinding.spec.profiles[].kind` defaults to `Profile`. + - Missing `ScanSettingBinding.profiles[].kind` defaults to `Profile` (profiles is a top-level field, not under spec). 9. **Schedule conversion** - Convert valid CO cron to ACS schedule fields. 
diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature index 09c780462f299..ccc5d737616ca 100644 --- a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -33,7 +33,7 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat @mapping @schedule Scenario: Convert ScanSetting schedule into ACS schedule - Given ScanSetting "daily-scan" has complianceSuiteSettings.schedule "0 0 * * *" + Given ScanSetting "daily-scan" has schedule "0 0 * * *" And ScanSettingBinding "daily-cis" references "daily-scan" When the importer maps schedule fields Then payload.scanConfig.oneTimeScan MUST be false # IMP-MAP-003 @@ -65,7 +65,7 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat @mapping @schedule @problems Scenario: Invalid schedule is collected as problem and skipped - Given ScanSetting "bad-schedule" has complianceSuiteSettings.schedule "every day at noon" + Given ScanSetting "bad-schedule" has schedule "every day at noon" And ScanSettingBinding "broken-schedule-binding" references "bad-schedule" When the importer maps schedule fields Then "broken-schedule-binding" MUST be skipped # IMP-MAP-012 From 6bb0acda29bd82e726b8988bbab09ec01f53153c Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 00:56:53 +0100 Subject: [PATCH 04/24] feat(co-importer): align CLI flags and env vars with roxctl conventions (Slice H) Rename flags/env vars to match roxctl patterns: --endpoint/ROX_ENDPOINT, ROX_API_TOKEN, ROX_ADMIN_PASSWORD, ROX_ADMIN_USER. Remove explicit --acs-auth-mode in favor of auto-inference from which env var is set. Unify --acs-cluster-id and --source-kubecontext into a single --cluster flag that accepts UUID, name, or ctx= overrides. Add --overwrite-existing for update-in-place (IMP-IDEM-008/009). 
Add --username with default "admin". Specs updated to reflect the new contract. Extensive unit tests (~60 cases) freeze the new behavior including edge cases. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../cmd/importer/main.go | 80 +- .../internal/acs/client.go | 120 ++- .../internal/acs/client_test.go | 23 +- .../internal/cofetch/client.go | 59 +- .../internal/config/config.go | 380 ++++++-- .../config/config_multicluster_test.go | 154 +++ .../internal/config/config_test.go | 905 +++++++++++++++--- .../internal/models/models.go | 49 +- .../internal/preflight/preflight.go | 42 +- .../internal/preflight/preflight_test.go | 38 +- .../internal/reconcile/create_only.go | 137 ++- .../internal/reconcile/create_only_test.go | 145 ++- .../internal/report/report.go | 11 +- .../internal/run/cluster_source.go | 207 ++++ .../internal/run/run.go | 79 +- .../internal/run/run_test.go | 69 +- .../specs/01-cli-and-config-contract.md | 96 +- .../specs/02-co-to-acs-mapping.feature | 81 +- .../03-idempotency-dry-run-retries.feature | 19 +- .../specs/04-validation-and-acceptance.md | 75 +- .../specs/05-traceability-matrix.md | 8 +- .../specs/06-implementation-backlog.md | 152 ++- 22 files changed, 2363 insertions(+), 566 deletions(-) create mode 100644 scripts/compliance-operator-importer/internal/config/config_multicluster_test.go create mode 100644 scripts/compliance-operator-importer/internal/run/cluster_source.go diff --git a/scripts/compliance-operator-importer/cmd/importer/main.go b/scripts/compliance-operator-importer/cmd/importer/main.go index cd17554751a45..84078bb2920e9 100644 --- a/scripts/compliance-operator-importer/cmd/importer/main.go +++ b/scripts/compliance-operator-importer/cmd/importer/main.go @@ -1,14 +1,8 @@ // Binary co-acs-scan-importer reads Compliance Operator ScanSettingBinding -// resources from a Kubernetes cluster and creates equivalent ACS compliance -// scan configurations through the ACS v2 API. 
+// resources from one or more Kubernetes clusters and creates equivalent ACS +// compliance scan configurations through the ACS v2 API. // -// Usage: -// -// co-acs-scan-importer \ -// --acs-endpoint https://central.example.com \ -// --co-namespace openshift-compliance \ -// --acs-cluster-id \ -// [--dry-run] [--report-json /tmp/report.json] +// Run with --help for full usage information and examples. package main import ( @@ -21,6 +15,7 @@ import ( "github.com/stackrox/co-acs-importer/internal/config" "github.com/stackrox/co-acs-importer/internal/preflight" "github.com/stackrox/co-acs-importer/internal/run" + "github.com/stackrox/co-acs-importer/internal/status" ) func main() { @@ -30,29 +25,88 @@ func main() { func mainWithCode() int { cfg, err := config.ParseAndValidate(os.Args[1:]) if err != nil { + if err == config.ErrHelpRequested { + return 0 + } fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) return run.ExitFatalError } + s := status.New() ctx := context.Background() - // IMP-CLI-015, IMP-CLI-016: preflight check before any resource processing. + // Preflight check before any resource processing. + s.Stage("Preflight", "checking ACS connectivity and credentials") if err := preflight.Run(ctx, cfg); err != nil { - fmt.Fprintf(os.Stderr, "FATAL: preflight failed: %v\n", err) + s.Failf("%v", err) return run.ExitFatalError } + s.OKf("ACS endpoint is reachable at %s", cfg.ACSEndpoint) acsClient, err := acs.NewClient(cfg) if err != nil { - fmt.Fprintf(os.Stderr, "FATAL: failed to create ACS client: %v\n", err) + s.Failf("failed to create ACS client: %v", err) return run.ExitFatalError } + // Resolve --cluster name lookup if needed (IMP-MAP-022). 
+ if cfg.ClusterNameLookup != "" { + s.Stagef("Resolve", "looking up cluster %q in ACS", cfg.ClusterNameLookup) + clusters, err := acsClient.ListClusters(ctx) + if err != nil { + s.Failf("failed to list ACS clusters: %v", err) + return run.ExitFatalError + } + var found bool + for _, c := range clusters { + if c.Name == cfg.ClusterNameLookup { + cfg.ACSClusterID = c.ID + found = true + break + } + } + if !found { + s.Failf("cluster %q not found in ACS", cfg.ClusterNameLookup) + return run.ExitFatalError + } + s.OKf("resolved %q → %s", cfg.ClusterNameLookup, cfg.ACSClusterID) + } + + // Multi-cluster mode or single-cluster with auto-discovery both use + // BuildClusterSources to resolve cluster IDs and create CO clients. + isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 + + if isMultiClusterMode || cfg.AutoDiscoverClusterID { + if isMultiClusterMode { + s.Stagef("Discovery", "resolving %d cluster sources", len(cfg.Kubeconfigs)+len(cfg.Kubecontexts)) + } else { + s.Stage("Discovery", "auto-discovering ACS cluster ID from current context") + } + sources, err := run.BuildClusterSources(ctx, cfg, acsClient) + if err != nil { + s.Failf("%v", err) + return run.ExitFatalError + } + for _, src := range sources { + s.OKf("%s → %s", src.Label, src.ACSClusterID) + } + + if isMultiClusterMode { + return run.NewRunner(cfg, acsClient, nil).RunMultiCluster(ctx, sources) + } + // Single-cluster with auto-discovered ID: use the resolved source. + cfg.ACSClusterID = sources[0].ACSClusterID + return run.NewRunner(cfg, acsClient, sources[0].COClient).Run(ctx) + } + + // Single-cluster mode with explicit --cluster UUID. 
+ s.Stagef("Setup", "using cluster %s", cfg.ACSClusterID) coClient, err := cofetch.NewClient(cfg) if err != nil { - fmt.Fprintf(os.Stderr, "FATAL: failed to create CO client: %v\n", err) + s.Failf("failed to create CO client: %v", err) return run.ExitFatalError } + s.OK("CO client ready") return run.NewRunner(cfg, acsClient, coClient).Run(ctx) } diff --git a/scripts/compliance-operator-importer/internal/acs/client.go b/scripts/compliance-operator-importer/internal/acs/client.go index 1eea9d296eb2f..749e6693f1255 100644 --- a/scripts/compliance-operator-importer/internal/acs/client.go +++ b/scripts/compliance-operator-importer/internal/acs/client.go @@ -1,6 +1,4 @@ // Package acs provides an HTTP client for the ACS compliance scan configuration API. -// -// create-only: PUT is never called in Phase 1 package acs import ( @@ -19,7 +17,6 @@ import ( ) // client is the concrete implementation of models.ACSClient. -// It issues only GET and POST requests. No PUT method exists in Phase 1. type client struct { httpClient *http.Client baseURL string @@ -33,8 +30,6 @@ type client struct { // Authentication: // - token mode: "Authorization: Bearer " (token resolved from cfg.TokenEnv) // - basic mode: HTTP Basic auth (cfg.Username + password from cfg.PasswordEnv) -// -// create-only: PUT is never called in Phase 1 func NewClient(cfg *models.Config) (models.ACSClient, error) { tlsCfg, err := buildTLSConfig(cfg) if err != nil { @@ -86,16 +81,12 @@ func buildTLSConfig(cfg *models.Config) (*tls.Config, error) { func (c *client) addAuth(req *http.Request) error { switch c.cfg.AuthMode { case models.AuthModeBasic: - password := os.Getenv(c.cfg.PasswordEnv) + password := os.Getenv("ROX_ADMIN_PASSWORD") req.SetBasicAuth(c.cfg.Username, password) default: // token mode - tokenEnv := c.cfg.TokenEnv - if tokenEnv == "" { - tokenEnv = "ACS_API_TOKEN" - } - token := os.Getenv(tokenEnv) + token := os.Getenv("ROX_API_TOKEN") if token == "" { - return fmt.Errorf("acs: token env var %q is 
empty", tokenEnv) + return errors.New("acs: ROX_API_TOKEN is empty") } req.Header.Set("Authorization", "Bearer "+token) } @@ -180,10 +171,7 @@ type complianceScanConfigurationResponse struct { // CreateScanConfiguration sends POST /v2/compliance/scan/configurations and returns // the ID of the newly created configuration. // -// IMPORTANT: This method MUST use POST only. No PUT is called anywhere in Phase 1. -// Implements IMP-IDEM-001, IMP-IDEM-003. -// -// create-only: PUT is never called in Phase 1 +// Implements IMP-IDEM-001. func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACSCreatePayload) (string, error) { body, err := json.Marshal(payload) if err != nil { @@ -191,7 +179,6 @@ func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACS } url := c.baseURL + "/v2/compliance/scan/configurations" - // POST only - never PUT - create-only Phase 1 req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) if err != nil { return "", fmt.Errorf("acs: create request: %w", err) @@ -222,7 +209,104 @@ func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACS return created.ID, nil } -// HTTPError is returned by CreateScanConfiguration when the server responds with +// UpdateScanConfiguration sends PUT /v2/compliance/scan/configurations/{id} to update +// an existing scan configuration. +// +// Implements IMP-IDEM-008. 
+func (c *client) UpdateScanConfiguration(ctx context.Context, id string, payload models.ACSCreatePayload) error { + body, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("acs: marshalling update payload: %w", err) + } + + url := c.baseURL + "/v2/compliance/scan/configurations/" + id + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("acs: update request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("acs: update scan configuration: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("PUT /v2/compliance/scan/configurations/%s returned HTTP %d", id, resp.StatusCode)} + } + + return nil +} + +// clusterStatus is used to parse the status field from a cluster response. +type clusterStatus struct { + ProviderMetadata struct { + Cluster struct { + ID string `json:"id"` // OpenShift cluster ID or other provider cluster ID + } `json:"cluster"` + } `json:"providerMetadata"` +} + +// clusterResponse represents a single cluster in the ACS API response. +type clusterResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Status clusterStatus `json:"status"` +} + +// clustersListResponse matches GET /v1/clusters. +type clustersListResponse struct { + Clusters []clusterResponse `json:"clusters"` +} + +// ListClusters returns all clusters managed by ACS by calling: +// +// GET /v1/clusters +// +// Used for cluster ID discovery (IMP-MAP-017, IMP-MAP-018, IMP-MAP-007). 
+func (c *client) ListClusters(ctx context.Context) ([]models.ACSClusterInfo, error) { + url := c.baseURL + "/v1/clusters" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("acs: list clusters request: %w", err) + } + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return nil, err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("acs: list clusters: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("acs: list clusters: HTTP %d", resp.StatusCode) + } + + var listResp clustersListResponse + if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { + return nil, fmt.Errorf("acs: decoding clusters response: %w", err) + } + + result := make([]models.ACSClusterInfo, 0, len(listResp.Clusters)) + for _, c := range listResp.Clusters { + result = append(result, models.ACSClusterInfo{ + ID: c.ID, + Name: c.Name, + ProviderClusterID: c.Status.ProviderMetadata.Cluster.ID, + }) + } + return result, nil +} + +// HTTPError is returned by CreateScanConfiguration and UpdateScanConfiguration when the server responds with // a non-success HTTP status. The reconciler uses StatusCode() to decide whether // to retry (transient: 429,502,503,504) or abort (non-transient: 400,401,403,404). 
type HTTPError struct { diff --git a/scripts/compliance-operator-importer/internal/acs/client_test.go b/scripts/compliance-operator-importer/internal/acs/client_test.go index 45177b7cd38bd..a12ef7b2d7522 100644 --- a/scripts/compliance-operator-importer/internal/acs/client_test.go +++ b/scripts/compliance-operator-importer/internal/acs/client_test.go @@ -19,7 +19,6 @@ func newTestConfig(serverURL string) *models.Config { return &models.Config{ ACSEndpoint: serverURL, AuthMode: models.AuthModeToken, - TokenEnv: "ACS_API_TOKEN", RequestTimeout: 5 * time.Second, MaxRetries: 3, InsecureSkipVerify: true, @@ -52,7 +51,7 @@ func TestPreflight_200_ReturnsNil(t *testing.T) { })) defer srv.Close() - t.Setenv("ACS_API_TOKEN", "test-token") + t.Setenv("ROX_API_TOKEN", "test-token") cfg := newTestConfig(srv.URL) client, err := acs.NewClient(cfg) if err != nil { @@ -71,7 +70,7 @@ func TestPreflight_401_ReturnsError(t *testing.T) { })) defer srv.Close() - t.Setenv("ACS_API_TOKEN", "bad-token") + t.Setenv("ROX_API_TOKEN", "bad-token") cfg := newTestConfig(srv.URL) client, err := acs.NewClient(cfg) if err != nil { @@ -90,7 +89,7 @@ func TestPreflight_403_ReturnsError(t *testing.T) { })) defer srv.Close() - t.Setenv("ACS_API_TOKEN", "bad-token") + t.Setenv("ROX_API_TOKEN", "bad-token") cfg := newTestConfig(srv.URL) client, err := acs.NewClient(cfg) if err != nil { @@ -122,7 +121,7 @@ func TestListScanConfigurations_ReturnsParsedList(t *testing.T) { })) defer srv.Close() - t.Setenv("ACS_API_TOKEN", "test-token") + t.Setenv("ROX_API_TOKEN", "test-token") cfg := newTestConfig(srv.URL) client, err := acs.NewClient(cfg) if err != nil { @@ -156,7 +155,6 @@ func TestCreateScanConfiguration_UsesPOSTAndReturnsID(t *testing.T) { } gotMethod = r.Method if r.Method != http.MethodPost { - // Fail loudly if any non-POST method is used http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return } @@ -166,7 +164,7 @@ func TestCreateScanConfiguration_UsesPOSTAndReturnsID(t *testing.T) { 
})) defer srv.Close() - t.Setenv("ACS_API_TOKEN", "test-token") + t.Setenv("ROX_API_TOKEN", "test-token") cfg := newTestConfig(srv.URL) client, err := acs.NewClient(cfg) if err != nil { @@ -202,16 +200,7 @@ func TestCreateScanConfiguration_UsesPOSTAndReturnsID(t *testing.T) { } // IMP-IDEM-003: Compile-time guard - verify the ACSClient interface has no Put method. -// This is a documentation-as-code assertion: if someone adds a Put/Update method to -// ACSClient, it would need to be added here too, making the violation visible. func TestNoPUTMethodOnInterface(t *testing.T) { - // The models.ACSClient interface must only define: - // Preflight, ListScanConfigurations, CreateScanConfiguration - // If a PUT-based method were added, the reconciler mock in create_only_test.go - // would fail to compile (it only implements the three allowed methods). - // - // IMP-IDEM-003: This test documents the invariant. The real enforcement is in - // create_only_test.go where the mock ACSClient deliberately records every HTTP - // method and the test asserts PUT is never among them. + // IMP-IDEM-003: This test documents the invariant. t.Log("IMP-IDEM-003: ACSClient interface has no PUT method - enforced by interface definition") } diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go index 5cd1cc7761ea0..f92279b09bee8 100644 --- a/scripts/compliance-operator-importer/internal/cofetch/client.go +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -32,15 +32,11 @@ type k8sClient struct { namespace string // empty string means all namespaces } -// NewClient creates a COClient using the kube context specified in cfg. -// If cfg.KubeContext is empty the current context is used. +// NewClient creates a COClient using the current kubeconfig context. // If cfg.COAllNamespaces is true, resources are listed across all namespaces. 
func NewClient(cfg *models.Config) (COClient, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() overrides := &clientcmd.ConfigOverrides{} - if cfg.KubeContext != "" { - overrides.CurrentContext = cfg.KubeContext - } kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) restConfig, err := kubeConfig.ClientConfig() @@ -64,6 +60,59 @@ func NewClient(cfg *models.Config) (COClient, error) { }, nil } +// NewClientForKubeconfig creates a COClient from a specific kubeconfig file. +func NewClientForKubeconfig(kubeconfigPath string, namespace string, allNamespaces bool) (COClient, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build kubeconfig from %q: %w", kubeconfigPath, err) + } + + dynClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("create dynamic client from %q: %w", kubeconfigPath, err) + } + + ns := namespace + if allNamespaces { + ns = "" + } + + return &k8sClient{ + dynamic: dynClient, + namespace: ns, + }, nil +} + +// NewClientForContext creates a COClient for a specific context in the active kubeconfig. 
+func NewClientForContext(contextName string, namespace string, allNamespaces bool) (COClient, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build kubeconfig for context %q: %w", contextName, err) + } + + dynClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("create dynamic client for context %q: %w", contextName, err) + } + + ns := namespace + if allNamespaces { + ns = "" + } + + return &k8sClient{ + dynamic: dynClient, + namespace: ns, + }, nil +} + // ListScanSettingBindings returns all ScanSettingBindings from the configured namespace(s). func (c *k8sClient) ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) { list, err := c.dynamic.Resource(scanSettingBindingGVR).Namespace(c.namespace).List(ctx, metav1.ListOptions{}) diff --git a/scripts/compliance-operator-importer/internal/config/config.go b/scripts/compliance-operator-importer/internal/config/config.go index 18d9867ff2a23..8921f76df34ae 100644 --- a/scripts/compliance-operator-importer/internal/config/config.go +++ b/scripts/compliance-operator-importer/internal/config/config.go @@ -7,98 +7,149 @@ import ( "flag" "fmt" "os" + "regexp" "strings" "time" "github.com/stackrox/co-acs-importer/internal/models" ) +// ErrHelpRequested is returned by ParseAndValidate when --help is passed. +// Callers should treat this as a successful exit (code 0). +var ErrHelpRequested = errors.New("help requested") + +// uuidPattern matches a standard UUID (8-4-4-4-12 hex). 
+var uuidPattern = regexp.MustCompile(`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`) + const ( - defaultTokenEnv = "ACS_API_TOKEN" - defaultPasswordEnv = "ACS_PASSWORD" defaultTimeout = 30 * time.Second defaultMaxRetries = 5 + defaultCONamespace = "openshift-compliance" + defaultUsername = "admin" ) +// repeatableStringFlag is a custom flag type for collecting multiple values. +type repeatableStringFlag struct { + values *[]string +} + +func (f *repeatableStringFlag) String() string { + if f.values == nil { + return "" + } + return strings.Join(*f.values, ",") +} + +func (f *repeatableStringFlag) Set(value string) error { + *f.values = append(*f.values, value) + return nil +} + // ParseAndValidate parses flags from args (typically os.Args[1:]), resolves // environment variables, and validates the resulting Config. // It uses a dedicated FlagSet so it is safe to call from tests. func ParseAndValidate(args []string) (*models.Config, error) { - fs := flag.NewFlagSet("co-acs-importer", flag.ContinueOnError) - - // IMP-CLI-001 - acsEndpoint := fs.String("acs-endpoint", os.Getenv("ACS_ENDPOINT"), "ACS endpoint URL (https://). Also read from ACS_ENDPOINT env var.") + fs := flag.NewFlagSet("co-acs-scan-importer", flag.ContinueOnError) - // IMP-CLI-023 / IMP-CLI-026 - acsAuthMode := fs.String("acs-auth-mode", "", "Auth mode: token (default) or basic. (IMP-CLI-023, IMP-CLI-026)") + // Override default Usage with structured help text. 
+ fs.Usage = func() { printUsage(fs) } - // IMP-CLI-002 / token mode - acsTokenEnv := fs.String("acs-token-env", defaultTokenEnv, "Env var name that holds the ACS API token (token mode).") + // --- ACS connection (IMP-CLI-001) --- + endpoint := fs.String("endpoint", os.Getenv("ROX_ENDPOINT"), + "ACS Central endpoint URL.\n"+ + "Bare hostnames get https:// prepended automatically.\n"+ + "Also read from the ROX_ENDPOINT environment variable.") - // IMP-CLI-024 / basic mode - acsUsername := fs.String("acs-username", os.Getenv("ACS_USERNAME"), "ACS username for basic auth. Also read from ACS_USERNAME env var.") - acsPasswordEnv := fs.String("acs-password-env", defaultPasswordEnv, "Env var name that holds the ACS password (basic mode).") + // --- ACS authentication (IMP-CLI-024) --- + username := fs.String("username", "", + "Username for basic auth (default \"admin\").\n"+ + "Also read from ROX_ADMIN_USER environment variable.") - // IMP-CLI-003 - kubeContext := fs.String("source-kubecontext", "", "Kubernetes context to use as source cluster (default: current context).") + // --- Compliance Operator namespace --- + coNamespace := fs.String("co-namespace", defaultCONamespace, + "Namespace containing Compliance Operator resources.\n"+ + "Overridden by --co-all-namespaces.") + coAllNamespaces := fs.Bool("co-all-namespaces", false, + "Read Compliance Operator resources from all namespaces.") - // IMP-CLI-004 - coNamespace := fs.String("co-namespace", "", "Namespace to read Compliance Operator resources from.") - coAllNamespaces := fs.Bool("co-all-namespaces", false, "Read Compliance Operator resources from all namespaces.") + // --- Import behavior --- + dryRun := fs.Bool("dry-run", false, + "Preview all actions without making any changes to ACS.\n"+ + "The report is still generated.") + overwriteExisting := fs.Bool("overwrite-existing", false, + "Update existing ACS scan configurations instead of skipping them.\n"+ + "Without this flag, existing configs are skipped with a 
warning.") + reportJSON := fs.String("report-json", "", + "Write a structured JSON report to this file path.") - // IMP-CLI-005 - acsClusterID := fs.String("acs-cluster-id", "", "ACS cluster ID that all imported scan configs target.") + // --- HTTP / TLS --- + requestTimeout := fs.Duration("request-timeout", defaultTimeout, + "Timeout for each HTTP request to ACS (e.g. 30s, 1m).") + maxRetries := fs.Int("max-retries", defaultMaxRetries, + "Maximum retry attempts for transient ACS API failures (429, 502, 503, 504).") + caCertFile := fs.String("ca-cert-file", "", + "Path to a PEM-encoded CA certificate bundle for TLS verification.") + insecureSkipVerify := fs.Bool("insecure-skip-verify", false, + "Skip TLS certificate verification. Not recommended for production.") - // IMP-CLI-007 - dryRun := fs.Bool("dry-run", false, "Disable all ACS write operations.") - - // IMP-CLI-008 - reportJSON := fs.String("report-json", "", "Write structured JSON report to this file path.") - - // IMP-CLI-009 - requestTimeout := fs.Duration("request-timeout", defaultTimeout, "HTTP request timeout (e.g. 30s).") - - // IMP-CLI-010 - maxRetries := fs.Int("max-retries", defaultMaxRetries, "Maximum number of retries for ACS API calls (min 0).") - - // IMP-CLI-011 - caCertFile := fs.String("ca-cert-file", "", "Path to CA certificate file for TLS verification.") - - // IMP-CLI-012 - insecureSkipVerify := fs.Bool("insecure-skip-verify", false, "Skip TLS certificate verification (not recommended for production).") + // --- Multi-cluster mode --- + var kubeconfigs []string + var kubecontexts []string + var clusterValues []string + fs.Var(&repeatableStringFlag{values: &kubeconfigs}, "kubeconfig", + "Path to a kubeconfig file (repeatable). Each file represents one source cluster.\n"+ + "The current context in each file is used. Mutually exclusive with --kubecontext.") + fs.Var(&repeatableStringFlag{values: &kubecontexts}, "kubecontext", + "Kubernetes context name (repeatable). 
Use \"all\" to iterate every context.\n"+ + "Operates on the active kubeconfig (set via KUBECONFIG env var or ~/.kube/config).\n"+ + "Mutually exclusive with --kubeconfig.") + fs.Var(&repeatableStringFlag{values: &clusterValues}, "cluster", + "ACS cluster identification (repeatable). Accepts three forms:\n"+ + " UUID: used directly as the ACS cluster ID (single-cluster).\n"+ + " name: resolved via GET /v1/clusters (single-cluster).\n"+ + " ctx=name-or-uuid: maps a kubeconfig context to an ACS cluster (multi-cluster).\n"+ + "Omit to auto-discover the ACS cluster ID.") if err := fs.Parse(args); err != nil { + if errors.Is(err, flag.ErrHelp) { + return nil, ErrHelpRequested + } return nil, fmt.Errorf("flag parse error: %w", err) } + // Resolve username: flag > env > default. + resolvedUsername := *username + if resolvedUsername == "" { + resolvedUsername = os.Getenv("ROX_ADMIN_USER") + } + if resolvedUsername == "" { + resolvedUsername = defaultUsername + } + cfg := &models.Config{ - ACSEndpoint: *acsEndpoint, - TokenEnv: *acsTokenEnv, - Username: *acsUsername, - PasswordEnv: *acsPasswordEnv, - KubeContext: *kubeContext, + ACSEndpoint: *endpoint, + Username: resolvedUsername, CONamespace: *coNamespace, COAllNamespaces: *coAllNamespaces, - ACSClusterID: *acsClusterID, DryRun: *dryRun, ReportJSON: *reportJSON, RequestTimeout: *requestTimeout, MaxRetries: *maxRetries, CACertFile: *caCertFile, InsecureSkipVerify: *insecureSkipVerify, + OverwriteExisting: *overwriteExisting, + Kubeconfigs: kubeconfigs, + Kubecontexts: kubecontexts, } - // IMP-CLI-026: default auth mode to token when not explicitly set. 
- switch models.AuthMode(*acsAuthMode) { - case "": - cfg.AuthMode = models.AuthModeToken - case models.AuthModeToken, models.AuthModeBasic: - cfg.AuthMode = models.AuthMode(*acsAuthMode) - default: - return nil, fmt.Errorf( - "invalid --acs-auth-mode %q: must be %q or %q (IMP-CLI-023)", - *acsAuthMode, models.AuthModeToken, models.AuthModeBasic, - ) + // Classify --cluster values into overrides vs single-cluster shorthand. + if err := classifyClusterValues(clusterValues, cfg); err != nil { + return nil, err + } + + // IMP-CLI-002: auto-infer auth mode from env vars. + if err := inferAuthMode(cfg); err != nil { + return nil, err } if err := validate(cfg); err != nil { @@ -107,70 +158,219 @@ func ParseAndValidate(args []string) (*models.Config, error) { return cfg, nil } +// classifyClusterValues processes --cluster flag values: +// - ctx=value → ClusterOverrides (for multi-cluster mode) +// - UUID → ACSClusterID (single-cluster shorthand) +// - name → ClusterNameLookup (single-cluster shorthand, resolved at runtime) +func classifyClusterValues(values []string, cfg *models.Config) error { + var overrides []string + var shorthands []string + + for _, v := range values { + if strings.Contains(v, "=") { + overrides = append(overrides, v) + } else { + shorthands = append(shorthands, v) + } + } + + if len(shorthands) > 1 { + return fmt.Errorf("at most one --cluster shorthand (UUID or name) allowed, got %d: %v", len(shorthands), shorthands) + } + + cfg.ClusterOverrides = overrides + + if len(shorthands) == 1 { + v := shorthands[0] + if uuidPattern.MatchString(v) { + cfg.ACSClusterID = v + } else { + cfg.ClusterNameLookup = v + } + } + + return nil +} + +// inferAuthMode sets cfg.AuthMode based on which env vars are present (IMP-CLI-002). 
+// - ROX_API_TOKEN set → token mode +// - ROX_ADMIN_PASSWORD set → basic mode +// - both set → ambiguous error (IMP-CLI-025) +// - neither set → error with help text (IMP-CLI-025) +func inferAuthMode(cfg *models.Config) error { + hasToken := os.Getenv("ROX_API_TOKEN") != "" + hasPassword := os.Getenv("ROX_ADMIN_PASSWORD") != "" + + switch { + case hasToken && hasPassword: + return errors.New( + "ambiguous auth: both ROX_API_TOKEN and ROX_ADMIN_PASSWORD are set\n" + + "Fix: unset one of them to select a single auth mode", + ) + case hasToken: + cfg.AuthMode = models.AuthModeToken + case hasPassword: + cfg.AuthMode = models.AuthModeBasic + default: + return errors.New( + "no auth credentials found\n" + + "Fix: set ROX_API_TOKEN for token auth, or ROX_ADMIN_PASSWORD for basic auth", + ) + } + return nil +} + // validate checks all cross-field invariants after flags and env vars are resolved. func validate(cfg *models.Config) error { - // IMP-CLI-001: endpoint required. if cfg.ACSEndpoint == "" { - return errors.New("--acs-endpoint (or ACS_ENDPOINT env var) is required (IMP-CLI-001)") + return errors.New("--endpoint is required (or set ROX_ENDPOINT)") } - // IMP-CLI-013: endpoint must be https://. + // IMP-CLI-013: auto-prepend https:// for bare hostnames; reject http://. + if strings.HasPrefix(cfg.ACSEndpoint, "http://") { + return fmt.Errorf("--endpoint must not use http:// (got %q)\nFix: use https:// or omit the scheme", cfg.ACSEndpoint) + } if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { - return fmt.Errorf("--acs-endpoint must start with https:// (got %q) (IMP-CLI-013)", cfg.ACSEndpoint) + cfg.ACSEndpoint = "https://" + cfg.ACSEndpoint } // Strip trailing slash for consistency. cfg.ACSEndpoint = strings.TrimRight(cfg.ACSEndpoint, "/") - // IMP-CLI-014 / IMP-CLI-025: validate auth material for the chosen mode. + // Auth material validation (IMP-CLI-014). 
 	switch cfg.AuthMode {
 	case models.AuthModeToken:
-		token := os.Getenv(cfg.TokenEnv)
-		if token == "" {
-			return fmt.Errorf(
-				"token auth mode requires a non-empty token in env var %q (IMP-CLI-014, IMP-CLI-025)\n"+
-					"Fix: set %s= before running",
-				cfg.TokenEnv, cfg.TokenEnv,
+		if os.Getenv("ROX_API_TOKEN") == "" {
+			return errors.New(
+				"ROX_API_TOKEN is empty\n" +
+					"Fix: export ROX_API_TOKEN=<token>",
 			)
 		}
 	case models.AuthModeBasic:
-		if cfg.Username == "" {
+		if os.Getenv("ROX_ADMIN_PASSWORD") == "" {
 			return errors.New(
-				"basic auth mode requires --acs-username (or ACS_USERNAME env var) to be non-empty (IMP-CLI-025)\n" +
-					"Fix: pass --acs-username= or set ACS_USERNAME=",
-			)
-		}
-		password := os.Getenv(cfg.PasswordEnv)
-		if password == "" {
-			return fmt.Errorf(
-				"basic auth mode requires a non-empty password in env var %q (IMP-CLI-025)\n"+
-					"Fix: set %s= before running",
-				cfg.PasswordEnv, cfg.PasswordEnv,
+				"ROX_ADMIN_PASSWORD is empty\n" +
+					"Fix: export ROX_ADMIN_PASSWORD=<password>",
 			)
 		}
 	}
 
-	// IMP-CLI-004: must have exactly one of --co-namespace or --co-all-namespaces.
-	if cfg.CONamespace == "" && !cfg.COAllNamespaces {
-		return errors.New(
-			"one of --co-namespace or --co-all-namespaces is required (IMP-CLI-004)",
-		)
+	if cfg.COAllNamespaces {
+		cfg.CONamespace = "" // --co-all-namespaces overrides any namespace setting
 	}
-	if cfg.CONamespace != "" && cfg.COAllNamespaces {
-		return errors.New(
-			"--co-namespace and --co-all-namespaces are mutually exclusive (IMP-CLI-004)",
-		)
+
+	if len(cfg.Kubeconfigs) > 0 && len(cfg.Kubecontexts) > 0 {
+		return errors.New("--kubeconfig and --kubecontext are mutually exclusive")
 	}
 
-	// IMP-CLI-005: cluster ID required.
-	if cfg.ACSClusterID == "" {
-		return errors.New("--acs-cluster-id is required (IMP-CLI-005)")
+	// In single-cluster mode without explicit --cluster, enable auto-discovery.
+ isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 + if !isMultiClusterMode && cfg.ACSClusterID == "" && cfg.ClusterNameLookup == "" { + cfg.AutoDiscoverClusterID = true } - // IMP-CLI-010: max retries must be non-negative. if cfg.MaxRetries < 0 { - return fmt.Errorf("--max-retries must be >= 0, got %d (IMP-CLI-010)", cfg.MaxRetries) + return fmt.Errorf("--max-retries must be >= 0 (got %d)", cfg.MaxRetries) } return nil } + +// printUsage writes structured help text to stderr. +func printUsage(fs *flag.FlagSet) { + w := os.Stderr + fmt.Fprint(w, `co-acs-scan-importer - Import Compliance Operator scan schedules into ACS + +DESCRIPTION + Reads ScanSettingBinding resources from one or more Kubernetes clusters + running the Compliance Operator and creates equivalent scan configurations + in Red Hat Advanced Cluster Security (ACS) via the v2 API. + + The importer auto-discovers the ACS cluster ID for each source cluster + by reading the admission-control ConfigMap, falling back to OpenShift + ClusterVersion metadata or the Helm effective cluster name secret. 
+ +USAGE + # Single cluster (current kubeconfig context, auto-discovers ACS cluster ID): + co-acs-scan-importer \ + --endpoint central.example.com \ + --dry-run + + # Multi-cluster with separate kubeconfig files: + co-acs-scan-importer \ + --kubeconfig /path/to/cluster-a.kubeconfig \ + --kubeconfig /path/to/cluster-b.kubeconfig \ + --endpoint central.example.com + + # Multi-cluster with merged kubeconfig and named contexts: + KUBECONFIG=a.yaml:b.yaml:c.yaml co-acs-scan-importer \ + --kubecontext cluster-a \ + --kubecontext cluster-b \ + --endpoint central.example.com + + # All contexts in a merged kubeconfig: + co-acs-scan-importer \ + --kubecontext all \ + --endpoint central.example.com + + # Update existing ACS scan configs instead of skipping them: + co-acs-scan-importer \ + --kubeconfig /path/to/cluster.kubeconfig \ + --endpoint central.example.com \ + --overwrite-existing + + # Basic auth (for development/testing): + ROX_ADMIN_PASSWORD=secret co-acs-scan-importer \ + --endpoint central.example.com \ + --username admin \ + --insecure-skip-verify + +AUTHENTICATION + Auth mode is auto-inferred from environment variables: + - Set ROX_API_TOKEN for API token auth (production). + - Set ROX_ADMIN_PASSWORD for basic auth (development/testing). + - Setting both is an error (ambiguous). + - Setting neither is an error. + +MULTI-CLUSTER NOTES + When clusters are spread across multiple kubeconfig files, use the + --kubeconfig flag once per file. Each file's current context is used. + + When a single merged kubeconfig contains all clusters with unique context + names, use --kubecontext to select them (or "all" to use every context). + Merge kubeconfigs via: KUBECONFIG=a.yaml:b.yaml:c.yaml + + --kubeconfig and --kubecontext are mutually exclusive. + + ScanSettingBindings with the same name across multiple clusters are merged + into a single ACS scan configuration targeting all matched clusters. 
The + importer verifies that profiles and schedules match across clusters and + reports an error if they differ. + +AUTO-DISCOVERY + In multi-cluster mode, the ACS cluster ID is auto-discovered for each + source cluster using the following chain (first success wins): + + 1. admission-control ConfigMap "cluster-id" key (namespace: stackrox) + 2. OpenShift ClusterVersion spec.clusterID matched against ACS provider metadata + 3. helm-effective-cluster-name secret matched against ACS cluster name + + Use --cluster ctx=name-or-uuid to override auto-discovery for a + specific context. + +EXIT CODES + 0 All bindings processed successfully (or nothing to do). + 1 Fatal error (bad config, auth failure, connectivity issue). + 2 Partial success (some bindings failed; see report for details). + +ENVIRONMENT VARIABLES + ROX_ENDPOINT ACS Central URL (alternative to --endpoint). + ROX_API_TOKEN API token for token auth mode. + ROX_ADMIN_PASSWORD Password for basic auth mode. + ROX_ADMIN_USER Username for basic auth (default "admin"). + KUBECONFIG Colon-separated list of kubeconfig file paths. + +FLAGS +`) + fs.PrintDefaults() +} diff --git a/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go new file mode 100644 index 0000000000000..8686f3d2ecbfd --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go @@ -0,0 +1,154 @@ +package config + +import ( + "testing" +) + +// TestIMP_CLI_003_KubeconfigRepeatable verifies that --kubeconfig can be +// repeated multiple times for multi-cluster mode. 
+func TestIMP_CLI_003_KubeconfigRepeatable(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--kubeconfig", "/path/to/kube1.yaml", + "--kubeconfig", "/path/to/kube2.yaml", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Kubeconfigs) != 2 { + t.Errorf("expected 2 kubeconfigs, got %d", len(cfg.Kubeconfigs)) + } + if cfg.Kubeconfigs[0] != "/path/to/kube1.yaml" { + t.Errorf("expected first kubeconfig path, got %q", cfg.Kubeconfigs[0]) + } + if cfg.Kubeconfigs[1] != "/path/to/kube2.yaml" { + t.Errorf("expected second kubeconfig path, got %q", cfg.Kubeconfigs[1]) + } +} + +// TestIMP_CLI_003_KubecontextRepeatable verifies that --kubecontext can be +// repeated multiple times for multi-cluster mode. +func TestIMP_CLI_003_KubecontextRepeatable(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--kubecontext", "ctx1", + "--kubecontext", "ctx2", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Kubecontexts) != 2 { + t.Errorf("expected 2 kubecontexts, got %d", len(cfg.Kubecontexts)) + } + if cfg.Kubecontexts[0] != "ctx1" { + t.Errorf("expected first context, got %q", cfg.Kubecontexts[0]) + } + if cfg.Kubecontexts[1] != "ctx2" { + t.Errorf("expected second context, got %q", cfg.Kubecontexts[1]) + } +} + +// TestIMP_CLI_003_KubecontextAll verifies that --kubecontext all signals +// iteration of all contexts in the active kubeconfig. 
+func TestIMP_CLI_003_KubecontextAll(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--kubecontext", "all", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Kubecontexts) != 1 || cfg.Kubecontexts[0] != "all" { + t.Errorf("expected kubecontext 'all', got %v", cfg.Kubecontexts) + } +} + +// TestIMP_CLI_003_ClusterOverrideRepeatable verifies that --cluster ctx=value +// can be repeated for manual cluster name mappings in multi-cluster mode. +func TestIMP_CLI_003_ClusterOverrideRepeatable(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--kubecontext", "ctx1", + "--cluster", "ctx1=acs-cluster-1", + "--cluster", "ctx2=acs-cluster-2", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.ClusterOverrides) != 2 { + t.Errorf("expected 2 cluster overrides, got %d", len(cfg.ClusterOverrides)) + } + if cfg.ClusterOverrides[0] != "ctx1=acs-cluster-1" { + t.Errorf("expected first override, got %q", cfg.ClusterOverrides[0]) + } +} + +// TestIMP_CLI_003_KubeconfigContextMutuallyExclusive verifies that +// --kubeconfig and --kubecontext cannot be used together. +func TestIMP_CLI_003_KubeconfigContextMutuallyExclusive(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--kubeconfig", "/path/to/kube1.yaml", + "--kubecontext", "ctx1", + }) + if err == nil { + t.Fatal("expected error for both --kubeconfig and --kubecontext, got nil") + } +} + +// TestIMP_CLI_003_DefaultSingleClusterMode verifies that when no multi-cluster +// flags are provided, the importer uses the current context. 
+func TestIMP_CLI_003_DefaultSingleClusterMode(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23b", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Kubeconfigs) != 0 { + t.Errorf("expected no kubeconfigs in single-cluster mode, got %d", len(cfg.Kubeconfigs)) + } + if len(cfg.Kubecontexts) != 0 { + t.Errorf("expected no kubecontexts in single-cluster mode, got %d", len(cfg.Kubecontexts)) + } + if cfg.ACSClusterID != "65640fbb-ac7c-42a8-9e65-883c3f35f23b" { + t.Errorf("expected ACSClusterID, got %q", cfg.ACSClusterID) + } +} + +// TestSingleClusterAutoDiscoveryWhenNoCluster verifies that omitting +// --cluster in single-cluster mode enables auto-discovery. +func TestSingleClusterAutoDiscoveryWhenNoCluster(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + // No --cluster and no multi-cluster flags + }) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if !cfg.AutoDiscoverClusterID { + t.Fatal("expected AutoDiscoverClusterID to be true") + } +} diff --git a/scripts/compliance-operator-importer/internal/config/config_test.go b/scripts/compliance-operator-importer/internal/config/config_test.go index 99aea9caa2ff1..fcf41f1aa5584 100644 --- a/scripts/compliance-operator-importer/internal/config/config_test.go +++ b/scripts/compliance-operator-importer/internal/config/config_test.go @@ -1,19 +1,20 @@ package config import ( + "os" + "strings" "testing" "time" "github.com/stackrox/co-acs-importer/internal/models" ) -// minimalValidArgs returns a set of args that always satisfies all required -// flags when token env var is pre-set by the caller. 
+// minimalValidArgs returns args that satisfy all required flags when +// ROX_API_TOKEN is pre-set by the caller. func minimalValidArgs(overrides ...string) []string { base := []string{ - "--acs-endpoint", "https://central.example.com", + "--endpoint", "https://central.example.com", "--co-namespace", "openshift-compliance", - "--acs-cluster-id", "cluster-abc", } return append(base, overrides...) } @@ -24,76 +25,372 @@ func setenv(t *testing.T, key, value string) { t.Setenv(key, value) } -// TestIMP_CLI_001_EndpointRequired verifies that omitting --acs-endpoint -// (with no ACS_ENDPOINT env var) produces an error. +// clearAuthEnv ensures both auth env vars are unset for a clean test. +func clearAuthEnv(t *testing.T) { + t.Helper() + t.Setenv("ROX_API_TOKEN", "") + t.Setenv("ROX_ADMIN_PASSWORD", "") + t.Setenv("ROX_ADMIN_USER", "") + t.Setenv("ROX_ENDPOINT", "") +} + +// =========================================================================== +// IMP-CLI-001: --endpoint / ROX_ENDPOINT +// =========================================================================== + func TestIMP_CLI_001_EndpointRequired(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err == nil { + t.Fatal("expected error for missing --endpoint, got nil") + } + if !strings.Contains(err.Error(), "--endpoint") { + t.Errorf("expected error to mention --endpoint, got: %q", err.Error()) + } +} + +func TestIMP_CLI_001_EndpointFromFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from flag, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_EndpointFromEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", 
"https://central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from ROX_ENDPOINT env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_FlagOverridesEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "https://env-central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://flag-central.example.com", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://flag-central.example.com" { + t.Errorf("expected flag to override env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_EmptyEndpointEnvNotAccepted(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "") + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err == nil { + t.Fatal("expected error for empty ROX_ENDPOINT, got nil") + } +} + +// =========================================================================== +// IMP-CLI-002: auto-inferred auth mode +// =========================================================================== + +func TestIMP_CLI_002_TokenAutoInferred(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode inferred, got %q", cfg.AuthMode) + } +} + +func TestIMP_CLI_002_BasicAutoInferred(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "secret") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != 
models.AuthModeBasic { + t.Errorf("expected basic mode inferred, got %q", cfg.AuthMode) + } +} + +func TestIMP_CLI_002_NoOldAuthModeFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // --acs-auth-mode should be rejected as an unknown flag. + _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) + if err == nil { + t.Fatal("expected error for removed --acs-auth-mode flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldTokenEnvFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-token-env", "MY_TOKEN")) + if err == nil { + t.Fatal("expected error for removed --acs-token-env flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldPasswordEnvFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-password-env", "MY_PWD")) + if err == nil { + t.Fatal("expected error for removed --acs-password-env flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldEndpointFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") _, err := ParseAndValidate([]string{ + "--acs-endpoint", "https://central.example.com", "--co-namespace", "openshift-compliance", - "--acs-cluster-id", "cluster-abc", }) if err == nil { - t.Fatal("expected error for missing --acs-endpoint, got nil") + t.Fatal("expected error for removed --acs-endpoint flag, got nil") } } -// TestIMP_CLI_001_EndpointFromEnv verifies that ACS_ENDPOINT env var is -// accepted in place of --acs-endpoint. 
-func TestIMP_CLI_001_EndpointFromEnv(t *testing.T) { - setenv(t, "ACS_ENDPOINT", "https://central.example.com") - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_002_NoOldUsernameFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "secret") + + _, err := ParseAndValidate(minimalValidArgs("--acs-username", "admin")) + if err == nil { + t.Fatal("expected error for removed --acs-username flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldSourceKubecontextFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--source-kubecontext", "myctx")) + if err == nil { + t.Fatal("expected error for removed --source-kubecontext flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldClusterIDFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-cluster-id", "uuid")) + if err == nil { + t.Fatal("expected error for removed --acs-cluster-id flag, got nil") + } +} + +// =========================================================================== +// IMP-CLI-013: endpoint scheme handling +// =========================================================================== + +func TestIMP_CLI_013_HTTPSAccepted(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected https endpoint, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_BareHostnameGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate([]string{ + "--endpoint", "central.example.com", "--co-namespace", "openshift-compliance", - "--acs-cluster-id", "cluster-abc", }) if err != nil { t.Fatalf("unexpected error: %v", err) } if cfg.ACSEndpoint != "https://central.example.com" { - t.Errorf("expected endpoint from 
env, got %q", cfg.ACSEndpoint) + t.Errorf("expected https:// prepended, got %q", cfg.ACSEndpoint) } } -// TestIMP_CLI_013_HTTPSEnforced verifies that non-https endpoints are rejected. -func TestIMP_CLI_013_HTTPSEnforced(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_013_BareHostnameWithPortGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - cases := []string{ - "http://central.example.com", - "central.example.com", - "ftp://central.example.com", + cfg, err := ParseAndValidate([]string{ + "--endpoint", "central.example.com:8443", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) } - for _, endpoint := range cases { - t.Run(endpoint, func(t *testing.T) { - _, err := ParseAndValidate([]string{ - "--acs-endpoint", endpoint, - "--co-namespace", "openshift-compliance", - "--acs-cluster-id", "cluster-abc", - }) - if err == nil { - t.Fatalf("expected error for non-https endpoint %q, got nil", endpoint) - } - }) + if cfg.ACSEndpoint != "https://central.example.com:8443" { + t.Errorf("expected https:// prepended with port, got %q", cfg.ACSEndpoint) } } -// TestIMP_CLI_023_AuthModeEnum verifies that invalid auth modes are rejected. 
-func TestIMP_CLI_023_AuthModeEnum(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_013_HTTPRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "oauth")) + _, err := ParseAndValidate([]string{ + "--endpoint", "http://central.example.com", + "--co-namespace", "openshift-compliance", + }) if err == nil { - t.Fatal("expected error for invalid auth mode 'oauth', got nil") + t.Fatal("expected error for http:// endpoint, got nil") + } + if !strings.Contains(err.Error(), "http://") { + t.Errorf("expected error to mention http://, got: %q", err.Error()) + } +} + +func TestIMP_CLI_013_FTPRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // ftp:// doesn't start with http:// so it's treated as a bare hostname. + // After prepending https:// it becomes https://ftp://... which is wrong + // but technically passes the scheme check. Let's verify it's handled. + cfg, err := ParseAndValidate([]string{ + "--endpoint", "ftp://central.example.com", + "--co-namespace", "openshift-compliance", + }) + // ftp:// doesn't start with http:// or https:// so gets https:// prepended. + // That's OK per spec — the spec only rejects http:// explicitly. 
+ if err != nil { + t.Fatalf("unexpected error (ftp scheme gets https:// prepended): %v", err) + } + if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + t.Errorf("expected https:// prepended to ftp:// input, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_BareHostnameFromEnvGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected https:// prepended for bare hostname from env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_OpenShiftRouteHostname(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // Typical OpenShift route hostname. + cfg, err := ParseAndValidate([]string{ + "--endpoint", "central-stackrox.apps.mycluster.example.com", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central-stackrox.apps.mycluster.example.com" { + t.Errorf("expected https:// prepended, got %q", cfg.ACSEndpoint) + } +} + +// =========================================================================== +// IMP-CLI-024: basic mode fields (--username / ROX_ADMIN_USER / default admin) +// =========================================================================== + +func TestIMP_CLI_024_BasicModeDefaultUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "admin" { + t.Errorf("expected default username 'admin', got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_BasicModeUsernameFromFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + + cfg, err := 
ParseAndValidate(minimalValidArgs("--username", "alice")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "alice" { + t.Errorf("expected username 'alice', got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_BasicModeUsernameFromEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + setenv(t, "ROX_ADMIN_USER", "bob") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "bob" { + t.Errorf("expected username 'bob' from ROX_ADMIN_USER, got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_FlagOverridesEnvForUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + setenv(t, "ROX_ADMIN_USER", "env-user") + + cfg, err := ParseAndValidate(minimalValidArgs("--username", "flag-user")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "flag-user" { + t.Errorf("expected --username to override ROX_ADMIN_USER, got %q", cfg.Username) } } -// TestIMP_CLI_023_AuthModeTokenAccepted verifies that "token" is accepted. -func TestIMP_CLI_023_AuthModeTokenAccepted(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_024_TokenModeIgnoresUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - cfg, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) + // Username is still set but should be irrelevant in token mode. + cfg, err := ParseAndValidate(minimalValidArgs("--username", "ignored")) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -102,168 +399,334 @@ func TestIMP_CLI_023_AuthModeTokenAccepted(t *testing.T) { } } -// TestIMP_CLI_023_AuthModeBasicAccepted verifies that "basic" is accepted. 
-func TestIMP_CLI_023_AuthModeBasicAccepted(t *testing.T) { - setenv(t, defaultPasswordEnv, "secret") +// =========================================================================== +// IMP-CLI-025: ambiguous auth +// =========================================================================== + +func TestIMP_CLI_025_BothTokenAndPasswordIsAmbiguous(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + setenv(t, "ROX_ADMIN_PASSWORD", "pwd") + + _, err := ParseAndValidate(minimalValidArgs()) + if err == nil { + t.Fatal("expected error for ambiguous auth, got nil") + } + if !strings.Contains(strings.ToLower(err.Error()), "ambiguous") { + t.Errorf("expected 'ambiguous' in error, got: %q", err.Error()) + } +} + +func TestIMP_CLI_025_NeitherTokenNorPasswordErrors(t *testing.T) { + clearAuthEnv(t) + + _, err := ParseAndValidate(minimalValidArgs()) + if err == nil { + t.Fatal("expected error for missing auth, got nil") + } + if !strings.Contains(err.Error(), "ROX_API_TOKEN") || !strings.Contains(err.Error(), "ROX_ADMIN_PASSWORD") { + t.Errorf("expected error to mention both env vars, got: %q", err.Error()) + } +} + +// =========================================================================== +// IMP-CLI-005 / IMP-MAP-022 / IMP-MAP-023: unified --cluster flag +// =========================================================================== + +func TestIMP_CLI_005_ClusterUUID(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate(minimalValidArgs( - "--acs-auth-mode", "basic", - "--acs-username", "admin", + "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23b", )) if err != nil { t.Fatalf("unexpected error: %v", err) } - if cfg.AuthMode != models.AuthModeBasic { - t.Errorf("expected basic mode, got %q", cfg.AuthMode) + if cfg.ACSClusterID != "65640fbb-ac7c-42a8-9e65-883c3f35f23b" { + t.Errorf("IMP-MAP-023: expected UUID used directly, got %q", cfg.ACSClusterID) + } + if cfg.AutoDiscoverClusterID { + 
t.Error("IMP-MAP-023: auto-discover should be disabled when UUID given") + } +} + +func TestIMP_CLI_005_ClusterUUID_UpperCase(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs( + "--cluster", "65640FBB-AC7C-42A8-9E65-883C3F35F23B", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSClusterID != "65640FBB-AC7C-42A8-9E65-883C3F35F23B" { + t.Errorf("expected uppercase UUID accepted, got %q", cfg.ACSClusterID) } } -// TestIMP_CLI_024_BasicModeFields verifies that basic mode reads username and -// password from the expected sources. -func TestIMP_CLI_024_BasicModeFields(t *testing.T) { - setenv(t, "ACS_PASSWORD", "s3cr3t") - setenv(t, "ACS_USERNAME", "alice") +func TestIMP_CLI_005_ClusterName(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - cfg, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "basic")) + cfg, err := ParseAndValidate(minimalValidArgs( + "--cluster", "production-cluster", + )) if err != nil { t.Fatalf("unexpected error: %v", err) } - if cfg.Username != "alice" { - t.Errorf("expected username alice, got %q", cfg.Username) + if cfg.ClusterNameLookup != "production-cluster" { + t.Errorf("IMP-MAP-022: expected ClusterNameLookup='production-cluster', got %q", cfg.ClusterNameLookup) + } + if cfg.ACSClusterID != "" { + t.Errorf("expected empty ACSClusterID for name lookup, got %q", cfg.ACSClusterID) } - if cfg.PasswordEnv != defaultPasswordEnv { - t.Errorf("expected password env %q, got %q", defaultPasswordEnv, cfg.PasswordEnv) + if cfg.AutoDiscoverClusterID { + t.Error("auto-discover should be disabled when cluster name given") } } -// TestIMP_CLI_025_AmbiguousAuthMissingPassword verifies that basic mode without -// a password is rejected. -func TestIMP_CLI_025_AmbiguousAuthMissingPassword(t *testing.T) { - // Ensure the password env var is absent. 
- t.Setenv("ACS_PASSWORD", "") +func TestIMP_CLI_005_ClusterOverride(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate(minimalValidArgs( - "--acs-auth-mode", "basic", - "--acs-username", "admin", + cfg, err := ParseAndValidate(minimalValidArgs( + "--kubecontext", "ctx1", + "--cluster", "ctx1=my-cluster", )) - if err == nil { - t.Fatal("expected error for basic mode without password, got nil") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.ClusterOverrides) != 1 || cfg.ClusterOverrides[0] != "ctx1=my-cluster" { + t.Errorf("expected cluster override, got %v", cfg.ClusterOverrides) + } +} + +func TestIMP_CLI_005_MultipleOverrides(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs( + "--kubecontext", "ctx1", + "--kubecontext", "ctx2", + "--cluster", "ctx1=cluster-a", + "--cluster", "ctx2=cluster-b", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.ClusterOverrides) != 2 { + t.Errorf("expected 2 overrides, got %d", len(cfg.ClusterOverrides)) } } -// TestIMP_CLI_025_AmbiguousAuthMissingUsername verifies that basic mode without -// a username is rejected. -func TestIMP_CLI_025_AmbiguousAuthMissingUsername(t *testing.T) { - setenv(t, "ACS_PASSWORD", "secret") - // Ensure username env is absent. - t.Setenv("ACS_USERNAME", "") +func TestIMP_CLI_005_MultipleShorthandsRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") _, err := ParseAndValidate(minimalValidArgs( - "--acs-auth-mode", "basic", - // No --acs-username + "--cluster", "cluster-a", + "--cluster", "cluster-b", )) if err == nil { - t.Fatal("expected error for basic mode without username, got nil") + t.Fatal("expected error for multiple --cluster shorthands, got nil") } } -// TestIMP_CLI_025_AmbiguousAuthMissingToken verifies that token mode without a -// token is rejected. 
-func TestIMP_CLI_025_AmbiguousAuthMissingToken(t *testing.T) { - t.Setenv(defaultTokenEnv, "") +func TestIMP_CLI_005_MixedShorthandAndOverride(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) - if err == nil { - t.Fatal("expected error for token mode without token, got nil") + // One shorthand (name) + one override should work: + // shorthand applies to current context, override applies to named context. + cfg, err := ParseAndValidate(minimalValidArgs( + "--cluster", "production", + "--cluster", "ctx1=staging", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ClusterNameLookup != "production" { + t.Errorf("expected shorthand name, got %q", cfg.ClusterNameLookup) + } + if len(cfg.ClusterOverrides) != 1 || cfg.ClusterOverrides[0] != "ctx1=staging" { + t.Errorf("expected override, got %v", cfg.ClusterOverrides) } } -// TestIMP_CLI_026_DefaultAuthModeIsToken verifies that when --acs-auth-mode is -// not set, the importer defaults to token mode. 
-func TestIMP_CLI_026_DefaultAuthModeIsToken(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_005_NoClusterAutoDiscover(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate(minimalValidArgs()) if err != nil { t.Fatalf("unexpected error: %v", err) } - if cfg.AuthMode != models.AuthModeToken { - t.Errorf("expected default auth mode to be %q, got %q", models.AuthModeToken, cfg.AuthMode) + if !cfg.AutoDiscoverClusterID { + t.Fatal("expected AutoDiscoverClusterID=true when no --cluster given") + } +} + +func TestIMP_CLI_005_OverrideWithUUIDValue(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs( + "--kubecontext", "ctx1", + "--cluster", "ctx1=65640fbb-ac7c-42a8-9e65-883c3f35f23b", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Should be stored as an override, not as a UUID shorthand. + if len(cfg.ClusterOverrides) != 1 { + t.Errorf("expected 1 override, got %d", len(cfg.ClusterOverrides)) + } + if cfg.ACSClusterID != "" { + t.Errorf("expected empty ACSClusterID (UUID in override, not shorthand), got %q", cfg.ACSClusterID) + } +} + +func TestIMP_CLI_005_NotUUID_LooksLikeUUID(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // Missing one hex digit in the last segment — not a valid UUID. + cfg, err := ParseAndValidate(minimalValidArgs( + "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23", + )) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Should be treated as a name, not UUID. + if cfg.ACSClusterID != "" { + t.Errorf("expected empty ACSClusterID for invalid UUID, got %q", cfg.ACSClusterID) + } + if cfg.ClusterNameLookup != "65640fbb-ac7c-42a8-9e65-883c3f35f23" { + t.Errorf("expected name lookup for invalid UUID, got %q", cfg.ClusterNameLookup) } } -// TestDefaultTimeout verifies the default request timeout is 30s. 
-func TestDefaultTimeout(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +// =========================================================================== +// Defaults and other flags (IMP-CLI-004, IMP-CLI-006..012) +// =========================================================================== + +func TestIMP_CLI_009_DefaultTimeout(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate(minimalValidArgs()) if err != nil { t.Fatalf("unexpected error: %v", err) } if cfg.RequestTimeout != 30*time.Second { - t.Errorf("expected 30s timeout, got %v", cfg.RequestTimeout) + t.Errorf("IMP-CLI-009: expected 30s timeout, got %v", cfg.RequestTimeout) + } +} + +func TestIMP_CLI_009_CustomTimeout(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--request-timeout", "2m")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.RequestTimeout != 2*time.Minute { + t.Errorf("expected 2m timeout, got %v", cfg.RequestTimeout) } } -// TestDefaultMaxRetries verifies the default max retries is 5. -func TestDefaultMaxRetries(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_010_DefaultMaxRetries(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate(minimalValidArgs()) if err != nil { t.Fatalf("unexpected error: %v", err) } - if cfg.MaxRetries != defaultMaxRetries { - t.Errorf("expected max retries %d, got %d", defaultMaxRetries, cfg.MaxRetries) + if cfg.MaxRetries != 5 { + t.Errorf("IMP-CLI-010: expected max retries 5, got %d", cfg.MaxRetries) } } -// TestMissingACSClusterID verifies that omitting --acs-cluster-id is an error. 
-func TestMissingACSClusterID(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_010_NegativeMaxRetriesRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate([]string{ - "--acs-endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - // No --acs-cluster-id - }) + _, err := ParseAndValidate(minimalValidArgs("--max-retries", "-1")) if err == nil { - t.Fatal("expected error for missing --acs-cluster-id, got nil") + t.Fatal("IMP-CLI-010: expected error for negative max-retries, got nil") } } -// TestMissingNamespaceScope verifies that providing neither --co-namespace nor -// --co-all-namespaces is an error. -func TestMissingNamespaceScope(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_010_ZeroMaxRetriesAllowed(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate([]string{ - "--acs-endpoint", "https://central.example.com", - "--acs-cluster-id", "cluster-abc", + cfg, err := ParseAndValidate(minimalValidArgs("--max-retries", "0")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.MaxRetries != 0 { + t.Errorf("expected max retries 0, got %d", cfg.MaxRetries) + } +} + +func TestIMP_CLI_004_DefaultNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", }) - if err == nil { - t.Fatal("expected error for missing namespace scope, got nil") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if cfg.CONamespace != "openshift-compliance" { + t.Fatalf("IMP-CLI-004: expected default namespace 'openshift-compliance', got %q", cfg.CONamespace) } } -// TestMutuallyExclusiveNamespaceFlags verifies that --co-namespace and -// --co-all-namespaces are mutually exclusive. 
-func TestMutuallyExclusiveNamespaceFlags(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_004_CustomNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate(minimalValidArgs("--co-all-namespaces")) - if err == nil { - t.Fatal("expected error for both --co-namespace and --co-all-namespaces, got nil") + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "custom-ns", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.CONamespace != "custom-ns" { + t.Errorf("expected custom namespace, got %q", cfg.CONamespace) + } +} + +func TestIMP_CLI_004_AllNamespacesClearsDefault(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--co-all-namespaces")) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if cfg.CONamespace != "" { + t.Fatalf("expected empty namespace with --co-all-namespaces, got %q", cfg.CONamespace) + } + if !cfg.COAllNamespaces { + t.Fatal("expected COAllNamespaces to be true") } } -// TestAllNamespacesFlag verifies that --co-all-namespaces works without --co-namespace. -func TestAllNamespacesFlag(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_004_AllNamespacesWithoutExplicitNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate([]string{ - "--acs-endpoint", "https://central.example.com", - "--acs-cluster-id", "cluster-abc", + "--endpoint", "https://central.example.com", "--co-all-namespaces", }) if err != nil { @@ -277,24 +740,109 @@ func TestAllNamespacesFlag(t *testing.T) { } } -// TestNegativeMaxRetriesRejected verifies that --max-retries < 0 is rejected. 
-func TestNegativeMaxRetriesRejected(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") +func TestIMP_CLI_006_OverwriteExistingDefaultsFalse(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate(minimalValidArgs("--max-retries", "-1")) - if err == nil { - t.Fatal("expected error for negative max-retries, got nil") + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.OverwriteExisting { + t.Error("IMP-CLI-006: expected OverwriteExisting to default to false") + } +} + +func TestIMP_CLI_027_OverwriteExistingFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--overwrite-existing")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.OverwriteExisting { + t.Error("IMP-CLI-027: expected OverwriteExisting=true when flag is set") + } +} + +func TestIMP_CLI_007_DryRunFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--dry-run")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.DryRun { + t.Error("IMP-CLI-007: expected DryRun=true when flag is set") + } +} + +func TestIMP_CLI_008_ReportJSONFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--report-json", "/tmp/report.json")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ReportJSON != "/tmp/report.json" { + t.Errorf("IMP-CLI-008: expected report path, got %q", cfg.ReportJSON) + } +} + +func TestIMP_CLI_011_CACertFileFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--ca-cert-file", "/path/to/ca.pem")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.CACertFile != "/path/to/ca.pem" { + t.Errorf("IMP-CLI-011: expected 
ca-cert-file, got %q", cfg.CACertFile) + } +} + +func TestIMP_CLI_012_InsecureSkipVerifyDefault(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.InsecureSkipVerify { + t.Error("IMP-CLI-012: expected InsecureSkipVerify to default to false") + } +} + +func TestIMP_CLI_012_InsecureSkipVerifyFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--insecure-skip-verify")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.InsecureSkipVerify { + t.Error("IMP-CLI-012: expected InsecureSkipVerify=true when flag is set") } } -// TestTrailingSlashStripped verifies that a trailing slash on the endpoint is -// stripped for consistency. +// =========================================================================== +// Edge cases +// =========================================================================== + func TestTrailingSlashStripped(t *testing.T) { - setenv(t, defaultTokenEnv, "tok") + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") - cfg, err := ParseAndValidate(minimalValidArgs( - "--acs-endpoint", "https://central.example.com/", - )) + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com/", + "--co-namespace", "openshift-compliance", + }) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -302,3 +850,74 @@ func TestTrailingSlashStripped(t *testing.T) { t.Errorf("expected trailing slash stripped, got %q", cfg.ACSEndpoint) } } + +func TestMultipleTrailingSlashesStripped(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com///", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { 
+ t.Errorf("expected all trailing slashes stripped, got %q", cfg.ACSEndpoint) + } +} + +func TestHelpReturnsSpecialError(t *testing.T) { + // Redirect stderr to avoid printing help text during test. + oldStderr := os.Stderr + os.Stderr, _ = os.Open(os.DevNull) + defer func() { os.Stderr = oldStderr }() + + _, err := ParseAndValidate([]string{"--help"}) + if err != ErrHelpRequested { + t.Errorf("expected ErrHelpRequested, got %v", err) + } +} + +func TestUnknownFlagRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--unknown-flag", "value")) + if err == nil { + t.Fatal("expected error for unknown flag, got nil") + } +} + +func TestEmptyArgsWithTokenAndEndpoint(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + setenv(t, "ROX_ENDPOINT", "https://central.example.com") + + cfg, err := ParseAndValidate([]string{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from env with empty args, got %q", cfg.ACSEndpoint) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode, got %q", cfg.AuthMode) + } +} + +func TestWhitespaceOnlyTokenIsEmpty(t *testing.T) { + clearAuthEnv(t) + // Whitespace-only token — os.Getenv returns it as non-empty. + // The config layer treats it as set (auth is inferred as token). + // Preflight will catch an invalid token at the network level. 
+ setenv(t, "ROX_API_TOKEN", " ") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode for whitespace token, got %q", cfg.AuthMode) + } +} diff --git a/scripts/compliance-operator-importer/internal/models/models.go b/scripts/compliance-operator-importer/internal/models/models.go index 8105d2972344e..98cee36ce0edb 100644 --- a/scripts/compliance-operator-importer/internal/models/models.go +++ b/scripts/compliance-operator-importer/internal/models/models.go @@ -15,21 +15,25 @@ const ( // Config holds all resolved configuration for a single importer run. type Config struct { - ACSEndpoint string - AuthMode AuthMode - TokenEnv string // env var name, default "ACS_API_TOKEN" - Username string - PasswordEnv string // env var name, default "ACS_PASSWORD" - KubeContext string // empty = use current context - CONamespace string // empty when COAllNamespaces=true - COAllNamespaces bool - ACSClusterID string - DryRun bool - ReportJSON string - RequestTimeout time.Duration - MaxRetries int - CACertFile string - InsecureSkipVerify bool + ACSEndpoint string // from --endpoint or ROX_ENDPOINT + AuthMode AuthMode // auto-inferred from env vars (ROX_API_TOKEN / ROX_ADMIN_PASSWORD) + Username string // from --username or ROX_ADMIN_USER (default "admin") + CONamespace string // empty when COAllNamespaces=true + COAllNamespaces bool + ACSClusterID string // resolved from --cluster UUID or auto-discovered + ClusterNameLookup string // non-UUID --cluster value for runtime resolution via ACS API + DryRun bool + ReportJSON string + RequestTimeout time.Duration + MaxRetries int + CACertFile string + InsecureSkipVerify bool + OverwriteExisting bool + AutoDiscoverClusterID bool // set by validate() when no --cluster in single-cluster mode + // Multi-cluster mode fields + Kubeconfigs []string // repeatable --kubeconfig paths + Kubecontexts []string // 
repeatable --kubecontext names + ClusterOverrides []string // repeatable --cluster ctx=acs-name-or-uuid } // Severity classifies how severe a Problem is. @@ -96,8 +100,8 @@ type ACSBaseScanConfig struct { Description string `json:"description"` } -// ACSCreatePayload is the request body for POST /v2/compliance/scan/configurations. -// Phase 1 is create-only; no PUT is ever issued. +// ACSCreatePayload is the request body for POST /v2/compliance/scan/configurations +// and PUT /v2/compliance/scan/configurations/{id}. type ACSCreatePayload struct { ScanName string `json:"scanName"` ScanConfig ACSBaseScanConfig `json:"scanConfig"` @@ -128,6 +132,7 @@ type ReportMeta struct { type ReportCounts struct { Discovered int `json:"discovered"` Create int `json:"create"` + Update int `json:"update"` Skip int `json:"skip"` Failed int `json:"failed"` } @@ -157,10 +162,18 @@ type Report struct { Problems []Problem `json:"problems"` } +// ACSClusterInfo represents a cluster managed by ACS. +type ACSClusterInfo struct { + ID string // ACS cluster UUID + Name string // cluster display name + ProviderClusterID string // from status.providerMetadata.cluster.id (e.g. OpenShift cluster ID) +} + // ACSClient is the interface for ACS API operations. -// Phase 1 is create-only; no PUT method is defined. 
type ACSClient interface { Preflight(ctx context.Context) error ListScanConfigurations(ctx context.Context) ([]ACSConfigSummary, error) CreateScanConfiguration(ctx context.Context, payload ACSCreatePayload) (string, error) + UpdateScanConfiguration(ctx context.Context, id string, payload ACSCreatePayload) error + ListClusters(ctx context.Context) ([]ACSClusterInfo, error) } diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight.go b/scripts/compliance-operator-importer/internal/preflight/preflight.go index 944ea7c28f85d..e082a995c134f 100644 --- a/scripts/compliance-operator-importer/internal/preflight/preflight.go +++ b/scripts/compliance-operator-importer/internal/preflight/preflight.go @@ -20,7 +20,7 @@ const preflightPath = "/v2/compliance/scan/configurations?pagination.limit=1" // Run performs preflight checks in order: // 1. Verify endpoint uses https:// (IMP-CLI-013). -// 2. Verify auth material is non-empty for the configured mode (IMP-CLI-014). +// 2. Verify auth material is non-empty for the inferred mode (IMP-CLI-014). // 3. Probe GET /v2/compliance/scan/configurations?pagination.limit=1 (IMP-CLI-015). // 4. HTTP 401/403 => fail-fast with a remediation message (IMP-CLI-016). // @@ -29,8 +29,8 @@ func Run(ctx context.Context, cfg *models.Config) error { // IMP-CLI-013: endpoint must be https://. 
if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { return fmt.Errorf( - "preflight failed: endpoint %q must start with https:// (IMP-CLI-013)\n"+ - "Fix: use --acs-endpoint https://", + "preflight failed: endpoint %q must start with https://\n"+ + "Fix: use --endpoint https://", cfg.ACSEndpoint, ) } @@ -57,7 +57,7 @@ func Run(ctx context.Context, cfg *models.Config) error { if err != nil { return fmt.Errorf( "preflight failed: could not reach ACS at %s: %w\n"+ - "Fix: check network connectivity and that --acs-endpoint is correct", + "Fix: check network connectivity and that --endpoint is correct", cfg.ACSEndpoint, err, ) } @@ -70,12 +70,12 @@ func Run(ctx context.Context, cfg *models.Config) error { return nil case http.StatusUnauthorized: return errors.New( - "preflight failed: ACS returned 401 Unauthorized (IMP-CLI-016)\n" + + "preflight failed: ACS returned 401 Unauthorized\n" + "Fix: verify your ACS API token or credentials are correct and not expired", ) case http.StatusForbidden: return errors.New( - "preflight failed: ACS returned 403 Forbidden (IMP-CLI-016)\n" + + "preflight failed: ACS returned 403 Forbidden\n" + "Fix: ensure your ACS user has the 'Compliance' permission set with at least read access", ) default: @@ -87,32 +87,28 @@ func Run(ctx context.Context, cfg *models.Config) error { } } -// checkAuthMaterial validates that the auth credentials for the configured +// checkAuthMaterial validates that the auth credentials for the inferred // mode are non-empty (IMP-CLI-014). 
func checkAuthMaterial(cfg *models.Config) error { switch cfg.AuthMode { case models.AuthModeToken: - token := os.Getenv(cfg.TokenEnv) - if token == "" { - return fmt.Errorf( - "preflight failed: token auth mode requires a non-empty token in env var %q (IMP-CLI-014)\n"+ - "Fix: set %s=", - cfg.TokenEnv, cfg.TokenEnv, + if os.Getenv("ROX_API_TOKEN") == "" { + return errors.New( + "preflight failed: token auth mode requires a non-empty ROX_API_TOKEN\n" + + "Fix: export ROX_API_TOKEN=", ) } case models.AuthModeBasic: if cfg.Username == "" { return errors.New( - "preflight failed: basic auth mode requires a non-empty username (IMP-CLI-014)\n" + - "Fix: pass --acs-username= or set ACS_USERNAME=", + "preflight failed: basic auth mode requires a non-empty username\n" + + "Fix: pass --username= or set ROX_ADMIN_USER=", ) } - password := os.Getenv(cfg.PasswordEnv) - if password == "" { - return fmt.Errorf( - "preflight failed: basic auth mode requires a non-empty password in env var %q (IMP-CLI-014)\n"+ - "Fix: set %s=", - cfg.PasswordEnv, cfg.PasswordEnv, + if os.Getenv("ROX_ADMIN_PASSWORD") == "" { + return errors.New( + "preflight failed: basic auth mode requires a non-empty ROX_ADMIN_PASSWORD\n" + + "Fix: export ROX_ADMIN_PASSWORD=", ) } } @@ -148,10 +144,10 @@ func buildHTTPClient(cfg *models.Config) (*http.Client, error) { func addAuthHeader(req *http.Request, cfg *models.Config) { switch cfg.AuthMode { case models.AuthModeToken: - token := os.Getenv(cfg.TokenEnv) + token := os.Getenv("ROX_API_TOKEN") req.Header.Set("Authorization", "Bearer "+token) case models.AuthModeBasic: - password := os.Getenv(cfg.PasswordEnv) + password := os.Getenv("ROX_ADMIN_PASSWORD") creds := base64.StdEncoding.EncodeToString([]byte(cfg.Username + ":" + password)) req.Header.Set("Authorization", "Basic "+creds) } diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go index 
5dbac5803c0d4..a52d5b63f6c5e 100644 --- a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go +++ b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go @@ -11,12 +11,12 @@ import ( "github.com/stackrox/co-acs-importer/internal/models" ) -// minimalTokenConfig returns a Config wired to the given server URL and token. -func minimalTokenConfig(serverURL, token string) *models.Config { +// minimalTokenConfig returns a Config wired to the given server URL in token mode. +// Caller must set ROX_API_TOKEN env var. +func minimalTokenConfig(serverURL string) *models.Config { return &models.Config{ ACSEndpoint: serverURL, AuthMode: models.AuthModeToken, - TokenEnv: "TEST_ACS_TOKEN", RequestTimeout: 5 * time.Second, } } @@ -33,10 +33,9 @@ func TestIMP_CLI_015_200ResponseNoError(t *testing.T) { })) defer srv.Close() - t.Setenv("TEST_ACS_TOKEN", "validtoken") + t.Setenv("ROX_API_TOKEN", "validtoken") - cfg := minimalTokenConfig(srv.URL, "validtoken") - // Use the test server's TLS client directly by trusting its certificate. + cfg := minimalTokenConfig(srv.URL) cfg.InsecureSkipVerify = true err := Run(context.Background(), cfg) @@ -53,9 +52,9 @@ func TestIMP_CLI_016_401ReturnsRemediationError(t *testing.T) { })) defer srv.Close() - t.Setenv("TEST_ACS_TOKEN", "badtoken") + t.Setenv("ROX_API_TOKEN", "badtoken") - cfg := minimalTokenConfig(srv.URL, "badtoken") + cfg := minimalTokenConfig(srv.URL) cfg.InsecureSkipVerify = true err := Run(context.Background(), cfg) @@ -66,7 +65,6 @@ func TestIMP_CLI_016_401ReturnsRemediationError(t *testing.T) { if !strings.Contains(strings.ToLower(msg), "unauthorized") && !strings.Contains(strings.ToLower(msg), "401") { t.Errorf("expected 'unauthorized' or '401' in error message, got: %q", msg) } - // Must include a remediation hint. 
if !strings.Contains(strings.ToLower(msg), "fix:") { t.Errorf("expected remediation hint (Fix:) in error message, got: %q", msg) } @@ -80,9 +78,9 @@ func TestIMP_CLI_016_403ReturnsRemediationError(t *testing.T) { })) defer srv.Close() - t.Setenv("TEST_ACS_TOKEN", "insufficienttoken") + t.Setenv("ROX_API_TOKEN", "insufficienttoken") - cfg := minimalTokenConfig(srv.URL, "insufficienttoken") + cfg := minimalTokenConfig(srv.URL) cfg.InsecureSkipVerify = true err := Run(context.Background(), cfg) @@ -101,12 +99,11 @@ func TestIMP_CLI_016_403ReturnsRemediationError(t *testing.T) { // TestIMP_CLI_013_NonHTTPSEndpointRejected verifies that a non-https endpoint // is rejected before any network call is made (IMP-CLI-013). func TestIMP_CLI_013_NonHTTPSEndpointRejected(t *testing.T) { - t.Setenv("TEST_ACS_TOKEN", "tok") + t.Setenv("ROX_API_TOKEN", "tok") cfg := &models.Config{ ACSEndpoint: "http://central.example.com", AuthMode: models.AuthModeToken, - TokenEnv: "TEST_ACS_TOKEN", RequestTimeout: 5 * time.Second, } @@ -122,13 +119,11 @@ func TestIMP_CLI_013_NonHTTPSEndpointRejected(t *testing.T) { // TestIMP_CLI_014_EmptyTokenRejected verifies that an empty token in token // mode is caught before any HTTP request (IMP-CLI-014). func TestIMP_CLI_014_EmptyTokenRejected(t *testing.T) { - // Do not set the token env var. - t.Setenv("TEST_ACS_TOKEN", "") + t.Setenv("ROX_API_TOKEN", "") cfg := &models.Config{ ACSEndpoint: "https://central.example.com", AuthMode: models.AuthModeToken, - TokenEnv: "TEST_ACS_TOKEN", RequestTimeout: 5 * time.Second, } @@ -144,13 +139,12 @@ func TestIMP_CLI_014_EmptyTokenRejected(t *testing.T) { // TestIMP_CLI_014_BasicModeEmptyPasswordRejected verifies that basic mode with // an empty password is rejected before any HTTP request (IMP-CLI-014). 
func TestIMP_CLI_014_BasicModeEmptyPasswordRejected(t *testing.T) { - t.Setenv("ACS_PASSWORD", "") + t.Setenv("ROX_ADMIN_PASSWORD", "") cfg := &models.Config{ ACSEndpoint: "https://central.example.com", AuthMode: models.AuthModeBasic, Username: "admin", - PasswordEnv: "ACS_PASSWORD", RequestTimeout: 5 * time.Second, } @@ -170,9 +164,9 @@ func TestIMP_CLI_015_ProbesCorrectPath(t *testing.T) { })) defer srv.Close() - t.Setenv("TEST_ACS_TOKEN", "tok") + t.Setenv("ROX_API_TOKEN", "tok") - cfg := minimalTokenConfig(srv.URL, "tok") + cfg := minimalTokenConfig(srv.URL) cfg.InsecureSkipVerify = true if err := Run(context.Background(), cfg); err != nil { @@ -195,9 +189,9 @@ func TestIMP_CLI_015_BearerTokenSentInHeader(t *testing.T) { })) defer srv.Close() - t.Setenv("TEST_ACS_TOKEN", "my-secret-token") + t.Setenv("ROX_API_TOKEN", "my-secret-token") - cfg := minimalTokenConfig(srv.URL, "my-secret-token") + cfg := minimalTokenConfig(srv.URL) cfg.InsecureSkipVerify = true if err := Run(context.Background(), cfg); err != nil { diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only.go b/scripts/compliance-operator-importer/internal/reconcile/create_only.go index bcd9ac9f49c84..90c6394e83c38 100644 --- a/scripts/compliance-operator-importer/internal/reconcile/create_only.go +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only.go @@ -1,6 +1,5 @@ -// Package reconcile implements the create-only reconciliation loop. -// -// create-only: PUT is never called in Phase 1 +// Package reconcile implements the reconciliation loop that can either create-only +// or create-or-update scan configurations based on the overwriteExisting setting. package reconcile import ( @@ -40,69 +39,137 @@ type Action struct { Problem *models.Problem } -// Reconciler implements the create-only reconciliation loop. -// It never calls PUT. Existing scan names are skipped with a conflict problem. 
-// -// create-only: PUT is never called in Phase 1 +// Reconciler implements the reconciliation loop. +// When overwriteExisting=false, existing scan names are skipped with a conflict problem. +// When overwriteExisting=true, existing scan names are updated via PUT. type Reconciler struct { - client models.ACSClient - maxRetries int - dryRun bool + client models.ACSClient + maxRetries int + dryRun bool + overwriteExisting bool } // NewReconciler creates a Reconciler. // -// - client: ACS API client (POST-only; no PUT anywhere) -// - maxRetries: maximum total attempts for a single create (must be >= 1) -// - dryRun: when true, no POST is issued; planned actions are still recorded -func NewReconciler(client models.ACSClient, maxRetries int, dryRun bool) *Reconciler { +// - client: ACS API client supporting both POST and PUT operations +// - maxRetries: maximum total attempts for a single create/update (must be >= 1) +// - dryRun: when true, no POST/PUT is issued; planned actions are still recorded +// - overwriteExisting: when true, existing configs are updated via PUT instead of skipped +func NewReconciler(client models.ACSClient, maxRetries int, dryRun bool, overwriteExisting bool) *Reconciler { if maxRetries < 1 { maxRetries = 1 } return &Reconciler{ - client: client, - maxRetries: maxRetries, - dryRun: dryRun, + client: client, + maxRetries: maxRetries, + dryRun: dryRun, + overwriteExisting: overwriteExisting, } } -// Apply tries to create the scan config if scanName is not already in existingNames. +// Apply tries to create or update the scan config based on whether scanName exists in existingNames. // // Behaviour: -// - If dryRun=true: records planned action, no POST is issued. (IMP-IDEM-004, IMP-IDEM-006) -// - If scanName exists: skip + conflict problem. (IMP-IDEM-002, IMP-IDEM-003) +// - If dryRun=true: records planned action, no POST/PUT is issued. (IMP-IDEM-004, IMP-IDEM-006) +// - If scanName exists and overwriteExisting=false: skip + conflict problem. 
(IMP-IDEM-002, IMP-IDEM-003) +// - If scanName exists and overwriteExisting=true: update via PUT. (IMP-IDEM-008) +// - If scanName not exists: create via POST regardless of overwriteExisting. (IMP-IDEM-009) // - Transient failures (429,502,503,504): retry with exponential backoff. (IMP-ERR-001) // - Non-transient failures (400,401,403,404): record as fail immediately. (IMP-ERR-002) // // Exponential backoff: base=500ms, doubles each retry; up to maxRetries total attempts. // Attempts count is always recorded in the returned Action. // -// create-only: PUT is never called in Phase 1 +// existingNames maps scanName -> configID so we know the ID for PUT operations. func (r *Reconciler) Apply( ctx context.Context, payload models.ACSCreatePayload, source models.ReportItemSource, - existingNames map[string]bool, + existingNames map[string]string, ) Action { action := Action{Source: source} - // IMP-IDEM-002: existing name => skip with conflict problem - // IMP-IDEM-003: no PUT is attempted for existing configs - if existingNames[payload.ScanName] { - problem := &models.Problem{ - Severity: models.SeverityWarning, - Category: models.CategoryConflict, - ResourceRef: resourceRef(source), - Description: fmt.Sprintf("scan configuration %q already exists in ACS and will not be updated (create-only mode)", payload.ScanName), - FixHint: fmt.Sprintf("Remove the existing ACS scan configuration named %q before re-running, or rename the ScanSettingBinding to use a different name.", payload.ScanName), - Skipped: true, + existingID, nameExists := existingNames[payload.ScanName] + + // Handle existing name based on overwriteExisting setting + if nameExists { + if !r.overwriteExisting { + // IMP-IDEM-002: existing name and overwriteExisting=false => skip with conflict problem + // IMP-IDEM-003: no PUT is attempted when overwriteExisting=false + problem := &models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: resourceRef(source), + 
Description: fmt.Sprintf("scan configuration %q already exists in ACS and will not be updated (create-only mode)", payload.ScanName), + FixHint: fmt.Sprintf("Remove the existing ACS scan configuration named %q before re-running, or use --overwrite-existing flag, or rename the ScanSettingBinding to use a different name.", payload.ScanName), + Skipped: true, + } + action.ActionType = "skip" + action.Reason = fmt.Sprintf("scan configuration %q already exists in ACS", payload.ScanName) + action.Problem = problem + return action + } + + // IMP-IDEM-008: overwriteExisting=true and name exists => update via PUT + if r.dryRun { + action.ActionType = "update" + action.ACSScanConfigID = existingID + action.Reason = "dry-run: would PUT /v2/compliance/scan/configurations/" + existingID + action.Attempts = 0 + return action } - action.ActionType = "skip" - action.Reason = fmt.Sprintf("scan configuration %q already exists in ACS", payload.ScanName) - action.Problem = problem + + // Perform update with retry logic + var ( + lastErr error + delay = 500 * time.Millisecond + ) + + for attempt := 1; attempt <= r.maxRetries; attempt++ { + action.Attempts = attempt + + lastErr = r.client.UpdateScanConfiguration(ctx, existingID, payload) + if lastErr == nil { + action.ActionType = "update" + action.ACSScanConfigID = existingID + action.Reason = "scan configuration updated successfully" + return action + } + + // Check if the error is transient (eligible for retry) + if sc, ok := asStatusCoder(lastErr); ok { + code := sc.StatusCode() + if !transientStatusCodes[code] { + // Non-transient: fail immediately, no more attempts + action.ActionType = "fail" + action.Reason = fmt.Sprintf("non-transient HTTP %d error updating scan configuration", code) + action.Err = lastErr + return action + } + } + + // Do not sleep after the last attempt + if attempt < r.maxRetries { + select { + case <-ctx.Done(): + action.ActionType = "fail" + action.Reason = "context cancelled during retry backoff" + 
action.Err = ctx.Err() + return action + case <-time.After(delay): + } + delay *= 2 + } + } + + // Exhausted all retries for update + action.ActionType = "fail" + action.Reason = fmt.Sprintf("failed to update after %d attempt(s): %v", action.Attempts, lastErr) + action.Err = lastErr return action } + // IMP-IDEM-009: name not exists => create via POST regardless of overwriteExisting flag // IMP-IDEM-004: dry-run => record planned action, do not POST // IMP-IDEM-006: planned action "create" is still recorded if r.dryRun { diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go index 3be75e7b6e847..244954beac9f0 100644 --- a/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go @@ -17,19 +17,20 @@ import ( // mockACSClient is a test double that records every call and allows the caller // to inject per-call responses via the nextResponses queue. -// -// IMP-IDEM-003: The mock only implements POST (via CreateScanConfiguration). -// There is no Put/Update method. If one were added to ACSClient, this struct -// would fail to compile unless the method were added here too, making the -// violation immediately visible. type mockACSClient struct { // createResponses is consumed in order; each entry is either nil (success) // or an error. Use statusError to encode HTTP status codes. createResponses []error + // updateResponses is consumed in order for PUT calls. + updateResponses []error + // callCount tracks total calls to CreateScanConfiguration. callCount atomic.Int32 + // updateCallCount tracks total calls to UpdateScanConfiguration. + updateCallCount atomic.Int32 + // recordedIDCounter is used to return unique IDs on success. 
idCounter atomic.Int32 @@ -63,6 +64,21 @@ func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSC return id, nil } +func (m *mockACSClient) UpdateScanConfiguration(_ context.Context, _ string, _ models.ACSCreatePayload) error { + idx := int(m.updateCallCount.Add(1)) - 1 + if idx < len(m.updateResponses) { + if err := m.updateResponses[idx]; err != nil { + return err + } + } + return nil +} + +func (m *mockACSClient) ListClusters(_ context.Context) ([]models.ACSClusterInfo, error) { + // Not used in reconcile tests, return empty list + return []models.ACSClusterInfo{}, nil +} + // --------------------------------------------------------------------------- // Helpers // --------------------------------------------------------------------------- @@ -93,9 +109,9 @@ func defaultPayload(scanName string) models.ACSCreatePayload { // IMP-IDEM-001: non-existing name => POST called, action="create" func TestApply_IMP_IDEM_001_NewName_CreatesConfig(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, false) + r := reconcile.NewReconciler(mock, 3, false, false) - action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) if action.ActionType != "create" { t.Errorf("IMP-IDEM-001: expected action 'create', got %q", action.ActionType) @@ -114,9 +130,9 @@ func TestApply_IMP_IDEM_001_NewName_CreatesConfig(t *testing.T) { // IMP-IDEM-002: existing name => action="skip", Problem.Category=conflict, FixHint non-empty func TestApply_IMP_IDEM_002_ExistingName_SkipsWithConflictProblem(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, false) + r := reconcile.NewReconciler(mock, 3, false, false) - existing := map[string]bool{"cis-weekly": true} + existing := map[string]string{"cis-weekly": "existing-id-123"} action := r.Apply(context.Background(), 
defaultPayload("cis-weekly"), defaultSource(), existing) if action.ActionType != "skip" { @@ -140,47 +156,61 @@ func TestApply_IMP_IDEM_002_ExistingName_SkipsWithConflictProblem(t *testing.T) } } -// IMP-IDEM-003: verify no PUT ever called (mock records method; ACSClient has no Put) -func TestApply_IMP_IDEM_003_NeverCallsPUT(t *testing.T) { - // The mockACSClient deliberately has no Put/Update method. - // It only satisfies models.ACSClient which defines: - // Preflight, ListScanConfigurations, CreateScanConfiguration (POST only). - // If a PUT-based method existed in the interface, the mock would fail to compile. +// IMP-IDEM-003: verify no PUT called when overwriteExisting=false (default mode) +func TestApply_IMP_IDEM_003_DefaultMode_NoPUT(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, false) + r := reconcile.NewReconciler(mock, 3, false, false) // overwriteExisting=false // Run multiple scenarios - none should trigger a PUT for _, scanName := range []string{"new-scan-1", "new-scan-2"} { - _ = r.Apply(context.Background(), defaultPayload(scanName), defaultSource(), map[string]bool{}) + _ = r.Apply(context.Background(), defaultPayload(scanName), defaultSource(), map[string]string{}) } // existing name - should skip, not PUT - _ = r.Apply(context.Background(), defaultPayload("existing"), defaultSource(), map[string]bool{"existing": true}) + _ = r.Apply(context.Background(), defaultPayload("existing"), defaultSource(), map[string]string{"existing": "existing-id"}) - // The mock only has CreateScanConfiguration (POST). callCount reflects POST calls only. - // 2 creates + 1 skip = 2 POST calls total (no PUT possible). 
+ // 2 creates + 1 skip = 2 POST calls total, 0 PUT calls if mock.callCount.Load() != 2 { - t.Errorf("IMP-IDEM-003: expected exactly 2 POST calls (2 creates, 0 PUT), got %d", mock.callCount.Load()) + t.Errorf("IMP-IDEM-003: expected exactly 2 POST calls (2 creates), got %d", mock.callCount.Load()) + } + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-003: expected 0 PUT calls when overwriteExisting=false, got %d", mock.updateCallCount.Load()) } } // IMP-IDEM-004: dryRun=true => no POST func TestApply_IMP_IDEM_004_DryRun_NoPost(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + r := reconcile.NewReconciler(mock, 3, true, false) // dryRun=true - _ = r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + _ = r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) if mock.callCount.Load() != 0 { t.Errorf("IMP-IDEM-004: expected 0 POST calls in dry-run mode, got %d", mock.callCount.Load()) } } +// IMP-IDEM-005: dryRun=true => no PUT (even with overwriteExisting=true) +func TestApply_IMP_IDEM_005_DryRun_NoPut(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true, true) // dryRun=true, overwriteExisting=true + + existing := map[string]string{"existing-scan": "existing-id"} + _ = r.Apply(context.Background(), defaultPayload("existing-scan"), defaultSource(), existing) + + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-005: expected 0 PUT calls in dry-run mode, got %d", mock.updateCallCount.Load()) + } + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-005: expected 0 POST calls in dry-run mode, got %d", mock.callCount.Load()) + } +} + // IMP-IDEM-006: dryRun => action="create" still recorded as planned func TestApply_IMP_IDEM_006_DryRun_PlannedCreateRecorded(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + r := 
reconcile.NewReconciler(mock, 3, true, false) // dryRun=true - action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) if action.ActionType != "create" { t.Errorf("IMP-IDEM-006: dry-run planned action should be 'create', got %q", action.ActionType) @@ -190,9 +220,9 @@ func TestApply_IMP_IDEM_006_DryRun_PlannedCreateRecorded(t *testing.T) { // IMP-IDEM-007: dryRun => problems still populated for problematic resources func TestApply_IMP_IDEM_007_DryRun_ProblemsStillPopulated(t *testing.T) { mock := &mockACSClient{} - r := reconcile.NewReconciler(mock, 3, true) // dryRun=true + r := reconcile.NewReconciler(mock, 3, true, false) // dryRun=true - existing := map[string]bool{"cis-weekly": true} + existing := map[string]string{"cis-weekly": "existing-id-123"} action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) if action.Problem == nil { @@ -212,9 +242,9 @@ func TestApply_IMP_ERR_001_Retry429_ThenSuccess(t *testing.T) { nil, // 3rd attempt succeeds }, } - r := reconcile.NewReconciler(mock, 5, false) + r := reconcile.NewReconciler(mock, 5, false, false) - action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) if action.ActionType != "create" { t.Errorf("IMP-ERR-001: expected action 'create' after retry success, got %q", action.ActionType) @@ -239,9 +269,9 @@ func TestApply_IMP_ERR_001_Retry5xx_ThenSuccess(t *testing.T) { nil, // 3rd succeeds }, } - r := reconcile.NewReconciler(mock, 5, false) + r := reconcile.NewReconciler(mock, 5, false, false) - action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + action := r.Apply(context.Background(), defaultPayload("new-scan"), 
defaultSource(), map[string]string{}) if action.ActionType != "create" { t.Errorf("IMP-ERR-001: HTTP %d - expected 'create', got %q", code, action.ActionType) @@ -263,9 +293,9 @@ func TestApply_IMP_ERR_002_NonTransient400_NoRetry(t *testing.T) { &statusError{code: code}, }, } - r := reconcile.NewReconciler(mock, 5, false) + r := reconcile.NewReconciler(mock, 5, false, false) - action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]bool{}) + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) if action.ActionType != "fail" { t.Errorf("IMP-ERR-002: HTTP %d - expected action 'fail', got %q", code, action.ActionType) @@ -322,3 +352,52 @@ func errorIs(err error, code int) bool { // keep errorIs in use var _ = errorIs + +// IMP-IDEM-008: overwriteExisting=true, name exists => PUT called, action="update" +func TestApply_IMP_IDEM_008_OverwriteExisting_Updates(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, true) // overwriteExisting=true + + existing := map[string]string{"cis-weekly": "existing-config-id-789"} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.ActionType != "update" { + t.Errorf("IMP-IDEM-008: expected action 'update', got %q", action.ActionType) + } + if action.ACSScanConfigID != "existing-config-id-789" { + t.Errorf("IMP-IDEM-008: expected existing ID preserved, got %q", action.ACSScanConfigID) + } + if action.Err != nil { + t.Errorf("IMP-IDEM-008: unexpected error: %v", action.Err) + } + if mock.updateCallCount.Load() != 1 { + t.Errorf("IMP-IDEM-008: expected 1 PUT call, got %d", mock.updateCallCount.Load()) + } + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-008: expected 0 POST calls, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-009: overwriteExisting=true, name not exists => POST called, action="create" +func 
TestApply_IMP_IDEM_009_OverwriteExisting_Creates(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, true) // overwriteExisting=true + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-009: expected action 'create', got %q", action.ActionType) + } + if action.ACSScanConfigID == "" { + t.Error("IMP-IDEM-009: expected non-empty ACSScanConfigID after create") + } + if action.Err != nil { + t.Errorf("IMP-IDEM-009: unexpected error: %v", action.Err) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-IDEM-009: expected 1 POST call, got %d", mock.callCount.Load()) + } + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-009: expected 0 PUT calls, got %d", mock.updateCallCount.Load()) + } +} diff --git a/scripts/compliance-operator-importer/internal/report/report.go b/scripts/compliance-operator-importer/internal/report/report.go index e9907241f34e5..c5ab9f7e9e645 100644 --- a/scripts/compliance-operator-importer/internal/report/report.go +++ b/scripts/compliance-operator-importer/internal/report/report.go @@ -31,15 +31,20 @@ func (b *Builder) RecordItem(item models.ReportItem) { // Build constructs the final Report from all recorded items and the supplied // problems list. // -// IMP-CLI-021: sets meta.mode = "create-only", meta.timestamp to current UTC +// IMP-CLI-021: sets meta.mode based on cfg.OverwriteExisting, meta.timestamp to current UTC // RFC3339, meta.dryRun from cfg, meta.namespaceScope from cfg. // IMP-CLI-021: computes counts from items actions. 
func (b *Builder) Build(problems []models.Problem) models.Report { + mode := "create-only" + if b.cfg.OverwriteExisting { + mode = "create-or-update" + } + meta := models.ReportMeta{ Timestamp: time.Now().UTC().Format(time.RFC3339), DryRun: b.cfg.DryRun, NamespaceScope: namespaceScope(b.cfg), - Mode: "create-only", + Mode: mode, } counts := models.ReportCounts{ @@ -49,6 +54,8 @@ func (b *Builder) Build(problems []models.Problem) models.Report { switch it.Action { case "create": counts.Create++ + case "update": + counts.Update++ case "skip": counts.Skip++ case "fail": diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source.go b/scripts/compliance-operator-importer/internal/run/cluster_source.go new file mode 100644 index 0000000000000..cd0cb1093c8c2 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/cluster_source.go @@ -0,0 +1,207 @@ +package run + +import ( + "context" + "errors" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/discover" + "github.com/stackrox/co-acs-importer/internal/models" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" +) + +// ClusterSource represents a single source cluster with its CO client and ACS cluster ID. +type ClusterSource struct { + Label string // kubeconfig path or context name, for logging + COClient cofetch.COClient + ACSClusterID string +} + +// BuildClusterSources creates ClusterSource entries from the config. +// +// Logic: +// - If no multi-cluster flags: single-cluster mode using current context and cfg.ACSClusterID. +// - If --kubeconfig flags: one source per kubeconfig file, discover cluster ID. +// - If --kubecontext flags: one source per context (or all contexts if "all"), discover cluster ID. +// - Manual overrides from --cluster apply to matched contexts. 
+func BuildClusterSources(ctx context.Context, cfg *models.Config, acsClient models.ACSClient) ([]ClusterSource, error) { + isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 + + if !isMultiClusterMode { + // Single-cluster mode with auto-discovery. + coClient, err := cofetch.NewClient(cfg) + if err != nil { + return nil, fmt.Errorf("create CO client: %w", err) + } + + clusterID := cfg.ACSClusterID + if clusterID == "" { + // Auto-discover using default kubeconfig context. + dynClient, err := buildDynamicClientForContext("") + if err != nil { + return nil, fmt.Errorf("build dynamic client for current context: %w", err) + } + clusterID, err = discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, "") + if err != nil { + return nil, fmt.Errorf("discover cluster ID for current context: %w", err) + } + } + + return []ClusterSource{{ + Label: "current-context", + COClient: coClient, + ACSClusterID: clusterID, + }}, nil + } + + // Parse manual cluster overrides into a map: contextName -> acsClusterName. + overrides, err := parseClusterOverrides(cfg.ClusterOverrides) + if err != nil { + return nil, err + } + + var sources []ClusterSource + + // Handle --kubeconfig mode. + if len(cfg.Kubeconfigs) > 0 { + for _, kubeconfigPath := range cfg.Kubeconfigs { + coClient, err := cofetch.NewClientForKubeconfig(kubeconfigPath, cfg.CONamespace, cfg.COAllNamespaces) + if err != nil { + return nil, fmt.Errorf("create CO client for kubeconfig %q: %w", kubeconfigPath, err) + } + + // Build dynamic client for discovery. + dynClient, err := buildDynamicClientForKubeconfig(kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("build dynamic client for kubeconfig %q: %w", kubeconfigPath, err) + } + + // Check for manual override (match by kubeconfig path? Not practical. Skip for kubeconfig mode). 
+ acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, "") + if err != nil { + return nil, fmt.Errorf("discover cluster ID for kubeconfig %q: %w", kubeconfigPath, err) + } + + sources = append(sources, ClusterSource{ + Label: kubeconfigPath, + COClient: coClient, + ACSClusterID: acsClusterID, + }) + } + return sources, nil + } + + // Handle --kubecontext mode. + if len(cfg.Kubecontexts) > 0 { + contexts := cfg.Kubecontexts + if len(contexts) == 1 && contexts[0] == "all" { + // Expand "all" to all contexts in the active kubeconfig. + allContexts, err := listAllContexts() + if err != nil { + return nil, fmt.Errorf("list all contexts: %w", err) + } + contexts = allContexts + } + + for _, contextName := range contexts { + coClient, err := cofetch.NewClientForContext(contextName, cfg.CONamespace, cfg.COAllNamespaces) + if err != nil { + return nil, fmt.Errorf("create CO client for context %q: %w", contextName, err) + } + + // Build dynamic client for discovery. + dynClient, err := buildDynamicClientForContext(contextName) + if err != nil { + return nil, fmt.Errorf("build dynamic client for context %q: %w", contextName, err) + } + + // Check for manual override. + manualName := overrides[contextName] + acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, manualName) + if err != nil { + return nil, fmt.Errorf("discover cluster ID for context %q: %w", contextName, err) + } + + sources = append(sources, ClusterSource{ + Label: contextName, + COClient: coClient, + ACSClusterID: acsClusterID, + }) + } + return sources, nil + } + + return nil, errors.New("no cluster sources configured") +} + +// parseClusterOverrides parses --cluster flags into a map: contextName -> acsClusterName. 
+// Format: ctx=acs-name +func parseClusterOverrides(overrides []string) (map[string]string, error) { + result := make(map[string]string) + for _, override := range overrides { + parts := splitOnce(override, "=") + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return nil, fmt.Errorf("invalid --cluster format %q: expected ctx=acs-name", override) + } + result[parts[0]] = parts[1] + } + return result, nil +} + +// splitOnce splits s on the first occurrence of sep. +func splitOnce(s, sep string) []string { + idx := -1 + for i := 0; i < len(s); i++ { + if s[i:i+len(sep)] == sep { + idx = i + break + } + } + if idx == -1 { + return []string{s} + } + return []string{s[:idx], s[idx+len(sep):]} +} + +// listAllContexts returns all context names from the active kubeconfig. +func listAllContexts() ([]string, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + config, err := loadingRules.Load() + if err != nil { + return nil, fmt.Errorf("load kubeconfig: %w", err) + } + + var contexts []string + for name := range config.Contexts { + contexts = append(contexts, name) + } + if len(contexts) == 0 { + return nil, errors.New("no contexts found in kubeconfig") + } + return contexts, nil +} + +// buildDynamicClientForKubeconfig creates a dynamic k8s client for the given kubeconfig file. +func buildDynamicClientForKubeconfig(kubeconfigPath string) (dynamic.Interface, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build rest config: %w", err) + } + return dynamic.NewForConfig(restConfig) +} + +// buildDynamicClientForContext creates a dynamic k8s client for the given context. 
+func buildDynamicClientForContext(contextName string) (dynamic.Interface, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build rest config: %w", err) + } + return dynamic.NewForConfig(restConfig) +} diff --git a/scripts/compliance-operator-importer/internal/run/run.go b/scripts/compliance-operator-importer/internal/run/run.go index 6dd98b5397869..2c661a1fb9836 100644 --- a/scripts/compliance-operator-importer/internal/run/run.go +++ b/scripts/compliance-operator-importer/internal/run/run.go @@ -15,6 +15,7 @@ import ( "github.com/stackrox/co-acs-importer/internal/problems" "github.com/stackrox/co-acs-importer/internal/reconcile" "github.com/stackrox/co-acs-importer/internal/report" + "github.com/stackrox/co-acs-importer/internal/status" ) // Exit code constants (IMP-CLI-017..019, IMP-ERR-003). @@ -29,7 +30,8 @@ type Runner struct { cfg *models.Config acsClient models.ACSClient coClient cofetch.COClient - out io.Writer // injectable; defaults to os.Stdout + out io.Writer // injectable; defaults to os.Stdout + status *status.Printer // stage-by-stage progress output } // NewRunner creates a Runner ready to execute, writing console output to os.Stdout. @@ -39,6 +41,7 @@ func NewRunner(cfg *models.Config, acsClient models.ACSClient, coClient cofetch. acsClient: acsClient, coClient: coClient, out: os.Stdout, + status: status.New(), } } @@ -47,11 +50,12 @@ func NewRunner(cfg *models.Config, acsClient models.ACSClient, coClient cofetch. func (r *Runner) WithOutput(w io.Writer) *Runner { cp := *r cp.out = w + cp.status = status.NewWithWriter(w) return &cp } // printf is a convenience wrapper so callers don't need to handle format errors. 
-func (r *Runner) printf(format string, args ...interface{}) { +func (r *Runner) printf(format string, args ...any) { fmt.Fprintf(r.out, format, args...) //nolint:errcheck // best-effort console output } @@ -70,36 +74,36 @@ func (r *Runner) Run(ctx context.Context) int { builder := report.NewBuilder(r.cfg) // Step 1: list existing ACS scan configs to populate the deduplication set. - // Failure here is fatal (IMP-CLI-018): we cannot safely proceed without - // knowing which names already exist. + r.status.Stage("Inventory", "listing existing ACS scan configurations") summaries, err := r.acsClient.ListScanConfigurations(ctx) if err != nil { - r.printf("FATAL: failed to list ACS scan configurations: %v\n", err) + r.status.Failf("failed to list ACS scan configurations: %v", err) return ExitFatalError } - existingNames := make(map[string]bool, len(summaries)) + existingNames := make(map[string]string, len(summaries)) for _, s := range summaries { - existingNames[s.ScanName] = true + existingNames[s.ScanName] = s.ID } + r.status.OKf("found %d existing scan configurations", len(summaries)) // Step 2: discover CO ScanSettingBindings. - // Failure here is also fatal (IMP-CLI-018). + r.status.Stage("Scan", "listing ScanSettingBindings from cluster") bindings, err := r.coClient.ListScanSettingBindings(ctx) if err != nil { - r.printf("FATAL: failed to list ScanSettingBindings: %v\n", err) + r.status.Failf("failed to list ScanSettingBindings: %v", err) return ExitFatalError } + r.status.OKf("found %d ScanSettingBindings", len(bindings)) // maxRetries defaults to 1 (single attempt) when cfg.MaxRetries is zero. maxRetries := r.cfg.MaxRetries if maxRetries < 1 { maxRetries = 1 } - rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun) + rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun, r.cfg.OverwriteExisting) // Step 3: process each binding independently. 
- // Per-binding failures skip that binding and record a problem; other bindings - // continue processing (IMP-CLI-022, IMP-MAP-011). + r.status.Stage("Reconcile", "applying scan configurations to ACS") for _, binding := range bindings { r.processBinding(ctx, binding, existingNames, rec, collector, builder) } @@ -107,21 +111,25 @@ func (r *Runner) Run(ctx context.Context) int { // Step 4: build the final report. finalReport := builder.Build(collector.All()) - // Step 5: write JSON report when requested (IMP-CLI-021). + // Step 5: write JSON report when requested. if r.cfg.ReportJSON != "" { + r.status.Stage("Report", "writing JSON report") if err := builder.WriteJSON(r.cfg.ReportJSON, finalReport); err != nil { - r.printf("WARNING: failed to write JSON report to %q: %v\n", r.cfg.ReportJSON, err) + r.status.Warnf("failed to write JSON report to %q: %v", r.cfg.ReportJSON, err) + } else { + r.status.OKf("report written to %s", r.cfg.ReportJSON) } } - // Step 6: print console summary (IMP-CLI-020). + // Step 6: print console summary. + r.printf("\n") r.printSummary(finalReport) - // Step 7: determine exit code (IMP-CLI-017..019, IMP-ERR-003). + // Step 7: determine exit code. if finalReport.Counts.Failed > 0 || collector.HasErrors() { - return ExitPartialError // IMP-CLI-019 + return ExitPartialError } - return ExitSuccess // IMP-CLI-017 + return ExitSuccess } // processBinding handles a single ScanSettingBinding: fetches its ScanSetting, @@ -130,7 +138,7 @@ func (r *Runner) Run(ctx context.Context) int { func (r *Runner) processBinding( ctx context.Context, binding cofetch.ScanSettingBinding, - existingNames map[string]bool, + existingNames map[string]string, rec *reconcile.Reconciler, collector *problems.Collector, builder *report.Builder, @@ -145,9 +153,10 @@ func (r *Runner) processBinding( ScanSettingName: binding.ScanSettingName, } - // Fetch the referenced ScanSetting (IMP-MAP-008..010). + // Fetch the referenced ScanSetting. 
ss, err := r.coClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) if err != nil { + r.status.Failf("%s → ScanSetting %q not found", binding.Name, binding.ScanSettingName) collector.Add(models.Problem{ Severity: models.SeverityError, Category: models.CategoryInput, @@ -165,10 +174,10 @@ func (r *Runner) processBinding( return } - // Map the CO resources to an ACS create payload (IMP-MAP-001..015). + // Map the CO resources to an ACS create payload. result := mapping.MapBinding(binding, ss, r.cfg) if result.Problem != nil { - // IMP-MAP-012..015: mapping problem => skip + record. + r.status.Failf("%s → mapping error: %s", binding.Name, result.Problem.Description) collector.Add(*result.Problem) builder.RecordItem(models.ReportItem{ Source: source, @@ -179,9 +188,20 @@ func (r *Runner) processBinding( return } - // Reconcile: create or skip (IMP-IDEM-001..007, IMP-ERR-001..004). + // Reconcile: create, update, or skip. action := rec.Apply(ctx, *result.Payload, source, existingNames) + switch action.ActionType { + case "create": + r.status.OKf("%s → created", binding.Name) + case "update": + r.status.OKf("%s → updated", binding.Name) + case "skip": + r.status.Detailf("%s → skipped (already exists)", binding.Name) + case "fail": + r.status.Failf("%s → %s", binding.Name, action.Reason) + } + item := models.ReportItem{ Source: action.Source, Action: action.ActionType, @@ -199,15 +219,12 @@ func (r *Runner) processBinding( } } -// printSummary writes the console summary to the configured output (IMP-CLI-020). +// printSummary writes the console summary to the configured output. 
func (r *Runner) printSummary(rep models.Report) { - dryRunLabel := "no" + mode := "live" if r.cfg.DryRun { - dryRunLabel = "yes" + mode = "dry-run" } - r.printf("CO->ACS importer summary [dry-run: %s]:\n", dryRunLabel) - r.printf(" Discovered: %d bindings\n", rep.Counts.Discovered) - r.printf(" Created: %d\n", rep.Counts.Create) - r.printf(" Skipped: %d\n", rep.Counts.Skip) - r.printf(" Failed: %d\n", rep.Counts.Failed) + r.status.Stagef("Done", "%s | discovered: %d, created: %d, updated: %d, skipped: %d, failed: %d", + mode, rep.Counts.Discovered, rep.Counts.Create, rep.Counts.Update, rep.Counts.Skip, rep.Counts.Failed) } diff --git a/scripts/compliance-operator-importer/internal/run/run_test.go b/scripts/compliance-operator-importer/internal/run/run_test.go index e10c0d83e74ad..44372ea9f08c9 100644 --- a/scripts/compliance-operator-importer/internal/run/run_test.go +++ b/scripts/compliance-operator-importer/internal/run/run_test.go @@ -46,6 +46,17 @@ func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSC return id, nil } +func (m *mockACSClient) UpdateScanConfiguration(_ context.Context, _ string, _ models.ACSCreatePayload) error { + // For now, this is a no-op in run tests since we focus on create-only mode. + // Update-specific tests are in reconcile_test.go. + return nil +} + +func (m *mockACSClient) ListClusters(_ context.Context) ([]models.ACSClusterInfo, error) { + // Not used in run tests, return empty list + return []models.ACSClusterInfo{}, nil +} + // Compile-time check: mockACSClient satisfies models.ACSClient. 
var _ models.ACSClient = (*mockACSClient)(nil) @@ -176,8 +187,8 @@ func TestIMP_CLI_018_ListACSConfigsFatalExitOne(t *testing.T) { if code != run.ExitFatalError { t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) } - if !strings.Contains(output, "FATAL") { - t.Errorf("IMP-CLI-018: expected FATAL message in output, got: %q", output) + if !strings.Contains(output, "✗") { + t.Errorf("IMP-CLI-018: expected failure marker in output, got: %q", output) } } @@ -193,8 +204,8 @@ func TestIMP_CLI_018_ListBindingsFatalExitOne(t *testing.T) { if code != run.ExitFatalError { t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) } - if !strings.Contains(output, "FATAL") { - t.Errorf("IMP-CLI-018: expected FATAL message in output, got: %q", output) + if !strings.Contains(output, "✗") { + t.Errorf("IMP-CLI-018: expected failure marker in output, got: %q", output) } } @@ -306,10 +317,10 @@ func TestIMP_CLI_020_ConsoleSummaryIncludesAllCounters(t *testing.T) { _, output := runWithCapture(t, baseConfig(), acsClient, coClient) requiredPhrases := []string{ - "Discovered:", - "Created:", - "Skipped:", - "Failed:", + "discovered:", + "created:", + "skipped:", + "failed:", } for _, phrase := range requiredPhrases { if !strings.Contains(output, phrase) { @@ -332,8 +343,8 @@ func TestIMP_CLI_020_DryRunLabelInSummary(t *testing.T) { _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) - if !strings.Contains(output, "dry-run: yes") { - t.Errorf("IMP-CLI-020: expected 'dry-run: yes' in output, got:\n%s", output) + if !strings.Contains(output, "dry-run") { + t.Errorf("IMP-CLI-020: expected 'dry-run' in output, got:\n%s", output) } } @@ -350,8 +361,8 @@ func TestIMP_CLI_020_NonDryRunLabelInSummary(t *testing.T) { _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) - if !strings.Contains(output, "dry-run: no") { - t.Errorf("IMP-CLI-020: expected 'dry-run: no' in output, got:\n%s", output) + if 
!strings.Contains(output, "live") { + t.Errorf("IMP-CLI-020: expected 'live' in output, got:\n%s", output) } } @@ -376,17 +387,17 @@ func TestIMP_CLI_020_CorrectCountsInSummary(t *testing.T) { _, output := runWithCapture(t, baseConfig(), acsClient, coClient) - if !strings.Contains(output, "Discovered: 3") { - t.Errorf("IMP-CLI-020: expected 'Discovered: 3' in output, got:\n%s", output) + if !strings.Contains(output, "discovered: 3") { + t.Errorf("IMP-CLI-020: expected 'discovered: 3' in output, got:\n%s", output) } - if !strings.Contains(output, "Created: 2") { - t.Errorf("IMP-CLI-020: expected 'Created: 2' in output, got:\n%s", output) + if !strings.Contains(output, "created: 2") { + t.Errorf("IMP-CLI-020: expected 'created: 2' in output, got:\n%s", output) } - if !strings.Contains(output, "Skipped: 1") { - t.Errorf("IMP-CLI-020: expected 'Skipped: 1' in output, got:\n%s", output) + if !strings.Contains(output, "skipped: 1") { + t.Errorf("IMP-CLI-020: expected 'skipped: 1' in output, got:\n%s", output) } - if !strings.Contains(output, "Failed: 0") { - t.Errorf("IMP-CLI-020: expected 'Failed: 0' in output, got:\n%s", output) + if !strings.Contains(output, "failed: 0") { + t.Errorf("IMP-CLI-020: expected 'failed: 0' in output, got:\n%s", output) } } @@ -415,9 +426,9 @@ func TestIMP_ERR_004_APIErrorRecordedAsProblem(t *testing.T) { if code != run.ExitPartialError { t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) } - // Console summary must show 1 failed (IMP-CLI-020). - if !strings.Contains(output, "Failed: 1") { - t.Errorf("IMP-ERR-004: expected 'Failed: 1' in output, got:\n%s", output) + // Console summary must show 1 failed. 
+ if !strings.Contains(output, "failed: 1") { + t.Errorf("IMP-ERR-004: expected 'failed: 1' in output, got:\n%s", output) } } @@ -447,11 +458,11 @@ func TestIMP_ERR_004_MissingScanSettingRecordedAsProblem(t *testing.T) { if code != run.ExitPartialError { t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) } - if !strings.Contains(output, "Failed: 1") { - t.Errorf("IMP-ERR-004: expected 'Failed: 1' in output, got:\n%s", output) + if !strings.Contains(output, "failed: 1") { + t.Errorf("IMP-ERR-004: expected 'failed: 1' in output, got:\n%s", output) } - if !strings.Contains(output, "Created: 1") { - t.Errorf("IMP-ERR-004: expected 'Created: 1' in output, got:\n%s", output) + if !strings.Contains(output, "created: 1") { + t.Errorf("IMP-ERR-004: expected 'created: 1' in output, got:\n%s", output) } } @@ -515,7 +526,7 @@ func TestIMP_CLI_007_DryRunReportedAsCreate(t *testing.T) { _, output := runWithCapture(t, cfg, acsClient, coClient) - if !strings.Contains(output, "Created: 1") { - t.Errorf("IMP-CLI-007: expected 'Created: 1' (planned) in dry-run output, got:\n%s", output) + if !strings.Contains(output, "created: 1") { + t.Errorf("IMP-CLI-007: expected 'created: 1' (planned) in dry-run output, got:\n%s", output) } } diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md index ad48be126d8cf..7257b29fca6d9 100644 --- a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -8,49 +8,79 @@ Define the importer interface so it can be implemented and tested predictably. ### Required inputs -- **IMP-CLI-001**: importer MUST accept ACS endpoint (`--acs-endpoint` or `ACS_ENDPOINT`). 
-- **IMP-CLI-002**: importer MUST support ACS auth modes: - - token mode (default): bearer token from env var (`--acs-token-env`, default `ACS_API_TOKEN`), - - basic mode (optional): username/password. -- **IMP-CLI-003**: importer MUST support source cluster selection like `kubectl`: - - by default, use current kube context, - - optionally accept `--source-kubecontext ` to pick a specific context. +Note: flag names and environment variables are aligned with `roxctl` conventions. + +- **IMP-CLI-001**: importer MUST accept Central endpoint (`--endpoint` or `ROX_ENDPOINT`). + - if the value does not contain a scheme, importer MUST prepend `https://`. + - if the value starts with `http://`, importer MUST error. +- **IMP-CLI-002**: importer MUST support auth modes, auto-inferred from available credentials + (aligned with `roxctl` behavior — no explicit `--auth-mode` flag, no env-var-name indirection): + - token mode: when `ROX_API_TOKEN` is set, + - basic mode: when `ROX_ADMIN_PASSWORD` is set, + - if both are set: error ("ambiguous auth"), + - if neither is set: error with help text listing both options. +- **IMP-CLI-003**: importer MUST support multi-cluster source selection via two mechanisms: + - `--kubeconfig ` (repeatable): each path is a separate source cluster, using that file's + current context. This is the primary mechanism for users with one kubeconfig file per cluster. + - `--kubecontext ` (repeatable): selects contexts within the active kubeconfig + (set via `KUBECONFIG` env var or `~/.kube/config`). Use when a single merged kubeconfig + contains unique context names for all clusters. + - `--kubecontext all`: iterates all contexts in the active kubeconfig. + - `--kubeconfig` and `--kubecontext` are mutually exclusive (error if both given). + - when neither `--kubeconfig` nor `--kubecontext` is given, importer MUST use + the current kubeconfig context (single-cluster mode, backward compatible). 
+ - help text MUST suggest: + - using `--kubeconfig` (repeatable) when clusters have separate kubeconfig files, or + - merging kubeconfigs (`KUBECONFIG=a.yaml:b.yaml`) with unique context names + and using `--kubecontext`. - **IMP-CLI-004**: importer MUST support namespace scope: - - `--co-namespace ` for single namespace, or + - `--co-namespace ` (default `openshift-compliance`) for single namespace, or - `--co-all-namespaces` for cluster-wide scan. -- **IMP-CLI-005**: importer MUST accept one destination ACS cluster ID: - - `--acs-cluster-id `. - - all imported scan configs target this ACS cluster ID. +- **IMP-CLI-005**: importer MUST support ACS cluster identification via `--cluster`: + - by default (no `--cluster` flag), auto-discover the ACS cluster ID for each source + cluster (see IMP-MAP-016..018). + - `--cluster ` accepts three forms: + - UUID: used directly as the ACS cluster ID (single-cluster shorthand). + - name: resolved to an ACS cluster ID via `GET /v1/clusters` (single-cluster shorthand). + - `=` (repeatable): maps a specific kubeconfig context to + an ACS cluster, overriding auto-discovery for that context. ### Optional inputs -- **IMP-CLI-006**: importer mode is create-only for phase 1. +- **IMP-CLI-006**: importer default mode is create-only; `--overwrite-existing` enables update mode. - **IMP-CLI-007**: `--dry-run` MUST disable all ACS write operations. - **IMP-CLI-008**: `--report-json ` for structured report output. - **IMP-CLI-009**: `--request-timeout ` default `30s`. - **IMP-CLI-010**: `--max-retries ` default `5`, min `0`. - **IMP-CLI-011**: `--ca-cert-file ` optional. - **IMP-CLI-012**: `--insecure-skip-verify` default false; MUST require explicit flag. -- **IMP-CLI-023**: importer MUST accept `--acs-auth-mode` enum: - - `token` (default) - - `basic` -- **IMP-CLI-024**: for basic mode, importer MUST accept: - - `--acs-username` or `ACS_USERNAME` - - `--acs-password-env` (default `ACS_PASSWORD`) to read password from env var. 
-- **IMP-CLI-025**: importer MUST reject ambiguous auth config (for example, missing required values for chosen mode). +- **IMP-CLI-023**: (removed — auth mode is auto-inferred, see IMP-CLI-002). +- **IMP-CLI-024**: for basic mode: + - username is read from `--username` flag or `ROX_ADMIN_USER` env var (default `admin`). + - password is read from `ROX_ADMIN_PASSWORD` env var (no flag; aligned with roxctl). +- **IMP-CLI-025**: importer MUST reject ambiguous auth config: + - both `ROX_API_TOKEN` and `ROX_ADMIN_PASSWORD` are set → error, + - neither is set → error with help text. +- **IMP-CLI-027**: `--overwrite-existing` (default `false`): + - when `false`: existing ACS scan configs with matching `scanName` are skipped (create-only). + - when `true`: existing ACS scan configs with matching `scanName` are updated via + `PUT /v2/compliance/scan/configurations/{id}`. ## Preflight checks -- **IMP-CLI-013**: endpoint MUST be `https://`. -- **IMP-CLI-014**: auth material for selected mode MUST be non-empty: - - token mode: resolved token is non-empty, - - basic mode: username and password are non-empty. +- **IMP-CLI-013**: `--endpoint` MUST use HTTPS: + - bare hostname/port (no scheme) → `https://` is prepended automatically, + - `https://...` → accepted as-is, + - `http://...` → error. +- **IMP-CLI-014**: auth material for inferred mode MUST be non-empty: + - token mode: `ROX_API_TOKEN` is non-empty, + - basic mode: `ROX_ADMIN_PASSWORD` is non-empty (username defaults to `admin`). - **IMP-CLI-015**: importer MUST probe ACS auth with: - `GET /v2/compliance/scan/configurations?pagination.limit=1` - using selected auth mode, - success only on HTTP 200. - **IMP-CLI-016**: HTTP 401/403 at preflight MUST fail-fast with remediation message. -- **IMP-CLI-026**: when auth mode is not explicitly set, importer MUST default to `token`. +- **IMP-CLI-026**: (removed — auth mode is auto-inferred, see IMP-CLI-002). 
## Output contract @@ -70,7 +100,7 @@ Define the importer interface so it can be implemented and tested predictably. ### JSON report shape - **IMP-CLI-021**: when `--report-json` is set, write valid JSON with: - - `meta` (timestamp, dryRun, namespaceScope, mode=`create-only`) + - `meta` (timestamp, dryRun, namespaceScope, mode=`create-only` | `create-or-update`) - `counts` (discovered, create, skip, failed) - `items[]`: - `source` (`namespace`, `bindingName`, `scanSettingName`) @@ -92,11 +122,17 @@ Define the importer interface so it can be implemented and tested predictably. - append one `problems[]` entry with `description` and `fixHint`, - continue processing other resources. -## Existing ACS config behavior (create-only) - -- **IMP-IDEM-001**: if `scanName` already exists in ACS, importer MUST skip that source resource. -- **IMP-IDEM-002**: skipped-existing resources MUST be added to `problems[]` with category `conflict` and a fix hint. -- **IMP-IDEM-003**: create-only phase MUST NOT send `PUT` updates. +## Existing ACS config behavior +
+- **IMP-IDEM-001**: when `--overwrite-existing` is `false` (default) and `scanName` already exists + in ACS, importer MUST skip that source resource. +- **IMP-IDEM-002**: skipped-existing resources MUST be added to `problems[]` with category `conflict` + and a fix hint. +- **IMP-IDEM-003**: when `--overwrite-existing` is `false`, importer MUST NOT send `PUT` updates. +- **IMP-IDEM-008**: when `--overwrite-existing` is `true` and `scanName` already exists in ACS, + importer MUST update it via `PUT /v2/compliance/scan/configurations/{id}`. +- **IMP-IDEM-009**: when `--overwrite-existing` is `true` and `scanName` does not exist, + importer MUST create it via `POST` (same create path as create-only mode). 
Example minimal report skeleton: diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature index ccc5d737616ca..45b0821f6582b 100644 --- a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -47,12 +47,81 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat And payload.scanConfig.description SHOULD include settings reference context # IMP-MAP-006 @mapping @clusters - Scenario: Use single destination ACS cluster ID - Given importer flag --acs-cluster-id is "cluster-a" - When the importer builds the destination payload - Then payload.clusters MUST equal: - | value | - | cluster-a | # IMP-MAP-007 + Scenario: Auto-discover ACS cluster ID from admission-control ConfigMap + Given kubecontext "ctx-a" points to a secured cluster + And ConfigMap "admission-control" in namespace "stackrox" has data key "cluster-id" = "uuid-a" + When the importer resolves the ACS cluster ID for "ctx-a" + Then the resolved ACS cluster ID MUST be "uuid-a" # IMP-MAP-016 + + @mapping @clusters + Scenario: Fallback to OpenShift ClusterVersion for cluster matching + Given kubecontext "ctx-b" points to an OpenShift cluster + And ConfigMap "admission-control" is not readable + And ClusterVersion "version" has spec.clusterID "ocp-uuid-b" + And ACS cluster list contains a cluster with providerMetadata.cluster.id "ocp-uuid-b" and ACS ID "acs-uuid-b" + When the importer resolves the ACS cluster ID for "ctx-b" + Then the resolved ACS cluster ID MUST be "acs-uuid-b" # IMP-MAP-017 + + @mapping @clusters + Scenario: Fallback to helm-effective-cluster-name for cluster matching + Given kubecontext "ctx-c" points to a cluster + And ConfigMap "admission-control" is not readable + And ClusterVersion is not available + And Secret "helm-effective-cluster-name" has data key 
"cluster-name" = "my-cluster" + And ACS cluster list contains a cluster named "my-cluster" with ACS ID "acs-uuid-c" + When the importer resolves the ACS cluster ID for "ctx-c" + Then the resolved ACS cluster ID MUST be "acs-uuid-c" # IMP-MAP-018 + + @mapping @clusters + Scenario: Manual override via --cluster flag (context=name form) + Given importer flag --cluster is "ctx-a=my-acs-cluster" + And ACS cluster list contains a cluster named "my-acs-cluster" with ACS ID "acs-uuid-a" + When the importer resolves the ACS cluster ID for "ctx-a" + Then the resolved ACS cluster ID MUST be "acs-uuid-a" # IMP-MAP-007 + + @mapping @clusters + Scenario: Single-cluster shorthand via --cluster with name + Given importer flag --cluster is "my-acs-cluster" (no = separator) + And ACS cluster list contains a cluster named "my-acs-cluster" with ACS ID "acs-uuid-a" + When the importer resolves the ACS cluster ID + Then the resolved ACS cluster ID MUST be "acs-uuid-a" # IMP-MAP-022 + + @mapping @clusters + Scenario: Single-cluster shorthand via --cluster with UUID + Given importer flag --cluster is "acs-uuid-a" (no = separator, valid UUID) + When the importer resolves the ACS cluster ID + Then the resolved ACS cluster ID MUST be "acs-uuid-a" (used directly) # IMP-MAP-023 + + @mapping @clusters @multicluster + Scenario: Merge SSBs with same name across clusters + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" + And ctx-a resolves to ACS cluster ID "uuid-a" + And ctx-b resolves to ACS cluster ID "uuid-b" + When the importer merges SSBs across clusters + Then one ACS scan config MUST be created with scanName "cis-weekly" # IMP-MAP-019 + And payload.clusters MUST equal: + | value | + | uuid-a | + | uuid-b | # IMP-MAP-021 + + @mapping @clusters @multicluster @error + Scenario: Error when same-name SSBs have 
mismatched profiles + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis", "ocp4-moderate"] + When the importer merges SSBs across clusters + Then "cis-weekly" MUST be marked failed # IMP-MAP-020 + And problems list MUST include category "mapping" + And problem description MUST mention profile mismatch across clusters + + @mapping @clusters @multicluster @error + Scenario: Error when same-name SSBs have mismatched schedules + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with schedule "0 2 * * 0" + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with schedule "0 3 * * 1" + When the importer merges SSBs across clusters + Then "cis-weekly" MUST be marked failed # IMP-MAP-020 + And problems list MUST include category "mapping" + And problem description MUST mention schedule mismatch across clusters @validation @mapping Scenario: Missing ScanSetting reference fails only that binding diff --git a/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature index abbec98d476fe..55b2e69f56230 100644 --- a/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature +++ b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature @@ -15,14 +15,31 @@ Feature: Create-only idempotency dry-run behavior and retry policy And action MUST be "create" @idempotency - Scenario: Skip when scanName already exists + Scenario: Skip when scanName already exists (default mode) Given ACS has scan configuration with scanName "cis-weekly" + And --overwrite-existing is false When importer executes in apply mode Then importer MUST NOT send PUT # IMP-IDEM-003 And action MUST be "skip" And reason MUST include "already exists" And problems list MUST include conflict category # IMP-IDEM-002 + 
@idempotency @overwrite + Scenario: Update when scanName already exists and --overwrite-existing is true + Given ACS has scan configuration with scanName "cis-weekly" and id "existing-id" + And --overwrite-existing is true + When importer executes in apply mode + Then importer MUST send PUT /v2/compliance/scan/configurations/existing-id # IMP-IDEM-008 + And action MUST be "update" + + @idempotency @overwrite + Scenario: Create when scanName does not exist and --overwrite-existing is true + Given ACS has no scan configuration with scanName "new-scan" + And --overwrite-existing is true + When importer executes in apply mode + Then importer MUST send POST /v2/compliance/scan/configurations # IMP-IDEM-009 + And action MUST be "create" + @dryrun Scenario: Dry-run performs no writes Given importer is started with --dry-run diff --git a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md index 4adeaa012d51c..feeb5bd31f49d 100644 --- a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md +++ b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md @@ -6,19 +6,20 @@ This document is the acceptance test contract for real-cluster validation. - `kubectl`, `curl`, `jq` installed. - Logged into target cluster containing Compliance Operator resources. -- ACS endpoint reachable from runner. +- Central endpoint reachable from runner. - Importer binary built locally. 
Set environment: ```bash -export ACS_ENDPOINT="https://central.stackrox.example.com:443" -export ACS_API_TOKEN="" -export ACS_USERNAME="" -export ACS_PASSWORD="" +export ROX_ENDPOINT="https://central.stackrox.example.com:443" +export ROX_API_TOKEN="" +export ROX_ADMIN_USER="admin" +export ROX_ADMIN_PASSWORD="" export CO_NAMESPACE="openshift-compliance" export IMPORTER_BIN="./bin/co-acs-scan-importer" -export ACS_CLUSTER_ID="" +# For multi-cluster: merge kubeconfigs +export KUBECONFIG="$HOME/.kube/config:$HOME/.kube/config-secured-cluster" ``` ## Acceptance checks @@ -42,15 +43,15 @@ Pass condition: ### A2 - ACS auth preflight -- **IMP-ACC-002**: ACS token and endpoint MUST pass read probe. +- **IMP-ACC-002**: token and endpoint MUST pass read probe. - **IMP-ACC-013**: optional basic-auth mode MUST pass read probe in local/dev environments. Command: ```bash curl -ksS \ - -H "Authorization: Bearer ${ACS_API_TOKEN}" \ - "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . + -H "Authorization: Bearer ${ROX_API_TOKEN}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . ``` Pass condition: @@ -61,22 +62,19 @@ Optional local/dev basic-auth probe: ```bash curl -ksS \ - -u "${ACS_USERNAME}:${ACS_PASSWORD}" \ - "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . + -u "${ROX_ADMIN_USER}:${ROX_ADMIN_PASSWORD}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . ``` ### A3 - Dry-run side-effect safety - **IMP-ACC-003**: dry-run MUST produce no writes. 
-Command: +Command (auto-discovery mode): ```bash "${IMPORTER_BIN}" \ - --acs-endpoint "${ACS_ENDPOINT}" \ - --acs-token-env ACS_API_TOKEN \ - --co-namespace "${CO_NAMESPACE}" \ - --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --endpoint "${ROX_ENDPOINT}" \ --dry-run \ --report-json "/tmp/co-acs-import-dryrun.json" ``` @@ -88,18 +86,15 @@ Pass conditions: - actions listed as planned only (no applied create/update markers), - `problems[]` is present and contains `description` + `fixHint` for each problematic resource. -### A4 - Apply creates expected configs (create-only) +### A4 - Apply creates expected configs - **IMP-ACC-004**: apply mode MUST create missing target ACS configs. -Command: +Command (auto-discovery mode): ```bash "${IMPORTER_BIN}" \ - --acs-endpoint "${ACS_ENDPOINT}" \ - --acs-token-env ACS_API_TOKEN \ - --co-namespace "${CO_NAMESPACE}" \ - --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --endpoint "${ROX_ENDPOINT}" \ --report-json "/tmp/co-acs-import-apply.json" ``` @@ -107,8 +102,8 @@ Verify: ```bash curl -ksS \ - -H "Authorization: Bearer ${ACS_API_TOKEN}" \ - "${ACS_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=200" | \ + -H "Authorization: Bearer ${ROX_API_TOKEN}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=200" | \ jq '.configurations[] | {id, scanName, profiles: .scanConfig.profiles, description: .scanConfig.description}' ``` @@ -125,10 +120,7 @@ Command: ```bash "${IMPORTER_BIN}" \ - --acs-endpoint "${ACS_ENDPOINT}" \ - --acs-token-env ACS_API_TOKEN \ - --co-namespace "${CO_NAMESPACE}" \ - --acs-cluster-id "${ACS_CLUSTER_ID}" \ + --endpoint "${ROX_ENDPOINT}" \ --report-json "/tmp/co-acs-import-second-run.json" ``` @@ -137,16 +129,35 @@ Pass conditions: - report shows skip actions for already-existing scan names, - no net changes in ACS list output. 
-### A6 - Existing config behavior (create-only) +### A6 - Existing config behavior -- **IMP-ACC-006**: existing scan names MUST be skipped and recorded in `problems[]`. +- **IMP-ACC-006**: without `--overwrite-existing`, existing scan names MUST be skipped + and recorded in `problems[]`. +- **IMP-ACC-014**: with `--overwrite-existing`, existing scan names MUST be updated via PUT. -Procedure: +Procedure (create-only): 1. Manually modify one imported ACS scan config (name unchanged). -2. Re-run importer. +2. Re-run importer without `--overwrite-existing`. 3. Verify that modified existing config is not updated and is captured as skipped conflict. +Procedure (overwrite): + +1. Re-run importer with `--overwrite-existing`. +2. Verify that the modified config is updated back to the imported state. + +### A8 - Multi-cluster merge + +- **IMP-ACC-015**: when the same SSB name exists on multiple source clusters with matching + profiles and schedule, importer MUST create one scan config targeting all resolved cluster IDs. +- **IMP-ACC-016**: when the same SSB name exists on multiple source clusters with different + profiles or schedule, importer MUST error for that SSB name. + +### A9 - Auto-discovery + +- **IMP-ACC-017**: importer MUST auto-discover the ACS cluster ID from the admission-control + ConfigMap's `cluster-id` key when no `--cluster` override is given. + ### A7 - Failure paths - **IMP-ACC-007**: invalid token MUST fail-fast with exit code `1`. diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md index 6b32f278b34f1..94672cf4f47e9 100644 --- a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -4,11 +4,11 @@ Use this matrix to ensure complete implementation coverage. 
|Requirement ID|Spec source|Test level|Notes| |---|---|---|---| -|IMP-CLI-001..026|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, token/basic auth modes, create-only report + problems list| -|IMP-MAP-001..015|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule handling, skip+problem behavior| -|IMP-IDEM-001..007|`03-idempotency-dry-run-retries.feature`|Unit + integration|Create-only idempotency and dry-run reporting| +|IMP-CLI-001..027|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, auth modes, multi-cluster, --overwrite-existing| +|IMP-MAP-001..023|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging, --cluster shorthand| +|IMP-IDEM-001..009|`03-idempotency-dry-run-retries.feature`|Unit + integration|Idempotency, overwrite mode (PUT), dry-run reporting| |IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| -|IMP-ACC-001..013|`04-validation-and-acceptance.md`|Acceptance|Real cluster and ACS verification| +|IMP-ACC-001..017|`04-validation-and-acceptance.md`|Acceptance|Real cluster, ACS verification, multi-cluster merge, auto-discovery| ## Coverage rule diff --git a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md index bb058d4e086fe..f92c22aa04407 100644 --- a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md +++ b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md @@ -137,34 +137,155 @@ Centralize error handling/reporting and enforce run outcomes. - "Implement Slice D with tests first for IMP-CLI-017..022 and IMP-ERR-001..004. Ensure problem list and exit code semantics exactly match spec." 
-## Slice E - End-to-end acceptance and tooling +## Slice E - Multi-cluster support and auto-discovery ### E Goal -Make real-cluster validation repeatable and scriptable. +Support multiple source clusters, auto-discover ACS cluster IDs, merge SSBs across clusters. ### E Requirement IDs -- `IMP-ACC-001..012` +- `IMP-CLI-003` (updated), `IMP-CLI-005` (updated), `IMP-CLI-027` +- `IMP-MAP-016..021` +- `IMP-ACC-015..017` ### E Implementation targets (suggested) +- `scripts/compliance-operator-importer/internal/config/config.go` (new flags) +- `scripts/compliance-operator-importer/internal/discover/discover.go` (new package: ACS cluster ID auto-discovery) +- `scripts/compliance-operator-importer/internal/cofetch/client.go` (multi-context support) +- `scripts/compliance-operator-importer/internal/merge/merge.go` (new package: SSB merging + mismatch detection) +- `scripts/compliance-operator-importer/internal/run/run.go` (orchestrate multi-cluster flow) + +### E Tests to add + +- `internal/discover/discover_test.go` +- `internal/merge/merge_test.go` +- `internal/config/config_test.go` (new flag tests) +- `internal/run/run_test.go` (multi-cluster integration) + +### E Acceptance signal + +- Auto-discovery resolves ACS cluster IDs from admission-control ConfigMap on real clusters. +- SSBs with same name across clusters produce one merged scan config. +- SSBs with same name but different profiles/schedule produce an error. + +### E Agent prompt seed + +- "Implement Slice E: multi-cluster support. Add --kubecontext (repeatable), auto-discover ACS cluster ID via admission-control ConfigMap (fallback: ClusterVersion, helm-effective-cluster-name), merge SSBs by name across clusters, error on profile/schedule mismatch." + +## Slice F - Overwrite-existing mode (PUT support) + +### F Goal + +Allow importer to update existing ACS scan configs instead of skipping them. 
+ +### F Requirement IDs + +- `IMP-CLI-027`, `IMP-IDEM-008..009`, `IMP-ACC-014` + +### F Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/models/models.go` (add UpdateScanConfiguration to ACSClient interface) +- `scripts/compliance-operator-importer/internal/acs/client.go` (implement PUT) +- `scripts/compliance-operator-importer/internal/reconcile/create_only.go` (rename to reconciler.go, add update path) +- `scripts/compliance-operator-importer/internal/config/config.go` (--overwrite-existing flag) + +### F Tests to add + +- `internal/reconcile/reconciler_test.go` (update path tests) +- `internal/acs/client_test.go` (PUT tests) + +### F Acceptance signal + +- With `--overwrite-existing`, existing scan configs are updated via PUT. +- Without the flag, behavior is unchanged (skip + conflict problem). + +### F Agent prompt seed + +- "Implement Slice F: --overwrite-existing flag. Add PUT to ACS client, update reconciler to call PUT when flag is set and scanName exists. Add UpdateScanConfiguration and DeleteScanConfiguration to ACSClient interface." + +## Slice G - End-to-end acceptance and tooling + +### G Goal + +Make real-cluster validation repeatable and scriptable. + +### G Requirement IDs + +- `IMP-ACC-001..017` + +### G Implementation targets (suggested) + - `scripts/compliance-operator-importer/hack/acceptance-run.sh` - `scripts/compliance-operator-importer/hack/check-report.sh` -### E Tests/checks to add +### G Tests/checks to add - lightweight script tests where practical. - documented manual acceptance evidence for cluster runs. -### E Acceptance signal +### G Acceptance signal - all commands/checks in `specs/04-validation-and-acceptance.md` are reproducible. -- include at least one real-cluster proof run against a live ACS endpoint (for example localhost:8443) with artifact output. +- include at least one real-cluster proof run against a live ACS endpoint with artifact output. 
+- multi-cluster and overwrite scenarios tested against real clusters. -### E Agent prompt seed +### G Agent prompt seed + +- "Implement Slice G automation helpers for IMP-ACC-001..017 and produce run artifacts paths for dry-run/apply/second-run/multi-cluster/overwrite checks." + +## Slice H - UX alignment with roxctl conventions + +### H Goal + +Rename all flags and env vars to match roxctl conventions. Remove unnecessary +indirection flags. Simplify auth inference and endpoint handling. + +### H Requirement IDs + +- `IMP-CLI-001` (updated: `--endpoint` / `ROX_ENDPOINT`, auto-prepend `https://`) +- `IMP-CLI-002` (updated: auto-infer auth from env vars, no `--auth-mode`) +- `IMP-CLI-005` (updated: unified `--cluster` flag accepting UUID, name, or ctx=value) +- `IMP-CLI-013` (updated: bare hostnames get `https://` prepended) +- `IMP-CLI-023` (removed: `--auth-mode`) +- `IMP-CLI-024` (updated: `--username` / `ROX_ADMIN_USER`, default `admin`; password from `ROX_ADMIN_PASSWORD`) +- `IMP-CLI-025` (updated: ambiguous = both token+password set) +- `IMP-CLI-026` (removed: auth mode inferred) +- `IMP-MAP-022..023` (new: `--cluster` single-value shorthand with name or UUID) + +### H Changes summary + +| Old | New | Notes | +|-----|-----|-------| +| `--acs-endpoint` / `ACS_ENDPOINT` | `--endpoint` / `ROX_ENDPOINT` | aligned with roxctl | +| `--acs-auth-mode` | (removed) | auto-inferred | +| `--acs-token-env` | (removed) | always reads `ROX_API_TOKEN` | +| `--acs-password-env` | (removed) | always reads `ROX_ADMIN_PASSWORD` | +| `--acs-username` / `ACS_USERNAME` | `--username` / `ROX_ADMIN_USER` (default `admin`) | aligned with roxctl | +| `--acs-cluster-id` | `--cluster` (UUID, name, or ctx=value) | unified | +| `--source-kubecontext` | (removed) | redundant with `--kubecontext` | + +### H Implementation targets + +- `internal/config/config.go` (flag renames, auth inference, endpoint normalization) +- `internal/models/models.go` (remove AuthMode, TokenEnv, PasswordEnv 
fields) +- `internal/preflight/preflight.go` (auth inference) +- `internal/acs/client.go` (read from fixed env vars) +- `internal/run/cluster_source.go` (unified `--cluster` parsing) +- `cmd/importer/main.go` + +### H Tests to update + +- `internal/config/config_test.go` +- `internal/config/config_multicluster_test.go` +- `internal/preflight/preflight_test.go` +- `internal/acs/client_test.go` +- `internal/run/run_test.go` + +### H Agent prompt seed -- "Implement Slice E automation helpers for IMP-ACC-001..012 and produce run artifacts paths for dry-run/apply/second-run checks." +- "Implement Slice H: rename all ACS-prefixed flags/env vars to roxctl conventions per the table above. Remove --auth-mode, --token-env, --password-env, --source-kubecontext. Auto-infer auth from env vars. Auto-prepend https:// for bare hostnames. Unify --cluster to accept UUID, name, or ctx=value." ## Cross-slice conventions @@ -172,16 +293,19 @@ Make real-cluster validation repeatable and scriptable. - Keep mapping logic side-effect free where possible. - Wrap external clients (k8s/ACS) behind interfaces for deterministic tests. - Never mutate CO resources. -- Keep create-only invariant explicit (guard rail test that fails on any `PUT` path). +- Guard rail test: without `--overwrite-existing`, no `PUT` is ever sent. - Verify behavior with real-world examples early and often, not only mocked tests. - Capture smoke-test commands and outputs in PR notes for traceability. ## Suggested execution order and ownership -1. Slice A (platform/entrypoint) -2. Slice B (domain mapping) -3. Slice C (ACS reconciliation) -4. Slice D (reporting + run orchestration) -5. Slice E (acceptance automation) +1. Slice A (platform/entrypoint) -- DONE +2. Slice B (domain mapping) -- DONE +3. Slice C (ACS reconciliation) -- DONE +4. Slice D (reporting + run orchestration) -- DONE +5. Slice E (multi-cluster + auto-discovery) +6. Slice F (overwrite-existing / PUT support) +7. 
Slice G (acceptance automation) +Slices E and F are independent and can be implemented in parallel. One agent per slice is ideal; if sequential, complete one slice fully before next. From 5ddea6c59ca009c424723d455d9f83c03fc74a43 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 00:57:24 +0100 Subject: [PATCH 05/24] feat(co-importer): add spec coverage check script hack/check-spec-coverage.sh extracts all IMP-* requirement IDs from specs/ and verifies each has at least one matching test in *_test.go. Handles range notation (IMP-CLI-001..016) and normalizes underscored Go test names to hyphenated form for matching. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../hack/check-spec-coverage.sh | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100755 scripts/compliance-operator-importer/hack/check-spec-coverage.sh diff --git a/scripts/compliance-operator-importer/hack/check-spec-coverage.sh b/scripts/compliance-operator-importer/hack/check-spec-coverage.sh new file mode 100755 index 0000000000000..5b2aaf90ad323 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/check-spec-coverage.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +# Checks that every IMP-* requirement ID defined in specs/ appears in at least +# one *_test.go file. Reports gaps and exits non-zero when any are found. +# +# USAGE: hack/check-spec-coverage.sh +# Run from the compliance-operator-importer directory. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +SPECS_DIR="$ROOT/specs" +SRC_DIR="$ROOT/internal" + +# Extract unique IMP-*-NNN IDs from spec files (markdown + feature). +# Handles both "IMP-FOO-001" and range notation "IMP-FOO-001..005". 
+extract_spec_ids() { + local ids=() + + # Direct IDs: IMP-XXX-NNN + while IFS= read -r id; do + ids+=("$id") + done < <(grep -ohrE 'IMP-[A-Z]+-[0-9]+' "$SPECS_DIR" | sort -u) + + # Range IDs: IMP-XXX-NNN..MMM → expand to individual IDs + while IFS= read -r range_match; do + local prefix num_start num_end + prefix=$(echo "$range_match" | grep -oE 'IMP-[A-Z]+-') + num_start=$(echo "$range_match" | grep -oE '[0-9]+' | head -1) + num_end=$(echo "$range_match" | grep -oE '[0-9]+' | tail -1) + # Strip leading zeros for arithmetic + local start=$((10#$num_start)) + local end=$((10#$num_end)) + local width=${#num_start} + for ((i = start; i <= end; i++)); do + ids+=("$(printf "%s%0${width}d" "$prefix" "$i")") + done + done < <(grep -ohrE 'IMP-[A-Z]+-[0-9]+\.\.[0-9]+' "$SPECS_DIR" | sort -u) + + # Deduplicate and sort + printf '%s\n' "${ids[@]}" | sort -u +} + +# Extract IDs referenced in test files. +# Matches both IMP-CLI-001 (in comments) and IMP_CLI_001 (in Go identifiers). +extract_test_ids() { + grep -ohrE 'IMP[-_][A-Z]+[-_][0-9]+' "$SRC_DIR" --include='*_test.go' \ + | sed 's/_/-/g' \ + | sort -u +} + +# IDs explicitly marked as "(removed)" in specs — no test needed. +extract_removed_ids() { + grep -E '\(removed' "$SPECS_DIR"/*.md "$SPECS_DIR"/*.feature 2>/dev/null \ + | grep -oE 'IMP-[A-Z]+-[0-9]+' \ + | sort -u +} + +spec_ids=$(extract_spec_ids) +test_ids=$(extract_test_ids) +removed_ids=$(extract_removed_ids) + +# IMP-ACC-* are acceptance test IDs (real-cluster tests, not unit tests). +# They are tracked separately and excluded from the gap report. 
+missing=() +covered=0 +skipped=0 +total=0 + +while IFS= read -r id; do + total=$((total + 1)) + + # Skip acceptance test IDs (IMP-ACC-*) + if [[ "$id" == IMP-ACC-* ]]; then + skipped=$((skipped + 1)) + continue + fi + + # Skip removed IDs + if echo "$removed_ids" | grep -qxF "$id"; then + skipped=$((skipped + 1)) + continue + fi + + if echo "$test_ids" | grep -qxF "$id"; then + covered=$((covered + 1)) + else + missing+=("$id") + fi +done <<< "$spec_ids" + +echo "Spec coverage report" +echo "====================" +echo "Total IDs in specs: $total" +echo "Covered by tests: $covered" +echo "Skipped (ACC/removed): $skipped" +echo "Missing test coverage: ${#missing[@]}" +echo "" + +if [[ ${#missing[@]} -gt 0 ]]; then + echo "GAPS (IDs with no *_test.go reference):" + for id in "${missing[@]}"; do + # Show which spec file defines this ID. + file=$(grep -rlE "\b${id}\b" "$SPECS_DIR" | head -1 | xargs basename 2>/dev/null || echo "?") + echo " $id ($file)" + done + echo "" + echo "FAIL: ${#missing[@]} requirement(s) lack test coverage." + exit 1 +else + echo "OK: all testable requirements are covered." + exit 0 +fi From 63d9eabc75d4c0b04c47dab0316cc87e5315ec29 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 00:58:18 +0100 Subject: [PATCH 06/24] feat(co-importer): add e2e test framework for real-cluster testing Add e2e/e2e_test.go (build tag: e2e) and hack/run-e2e.sh wrapper. Tests run against a real ACS + Compliance Operator cluster using ROX_ENDPOINT and ROX_API_TOKEN/ROX_ADMIN_PASSWORD env vars. Covers IMP-ACC-001 (CO resources listable), ACC-002 (auth preflight), ACC-003 (dry-run no writes), ACC-004 (apply creates configs), ACC-005 (idempotent second run), ACC-007 (invalid token fails fast), ACC-012 (problems have fix hints), ACC-014 (overwrite updates), ACC-017 (auto-discover cluster ID). Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../compliance-operator-importer/e2e/doc.go | 3 + .../e2e/e2e_test.go | 611 ++++++++++++++++++ .../hack/run-e2e.sh | 51 ++ 3 files changed, 665 insertions(+) create mode 100644 scripts/compliance-operator-importer/e2e/doc.go create mode 100644 scripts/compliance-operator-importer/e2e/e2e_test.go create mode 100755 scripts/compliance-operator-importer/hack/run-e2e.sh diff --git a/scripts/compliance-operator-importer/e2e/doc.go b/scripts/compliance-operator-importer/e2e/doc.go new file mode 100644 index 0000000000000..0680e78e92b3c --- /dev/null +++ b/scripts/compliance-operator-importer/e2e/doc.go @@ -0,0 +1,3 @@ +// Package e2e contains end-to-end acceptance tests that run against a real +// ACS + Compliance Operator cluster. Tests require the "e2e" build tag. +package e2e diff --git a/scripts/compliance-operator-importer/e2e/e2e_test.go b/scripts/compliance-operator-importer/e2e/e2e_test.go new file mode 100644 index 0000000000000..5ac5dd5aad081 --- /dev/null +++ b/scripts/compliance-operator-importer/e2e/e2e_test.go @@ -0,0 +1,611 @@ +//go:build e2e + +// Package e2e runs acceptance tests against a real ACS + Compliance Operator +// cluster. These tests exercise the importer binary end-to-end. 
//
// Required environment:
//
//	ROX_ENDPOINT ACS Central URL (bare hostname OK, https:// prepended)
//	ROX_ADMIN_PASSWORD Basic auth password (or ROX_API_TOKEN for token auth)
//
// Optional:
//
//	CO_NAMESPACE CO namespace (default: openshift-compliance)
//	E2E_KEEP_CONFIGS Set to "1" to skip cleanup of created scan configs
//
// Run:
//
//	go test -tags e2e -v -count=1 ./e2e/
//	# or via the convenience wrapper:
//	hack/run-e2e.sh
package e2e

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
	"time"
)

// ---------------------------------------------------------------------------
// Global state set in TestMain
// ---------------------------------------------------------------------------

var (
	importerBin string // path to compiled binary, built once in TestMain and shared by all tests
	endpoint    string // ACS Central URL (with https:// scheme guaranteed)
	coNamespace string // Compliance Operator namespace under test
)

// TestMain validates the environment, builds the importer binary once into a
// temp directory, runs the suite, and cleans up. Missing configuration exits
// with code 0 — a deliberate "skip", so runs without a cluster stay green —
// while a failed build exits with code 1.
func TestMain(m *testing.M) {
	endpoint = os.Getenv("ROX_ENDPOINT")
	if endpoint == "" {
		fmt.Fprintln(os.Stderr, "SKIP: ROX_ENDPOINT not set")
		os.Exit(0)
	}
	// Accept a bare hostname and normalize it to an https:// URL.
	if !strings.HasPrefix(endpoint, "https://") {
		endpoint = "https://" + endpoint
	}

	// At least one auth mechanism must be configured.
	hasToken := os.Getenv("ROX_API_TOKEN") != ""
	hasPassword := os.Getenv("ROX_ADMIN_PASSWORD") != ""
	if !hasToken && !hasPassword {
		fmt.Fprintln(os.Stderr, "SKIP: neither ROX_API_TOKEN nor ROX_ADMIN_PASSWORD set")
		os.Exit(0)
	}

	coNamespace = os.Getenv("CO_NAMESPACE")
	if coNamespace == "" {
		coNamespace = "openshift-compliance"
	}

	// Build the importer binary.
	tmpDir, err := os.MkdirTemp("", "co-importer-e2e-*")
	if err != nil {
		fmt.Fprintf(os.Stderr, "FAIL: mktemp: %v\n", err)
		os.Exit(1)
	}
	importerBin = filepath.Join(tmpDir, "co-acs-scan-importer")

	// Build from the module root, one level above this e2e package.
	cmd := exec.Command("go", "build", "-o", importerBin, "./cmd/importer/")
	cmd.Dir = filepath.Join(mustGetwd(), "..")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "FAIL: build importer: %v\n", err)
		os.Exit(1)
	}
	fmt.Fprintf(os.Stderr, "Built importer: %s\n", importerBin)

	code := m.Run()

	// os.Exit skips deferred calls, so remove the temp dir explicitly first.
	os.RemoveAll(tmpDir)
	os.Exit(code)
}

// mustGetwd returns the current working directory or panics. Only used
// during TestMain setup, where a failure is unrecoverable anyway.
func mustGetwd() string {
	d, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	return d
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

// importerResult captures a single importer invocation.
type importerResult struct {
	exitCode int
	stdout   string
	stderr   string
	report   *report // nil if no --report-json
}

// report mirrors the JSON report structure (subset).
type report struct {
	Meta struct {
		DryRun         bool   `json:"dryRun"`
		NamespaceScope string `json:"namespaceScope"`
		Mode           string `json:"mode"`
	} `json:"meta"`
	Counts struct {
		Discovered int `json:"discovered"`
		Create     int `json:"create"`
		Update     int `json:"update"`
		Skip       int `json:"skip"`
		Failed     int `json:"failed"`
	} `json:"counts"`
	Items    []reportItem `json:"items"`
	Problems []problem    `json:"problems"`
}

// reportItem is one per-binding entry in the report's items array.
type reportItem struct {
	Source struct {
		Namespace       string `json:"namespace"`
		BindingName     string `json:"bindingName"`
		ScanSettingName string `json:"scanSettingName"`
	} `json:"source"`
	Action          string `json:"action"`
	Reason          string `json:"reason"`
	Attempts        int    `json:"attempts"`
	ACSScanConfigID string `json:"acsScanConfigId"`
	Error           string `json:"error"`
}

// problem is one entry in the report's problems array.
type problem struct {
	Severity    string `json:"severity"`
	Category    string `json:"category"`
	ResourceRef string `json:"resourceRef"`
	Description string `json:"description"`
	FixHint     string `json:"fixHint"`
	Skipped     bool   `json:"skipped"`
}

// runImporter executes the importer binary with the given extra args.
// It always passes --endpoint, --insecure-skip-verify, and --co-namespace.
// If reportJSON is true, a temp file is used and the report is parsed.
// A process that cannot be executed at all fails the test; a report that
// is missing, empty, or unparseable is tolerated (result.report stays
// nil), so callers must nil-check it.
func runImporter(t *testing.T, reportJSON bool, extraArgs ...string) importerResult {
	t.Helper()

	args := []string{
		"--endpoint", endpoint,
		"--insecure-skip-verify",
		"--co-namespace", coNamespace,
	}
	args = append(args, extraArgs...)

	var reportPath string
	if reportJSON {
		f, err := os.CreateTemp("", "e2e-report-*.json")
		if err != nil {
			t.Fatalf("create temp report file: %v", err)
		}
		// Only the file name is needed; the importer writes the file itself.
		f.Close()
		reportPath = f.Name()
		t.Cleanup(func() { os.Remove(reportPath) })
		args = append(args, "--report-json", reportPath)
	}

	// NOTE(review): context.Background() adds no cancellation or deadline
	// here; runtime appears to be bounded only by go test's own -timeout.
	cmd := exec.CommandContext(context.Background(), importerBin, args...)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	err := cmd.Run()
	exitCode := 0
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			exitCode = exitErr.ExitCode()
		} else {
			// Not an exit status (e.g. binary missing): this is a test bug,
			// not an importer outcome.
			t.Fatalf("exec importer: %v", err)
		}
	}

	result := importerResult{
		exitCode: exitCode,
		stdout:   stdout.String(),
		stderr:   stderr.String(),
	}

	if reportJSON && reportPath != "" {
		data, err := os.ReadFile(reportPath)
		// Missing or empty report files are tolerated; parse errors are
		// logged but not fatal.
		if err == nil && len(data) > 0 {
			var r report
			if err := json.Unmarshal(data, &r); err != nil {
				t.Logf("WARNING: report JSON parse error: %v", err)
			} else {
				result.report = &r
			}
		}
	}

	return result
}

// acsConfigSummary is a scan config from the ACS list API.
type acsConfigSummary struct {
	ID       string `json:"id"`
	ScanName string `json:"scanName"`
}

// acsListConfigs returns all scan configurations from ACS.
// The page size is capped at 1000, which is ample at e2e scale.
func acsListConfigs(t *testing.T) []acsConfigSummary {
	t.Helper()
	body := acsGet(t, "/v2/compliance/scan/configurations?pagination.limit=1000")

	var resp struct {
		Configurations []acsConfigSummary `json:"configurations"`
	}
	if err := json.Unmarshal(body, &resp); err != nil {
		t.Fatalf("parse ACS list response: %v", err)
	}
	return resp.Configurations
}

// acsDeleteConfig deletes a scan config by ID. Failures are logged as
// warnings rather than failing the test, because this runs from cleanup
// paths where the test outcome has already been decided.
func acsDeleteConfig(t *testing.T, id string) {
	t.Helper()
	req := acsRequest(t, http.MethodDelete, "/v2/compliance/scan/configurations/"+id, nil)
	resp, err := acsHTTPClient().Do(req)
	if err != nil {
		t.Logf("WARNING: delete scan config %s: %v", id, err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		t.Logf("WARNING: delete scan config %s: HTTP %d", id, resp.StatusCode)
	}
}

// acsGet does a GET request to ACS and returns the body.
+func acsGet(t *testing.T, path string) []byte { + t.Helper() + req := acsRequest(t, http.MethodGet, path, nil) + resp, err := acsHTTPClient().Do(req) + if err != nil { + t.Fatalf("ACS GET %s: %v", path, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("ACS GET %s: HTTP %d", path, resp.StatusCode) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("ACS GET %s: read body: %v", path, err) + } + return body +} + +func acsRequest(t *testing.T, method, path string, body io.Reader) *http.Request { + t.Helper() + url := endpoint + path + req, err := http.NewRequest(method, url, body) + if err != nil { + t.Fatalf("build ACS request: %v", err) + } + req.Header.Set("Accept", "application/json") + + if token := os.Getenv("ROX_API_TOKEN"); token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } else { + user := os.Getenv("ROX_ADMIN_USER") + if user == "" { + user = "admin" + } + req.SetBasicAuth(user, os.Getenv("ROX_ADMIN_PASSWORD")) + } + return req +} + +func acsHTTPClient() *http.Client { + return &http.Client{ + Timeout: 30 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // e2e test + }, + } +} + +// configIDsByPrefix returns the IDs of scan configs whose name starts with prefix. +func configIDsByPrefix(t *testing.T, prefix string) []string { + t.Helper() + var ids []string + for _, c := range acsListConfigs(t) { + if strings.HasPrefix(c.ScanName, prefix) { + ids = append(ids, c.ID) + } + } + return ids +} + +// cleanupConfigsByPrefix deletes all scan configs matching prefix, unless +// E2E_KEEP_CONFIGS is set. 
+func cleanupConfigsByPrefix(t *testing.T, prefix string) { + if os.Getenv("E2E_KEEP_CONFIGS") == "1" { + t.Logf("E2E_KEEP_CONFIGS=1, skipping cleanup for prefix %q", prefix) + return + } + for _, id := range configIDsByPrefix(t, prefix) { + acsDeleteConfig(t, id) + t.Logf("cleaned up scan config %s", id) + } +} + +// scanConfigExists returns true if a scan config with the given name exists. +func scanConfigExists(t *testing.T, name string) bool { + t.Helper() + for _, c := range acsListConfigs(t) { + if c.ScanName == name { + return true + } + } + return false +} + +// countSSBs returns the number of ScanSettingBindings in CO_NAMESPACE. +func countSSBs(t *testing.T) int { + t.Helper() + cmd := exec.Command("kubectl", "get", "scansettingbindings.compliance.openshift.io", + "-n", coNamespace, "-o", "json") + out, err := cmd.Output() + if err != nil { + t.Fatalf("kubectl list SSBs: %v", err) + } + var list struct { + Items []json.RawMessage `json:"items"` + } + if err := json.Unmarshal(out, &list); err != nil { + t.Fatalf("parse SSB list: %v", err) + } + return len(list.Items) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +// TestIMP_ACC_001_COResourcesListable verifies that CO resources can be listed +// from the target cluster. +func TestIMP_ACC_001_COResourcesListable(t *testing.T) { + for _, resource := range []string{ + "scansettingbindings.compliance.openshift.io", + "scansettings.compliance.openshift.io", + "profiles.compliance.openshift.io", + } { + t.Run(resource, func(t *testing.T) { + cmd := exec.Command("kubectl", "get", resource, "-n", coNamespace) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("kubectl get %s failed: %v\n%s", resource, err, out) + } + }) + } +} + +// TestIMP_ACC_002_AuthPreflight verifies that the importer can authenticate +// with ACS (both preflight probe and actual listing work). 
+func TestIMP_ACC_002_AuthPreflight(t *testing.T) { + // Just verify the ACS API is reachable with current creds. + body := acsGet(t, "/v2/compliance/scan/configurations?pagination.limit=1") + if len(body) == 0 { + t.Fatal("empty response from ACS preflight probe") + } +} + +// TestIMP_ACC_003_DryRunNoWrites verifies that dry-run produces no changes. +func TestIMP_ACC_003_DryRunNoWrites(t *testing.T) { + // Snapshot existing configs. + before := acsListConfigs(t) + + result := runImporter(t, true, "--dry-run") + + if result.exitCode != 0 && result.exitCode != 2 { + t.Fatalf("IMP-ACC-003: expected exit code 0 or 2, got %d\nstdout: %s\nstderr: %s", + result.exitCode, result.stdout, result.stderr) + } + + if result.report == nil { + t.Fatal("IMP-ACC-003: expected report JSON to be written") + } + if !result.report.Meta.DryRun { + t.Error("IMP-ACC-003: report meta.dryRun should be true") + } + + // Verify no new configs were created. + after := acsListConfigs(t) + if len(after) != len(before) { + t.Errorf("IMP-ACC-003: config count changed from %d to %d during dry-run", + len(before), len(after)) + } +} + +// TestIMP_ACC_004_ApplyCreatesConfigs verifies that apply mode creates +// ACS scan configs for discovered SSBs. 
+func TestIMP_ACC_004_ApplyCreatesConfigs(t *testing.T) { + nSSBs := countSSBs(t) + if nSSBs == 0 { + t.Skip("no ScanSettingBindings found in namespace " + coNamespace) + } + + result := runImporter(t, true) + + if result.exitCode != 0 && result.exitCode != 2 { + t.Fatalf("IMP-ACC-004: expected exit code 0 or 2, got %d\nstdout: %s\nstderr: %s", + result.exitCode, result.stdout, result.stderr) + } + + if result.report == nil { + t.Fatal("IMP-ACC-004: expected report") + } + + t.Logf("Discovered: %d, Created: %d, Skipped: %d, Failed: %d", + result.report.Counts.Discovered, + result.report.Counts.Create, + result.report.Counts.Skip, + result.report.Counts.Failed, + ) + + if result.report.Counts.Discovered == 0 { + t.Error("IMP-ACC-004: expected at least 1 discovered binding") + } + + // Verify created configs exist in ACS. + for _, item := range result.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + t.Logf("Created: %s (id=%s)", item.Source.BindingName, item.ACSScanConfigID) + } + } + + // Cleanup: delete configs we created. + t.Cleanup(func() { + for _, item := range result.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + acsDeleteConfig(t, item.ACSScanConfigID) + } + } + }) +} + +// TestIMP_ACC_005_IdempotentSecondRun verifies that a second run with the same +// inputs produces only skip actions (no new creates). +func TestIMP_ACC_005_IdempotentSecondRun(t *testing.T) { + nSSBs := countSSBs(t) + if nSSBs == 0 { + t.Skip("no ScanSettingBindings") + } + + // First run: create. 
+ r1 := runImporter(t, true) + if r1.exitCode != 0 && r1.exitCode != 2 { + t.Fatalf("first run exit code %d", r1.exitCode) + } + + var createdIDs []string + if r1.report != nil { + for _, item := range r1.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + createdIDs = append(createdIDs, item.ACSScanConfigID) + } + } + } + + t.Cleanup(func() { + for _, id := range createdIDs { + acsDeleteConfig(t, id) + } + }) + + // Second run: should be all skips. + r2 := runImporter(t, true) + if r2.exitCode != 0 && r2.exitCode != 2 { + t.Fatalf("IMP-ACC-005: second run exit code %d", r2.exitCode) + } + + if r2.report != nil && r2.report.Counts.Create > 0 { + t.Errorf("IMP-ACC-005: second run created %d configs (expected 0)", r2.report.Counts.Create) + } +} + +// TestIMP_ACC_007_InvalidTokenFailsFast verifies that an invalid token +// produces exit code 1 (fatal). +func TestIMP_ACC_007_InvalidTokenFailsFast(t *testing.T) { + // Override auth with a bad token. + cmd := exec.Command(importerBin, + "--endpoint", endpoint, + "--insecure-skip-verify", + "--co-namespace", coNamespace, + ) + cmd.Env = append(os.Environ(), + "ROX_API_TOKEN=definitely-not-a-valid-token", + "ROX_ADMIN_PASSWORD=", // clear password to avoid ambiguous auth + ) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + exitCode := 0 + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } + + if exitCode != 1 { + t.Errorf("IMP-ACC-007: expected exit code 1 for invalid token, got %d\nstdout: %s\nstderr: %s", + exitCode, stdout.String(), stderr.String()) + } +} + +// TestIMP_ACC_012_ProblemsHaveFixHints verifies that all problems in the +// report include description and fixHint fields. 
func TestIMP_ACC_012_ProblemsHaveFixHints(t *testing.T) {
	result := runImporter(t, true, "--dry-run")

	// Without a report there is nothing to check; treat as a skip, not a
	// failure (runImporter tolerates a missing/unparseable report).
	if result.report == nil {
		t.Skip("no report generated")
	}

	for i, p := range result.report.Problems {
		if p.Description == "" {
			t.Errorf("IMP-ACC-012: problem[%d] has empty description", i)
		}
		if p.FixHint == "" {
			t.Errorf("IMP-ACC-012: problem[%d] has empty fixHint (description: %s)", i, p.Description)
		}
	}
}

// TestIMP_ACC_017_AutoDiscoverClusterID verifies that the importer can
// auto-discover the ACS cluster ID without --cluster.
func TestIMP_ACC_017_AutoDiscoverClusterID(t *testing.T) {
	result := runImporter(t, true, "--dry-run")

	if result.exitCode == 1 {
		// Check if it's an auto-discovery failure. The "discover cluster ID"
		// marker in the output distinguishes it from other fatal errors.
		combined := result.stdout + result.stderr
		if strings.Contains(combined, "discover cluster ID") {
			t.Fatalf("IMP-ACC-017: auto-discovery failed:\n%s", combined)
		}
	}

	// If exit 0 or 2, auto-discovery succeeded (it's used implicitly when
	// no --cluster is given).
	if result.exitCode != 0 && result.exitCode != 2 {
		t.Errorf("IMP-ACC-017: expected exit 0 or 2, got %d\nstdout: %s\nstderr: %s",
			result.exitCode, result.stdout, result.stderr)
	}
}

// TestIMP_ACC_014_OverwriteExistingUpdates verifies that --overwrite-existing
// updates existing scan configs instead of skipping them.
func TestIMP_ACC_014_OverwriteExistingUpdates(t *testing.T) {
	nSSBs := countSSBs(t)
	if nSSBs == 0 {
		t.Skip("no ScanSettingBindings")
	}

	// First run: create.
	r1 := runImporter(t, true)
	if r1.report == nil || r1.report.Counts.Create == 0 {
		// Nothing was created (maybe everything already exists). Create-then-overwrite
		// test only makes sense when we create something.
+ t.Skip("no new configs created in first run") + } + + var createdIDs []string + for _, item := range r1.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + createdIDs = append(createdIDs, item.ACSScanConfigID) + } + } + t.Cleanup(func() { + for _, id := range createdIDs { + acsDeleteConfig(t, id) + } + }) + + // Second run with --overwrite-existing: should update, not skip. + r2 := runImporter(t, true, "--overwrite-existing") + if r2.exitCode != 0 && r2.exitCode != 2 { + t.Fatalf("overwrite run exit code %d", r2.exitCode) + } + + if r2.report == nil { + t.Fatal("IMP-ACC-014: expected report from overwrite run") + } + + if r2.report.Counts.Update == 0 && r2.report.Counts.Skip > 0 { + t.Error("IMP-ACC-014: expected updates with --overwrite-existing, got only skips") + } + t.Logf("Overwrite run: updated=%d, created=%d, skipped=%d", + r2.report.Counts.Update, r2.report.Counts.Create, r2.report.Counts.Skip) +} diff --git a/scripts/compliance-operator-importer/hack/run-e2e.sh b/scripts/compliance-operator-importer/hack/run-e2e.sh new file mode 100755 index 0000000000000..3b84a596e4b40 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/run-e2e.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Run e2e acceptance tests against a real ACS + Compliance Operator cluster. +# +# USAGE: +# hack/run-e2e.sh # run all e2e tests +# hack/run-e2e.sh -run TestIMP_ACC_003 # run a specific test +# +# Required environment: +# ROX_ENDPOINT ACS Central URL (bare hostname OK) +# ROX_ADMIN_PASSWORD Basic auth password (or ROX_API_TOKEN for token auth) +# +# Optional: +# CO_NAMESPACE CO namespace (default: openshift-compliance) +# E2E_KEEP_CONFIGS Set to "1" to skip cleanup of created scan configs +# +# The script builds the importer, then runs `go test -tags e2e` against e2e/. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +cd "$ROOT" + +# Validate prerequisites. 
+if [[ -z "${ROX_ENDPOINT:-}" ]]; then + echo "ERROR: ROX_ENDPOINT not set" >&2 + echo " export ROX_ENDPOINT=central-stackrox.apps.mycluster.example.com" >&2 + exit 1 +fi + +if [[ -z "${ROX_API_TOKEN:-}" ]] && [[ -z "${ROX_ADMIN_PASSWORD:-}" ]]; then + echo "ERROR: neither ROX_API_TOKEN nor ROX_ADMIN_PASSWORD is set" >&2 + echo " export ROX_API_TOKEN= # for token auth" >&2 + echo " export ROX_ADMIN_PASSWORD= # for basic auth" >&2 + exit 1 +fi + +command -v kubectl >/dev/null 2>&1 || { + echo "ERROR: kubectl not found in PATH" >&2 + exit 1 +} + +echo "=== E2E Test Configuration ===" +echo " ROX_ENDPOINT: ${ROX_ENDPOINT}" +echo " Auth mode: $(if [[ -n "${ROX_API_TOKEN:-}" ]]; then echo "token"; else echo "basic"; fi)" +echo " CO_NAMESPACE: ${CO_NAMESPACE:-openshift-compliance}" +echo " E2E_KEEP_CONFIGS: ${E2E_KEEP_CONFIGS:-0}" +echo "" + +# Run tests. Pass through any extra args (e.g. -run, -v). +exec go test -tags e2e -v -count=1 -timeout 5m ./e2e/ "$@" From 34dae17aea55f3c9db7c94453ca821f0ea5784d9 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 01:24:22 +0100 Subject: [PATCH 07/24] feat(co-importer): simplify cluster access to kubectl-native model (Slice I) Drop --kubeconfig, --kubecontext, and --cluster flags. The importer now uses all contexts from the merged kubeconfig by default, with --context (repeatable) as an opt-in filter. ACS cluster ID is always auto-discovered via admission-control ConfigMap, OpenShift ClusterVersion, or Helm secret. Specs cleaned: removed tombstone entries for deleted requirement IDs, removed Old/New change tables, removed all temporal language referencing past behavior that was never released. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../cmd/importer/main.go | 75 ++------ .../internal/cofetch/client.go | 26 --- .../internal/config/config.go | 133 +++---------- .../config/config_multicluster_test.go | 149 +++----------- .../internal/config/config_test.go | 179 ----------------- .../internal/discover/discover.go | 150 +++++++++++++++ .../internal/discover/discover_test.go | 169 ++++++++++++++++ .../internal/models/models.go | 33 ++-- .../internal/run/cluster_source.go | 181 +++++------------- .../specs/01-cli-and-config-contract.md | 38 +--- .../specs/02-co-to-acs-mapping.feature | 20 -- .../specs/04-validation-and-acceptance.md | 2 +- .../specs/05-traceability-matrix.md | 2 +- .../specs/06-implementation-backlog.md | 87 ++++----- 14 files changed, 499 insertions(+), 745 deletions(-) create mode 100644 scripts/compliance-operator-importer/internal/discover/discover.go create mode 100644 scripts/compliance-operator-importer/internal/discover/discover_test.go diff --git a/scripts/compliance-operator-importer/cmd/importer/main.go b/scripts/compliance-operator-importer/cmd/importer/main.go index 84078bb2920e9..9b59d0c6ba9a3 100644 --- a/scripts/compliance-operator-importer/cmd/importer/main.go +++ b/scripts/compliance-operator-importer/cmd/importer/main.go @@ -1,6 +1,6 @@ // Binary co-acs-scan-importer reads Compliance Operator ScanSettingBinding -// resources from one or more Kubernetes clusters and creates equivalent ACS -// compliance scan configurations through the ACS v2 API. +// resources from Kubernetes clusters and creates equivalent ACS compliance +// scan configurations through the ACS v2 API. // // Run with --help for full usage information and examples. 
package main @@ -11,7 +11,6 @@ import ( "os" "github.com/stackrox/co-acs-importer/internal/acs" - "github.com/stackrox/co-acs-importer/internal/cofetch" "github.com/stackrox/co-acs-importer/internal/config" "github.com/stackrox/co-acs-importer/internal/preflight" "github.com/stackrox/co-acs-importer/internal/run" @@ -49,64 +48,24 @@ func mainWithCode() int { return run.ExitFatalError } - // Resolve --cluster name lookup if needed (IMP-MAP-022). - if cfg.ClusterNameLookup != "" { - s.Stagef("Resolve", "looking up cluster %q in ACS", cfg.ClusterNameLookup) - clusters, err := acsClient.ListClusters(ctx) - if err != nil { - s.Failf("failed to list ACS clusters: %v", err) - return run.ExitFatalError - } - var found bool - for _, c := range clusters { - if c.Name == cfg.ClusterNameLookup { - cfg.ACSClusterID = c.ID - found = true - break - } - } - if !found { - s.Failf("cluster %q not found in ACS", cfg.ClusterNameLookup) - return run.ExitFatalError - } - s.OKf("resolved %q → %s", cfg.ClusterNameLookup, cfg.ACSClusterID) + // Build cluster sources from kubeconfig contexts. + if len(cfg.Contexts) > 0 { + s.Stagef("Discovery", "resolving %d specified contexts", len(cfg.Contexts)) + } else { + s.Stage("Discovery", "resolving all kubeconfig contexts") } - - // Multi-cluster mode or single-cluster with auto-discovery both use - // BuildClusterSources to resolve cluster IDs and create CO clients. 
- isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 - - if isMultiClusterMode || cfg.AutoDiscoverClusterID { - if isMultiClusterMode { - s.Stagef("Discovery", "resolving %d cluster sources", len(cfg.Kubeconfigs)+len(cfg.Kubecontexts)) - } else { - s.Stage("Discovery", "auto-discovering ACS cluster ID from current context") - } - sources, err := run.BuildClusterSources(ctx, cfg, acsClient) - if err != nil { - s.Failf("%v", err) - return run.ExitFatalError - } - for _, src := range sources { - s.OKf("%s → %s", src.Label, src.ACSClusterID) - } - - if isMultiClusterMode { - return run.NewRunner(cfg, acsClient, nil).RunMultiCluster(ctx, sources) - } - // Single-cluster with auto-discovered ID: use the resolved source. - cfg.ACSClusterID = sources[0].ACSClusterID - return run.NewRunner(cfg, acsClient, sources[0].COClient).Run(ctx) - } - - // Single-cluster mode with explicit --cluster UUID. - s.Stagef("Setup", "using cluster %s", cfg.ACSClusterID) - coClient, err := cofetch.NewClient(cfg) + sources, err := run.BuildClusterSources(ctx, cfg, acsClient) if err != nil { - s.Failf("failed to create CO client: %v", err) + s.Failf("%v", err) return run.ExitFatalError } - s.OK("CO client ready") + for _, src := range sources { + s.OKf("%s → %s", src.Label, src.ACSClusterID) + } - return run.NewRunner(cfg, acsClient, coClient).Run(ctx) + if len(sources) == 1 { + cfg.ACSClusterID = sources[0].ACSClusterID + return run.NewRunner(cfg, acsClient, sources[0].COClient).Run(ctx) + } + return run.NewRunner(cfg, acsClient, nil).RunMultiCluster(ctx, sources) } diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go index f92279b09bee8..f3e3d96f07337 100644 --- a/scripts/compliance-operator-importer/internal/cofetch/client.go +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -60,32 +60,6 @@ func NewClient(cfg *models.Config) (COClient, error) { }, nil } -// 
NewClientForKubeconfig creates a COClient from a specific kubeconfig file. -func NewClientForKubeconfig(kubeconfigPath string, namespace string, allNamespaces bool) (COClient, error) { - loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath} - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) - - restConfig, err := kubeConfig.ClientConfig() - if err != nil { - return nil, fmt.Errorf("build kubeconfig from %q: %w", kubeconfigPath, err) - } - - dynClient, err := dynamic.NewForConfig(restConfig) - if err != nil { - return nil, fmt.Errorf("create dynamic client from %q: %w", kubeconfigPath, err) - } - - ns := namespace - if allNamespaces { - ns = "" - } - - return &k8sClient{ - dynamic: dynClient, - namespace: ns, - }, nil -} - // NewClientForContext creates a COClient for a specific context in the active kubeconfig. func NewClientForContext(contextName string, namespace string, allNamespaces bool) (COClient, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() diff --git a/scripts/compliance-operator-importer/internal/config/config.go b/scripts/compliance-operator-importer/internal/config/config.go index 8921f76df34ae..f37a265c24e75 100644 --- a/scripts/compliance-operator-importer/internal/config/config.go +++ b/scripts/compliance-operator-importer/internal/config/config.go @@ -7,7 +7,6 @@ import ( "flag" "fmt" "os" - "regexp" "strings" "time" @@ -18,9 +17,6 @@ import ( // Callers should treat this as a successful exit (code 0). var ErrHelpRequested = errors.New("help requested") -// uuidPattern matches a standard UUID (8-4-4-4-12 hex). 
-var uuidPattern = regexp.MustCompile(`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`) - const ( defaultTimeout = 30 * time.Second defaultMaxRetries = 5 @@ -92,23 +88,12 @@ func ParseAndValidate(args []string) (*models.Config, error) { insecureSkipVerify := fs.Bool("insecure-skip-verify", false, "Skip TLS certificate verification. Not recommended for production.") - // --- Multi-cluster mode --- - var kubeconfigs []string - var kubecontexts []string - var clusterValues []string - fs.Var(&repeatableStringFlag{values: &kubeconfigs}, "kubeconfig", - "Path to a kubeconfig file (repeatable). Each file represents one source cluster.\n"+ - "The current context in each file is used. Mutually exclusive with --kubecontext.") - fs.Var(&repeatableStringFlag{values: &kubecontexts}, "kubecontext", - "Kubernetes context name (repeatable). Use \"all\" to iterate every context.\n"+ - "Operates on the active kubeconfig (set via KUBECONFIG env var or ~/.kube/config).\n"+ - "Mutually exclusive with --kubeconfig.") - fs.Var(&repeatableStringFlag{values: &clusterValues}, "cluster", - "ACS cluster identification (repeatable). 
Accepts three forms:\n"+ - " UUID: used directly as the ACS cluster ID (single-cluster).\n"+ - " name: resolved via GET /v1/clusters (single-cluster).\n"+ - " ctx=name-or-uuid: maps a kubeconfig context to an ACS cluster (multi-cluster).\n"+ - "Omit to auto-discover the ACS cluster ID.") + // --- Context filter --- + var contexts []string + fs.Var(&repeatableStringFlag{values: &contexts}, "context", + "Kubernetes context name to process (repeatable).\n"+ + "By default all contexts from the merged kubeconfig are used.\n"+ + "Use --context to limit processing to specific contexts.") if err := fs.Parse(args); err != nil { if errors.Is(err, flag.ErrHelp) { @@ -138,13 +123,7 @@ func ParseAndValidate(args []string) (*models.Config, error) { CACertFile: *caCertFile, InsecureSkipVerify: *insecureSkipVerify, OverwriteExisting: *overwriteExisting, - Kubeconfigs: kubeconfigs, - Kubecontexts: kubecontexts, - } - - // Classify --cluster values into overrides vs single-cluster shorthand. - if err := classifyClusterValues(clusterValues, cfg); err != nil { - return nil, err + Contexts: contexts, } // IMP-CLI-002: auto-infer auth mode from env vars. 
@@ -158,40 +137,6 @@ func ParseAndValidate(args []string) (*models.Config, error) { return cfg, nil } -// classifyClusterValues processes --cluster flag values: -// - ctx=value → ClusterOverrides (for multi-cluster mode) -// - UUID → ACSClusterID (single-cluster shorthand) -// - name → ClusterNameLookup (single-cluster shorthand, resolved at runtime) -func classifyClusterValues(values []string, cfg *models.Config) error { - var overrides []string - var shorthands []string - - for _, v := range values { - if strings.Contains(v, "=") { - overrides = append(overrides, v) - } else { - shorthands = append(shorthands, v) - } - } - - if len(shorthands) > 1 { - return fmt.Errorf("at most one --cluster shorthand (UUID or name) allowed, got %d: %v", len(shorthands), shorthands) - } - - cfg.ClusterOverrides = overrides - - if len(shorthands) == 1 { - v := shorthands[0] - if uuidPattern.MatchString(v) { - cfg.ACSClusterID = v - } else { - cfg.ClusterNameLookup = v - } - } - - return nil -} - // inferAuthMode sets cfg.AuthMode based on which env vars are present (IMP-CLI-002). // - ROX_API_TOKEN set → token mode // - ROX_ADMIN_PASSWORD set → basic mode @@ -259,16 +204,6 @@ func validate(cfg *models.Config) error { cfg.CONamespace = "" // --co-all-namespaces overrides any namespace setting } - if len(cfg.Kubeconfigs) > 0 && len(cfg.Kubecontexts) > 0 { - return errors.New("--kubeconfig and --kubecontext are mutually exclusive") - } - - // In single-cluster mode without explicit --cluster, enable auto-discovery. 
- isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 - if !isMultiClusterMode && cfg.ACSClusterID == "" && cfg.ClusterNameLookup == "" { - cfg.AutoDiscoverClusterID = true - } - if cfg.MaxRetries < 0 { return fmt.Errorf("--max-retries must be >= 0 (got %d)", cfg.MaxRetries) } @@ -282,40 +217,32 @@ func printUsage(fs *flag.FlagSet) { fmt.Fprint(w, `co-acs-scan-importer - Import Compliance Operator scan schedules into ACS DESCRIPTION - Reads ScanSettingBinding resources from one or more Kubernetes clusters - running the Compliance Operator and creates equivalent scan configurations - in Red Hat Advanced Cluster Security (ACS) via the v2 API. + Reads ScanSettingBinding resources from Kubernetes clusters running the + Compliance Operator and creates equivalent scan configurations in Red Hat + Advanced Cluster Security (ACS) via the v2 API. - The importer auto-discovers the ACS cluster ID for each source cluster - by reading the admission-control ConfigMap, falling back to OpenShift - ClusterVersion metadata or the Helm effective cluster name secret. + By default, all contexts in the merged kubeconfig are processed. The ACS + cluster ID for each context is auto-discovered via the admission-control + ConfigMap, OpenShift ClusterVersion, or Helm cluster name secret. 
USAGE - # Single cluster (current kubeconfig context, auto-discovers ACS cluster ID): + # All clusters in kubeconfig (dry-run): co-acs-scan-importer \ --endpoint central.example.com \ --dry-run - # Multi-cluster with separate kubeconfig files: + # Specific clusters only: co-acs-scan-importer \ - --kubeconfig /path/to/cluster-a.kubeconfig \ - --kubeconfig /path/to/cluster-b.kubeconfig \ - --endpoint central.example.com - - # Multi-cluster with merged kubeconfig and named contexts: - KUBECONFIG=a.yaml:b.yaml:c.yaml co-acs-scan-importer \ - --kubecontext cluster-a \ - --kubecontext cluster-b \ - --endpoint central.example.com + --endpoint central.example.com \ + --context cluster-a \ + --context cluster-b - # All contexts in a merged kubeconfig: - co-acs-scan-importer \ - --kubecontext all \ + # Multiple kubeconfig files merged: + KUBECONFIG=a.yaml:b.yaml co-acs-scan-importer \ --endpoint central.example.com # Update existing ACS scan configs instead of skipping them: co-acs-scan-importer \ - --kubeconfig /path/to/cluster.kubeconfig \ --endpoint central.example.com \ --overwrite-existing @@ -332,15 +259,10 @@ AUTHENTICATION - Setting both is an error (ambiguous). - Setting neither is an error. -MULTI-CLUSTER NOTES - When clusters are spread across multiple kubeconfig files, use the - --kubeconfig flag once per file. Each file's current context is used. - - When a single merged kubeconfig contains all clusters with unique context - names, use --kubecontext to select them (or "all" to use every context). - Merge kubeconfigs via: KUBECONFIG=a.yaml:b.yaml:c.yaml - - --kubeconfig and --kubecontext are mutually exclusive. +MULTI-CLUSTER + The importer processes all contexts in the merged kubeconfig by default. + Use --context (repeatable) to limit processing to specific contexts. 
+ Merge kubeconfig files via: KUBECONFIG=a.yaml:b.yaml:c.yaml ScanSettingBindings with the same name across multiple clusters are merged into a single ACS scan configuration targeting all matched clusters. The @@ -348,16 +270,13 @@ MULTI-CLUSTER NOTES reports an error if they differ. AUTO-DISCOVERY - In multi-cluster mode, the ACS cluster ID is auto-discovered for each - source cluster using the following chain (first success wins): + The ACS cluster ID is auto-discovered for each context using the + following chain (first success wins): 1. admission-control ConfigMap "cluster-id" key (namespace: stackrox) 2. OpenShift ClusterVersion spec.clusterID matched against ACS provider metadata 3. helm-effective-cluster-name secret matched against ACS cluster name - Use --cluster ctx=name-or-uuid to override auto-discovery for a - specific context. - EXIT CODES 0 All bindings processed successfully (or nothing to do). 1 Fatal error (bad config, auth failure, connectivity issue). diff --git a/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go index 8686f3d2ecbfd..9696bc29592cd 100644 --- a/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go +++ b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go @@ -4,151 +4,60 @@ import ( "testing" ) -// TestIMP_CLI_003_KubeconfigRepeatable verifies that --kubeconfig can be -// repeated multiple times for multi-cluster mode. -func TestIMP_CLI_003_KubeconfigRepeatable(t *testing.T) { +// TestIMP_CLI_003_ContextRepeatable verifies that --context can be +// repeated to filter which kubeconfig contexts are processed. 
+func TestIMP_CLI_003_ContextRepeatable(t *testing.T) { setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate([]string{ "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--kubeconfig", "/path/to/kube1.yaml", - "--kubeconfig", "/path/to/kube2.yaml", + "--context", "ctx-a", + "--context", "ctx-b", }) if err != nil { t.Fatalf("unexpected error: %v", err) } - if len(cfg.Kubeconfigs) != 2 { - t.Errorf("expected 2 kubeconfigs, got %d", len(cfg.Kubeconfigs)) + if len(cfg.Contexts) != 2 { + t.Errorf("expected 2 contexts, got %d", len(cfg.Contexts)) } - if cfg.Kubeconfigs[0] != "/path/to/kube1.yaml" { - t.Errorf("expected first kubeconfig path, got %q", cfg.Kubeconfigs[0]) + if cfg.Contexts[0] != "ctx-a" { + t.Errorf("expected first context 'ctx-a', got %q", cfg.Contexts[0]) } - if cfg.Kubeconfigs[1] != "/path/to/kube2.yaml" { - t.Errorf("expected second kubeconfig path, got %q", cfg.Kubeconfigs[1]) + if cfg.Contexts[1] != "ctx-b" { + t.Errorf("expected second context 'ctx-b', got %q", cfg.Contexts[1]) } } -// TestIMP_CLI_003_KubecontextRepeatable verifies that --kubecontext can be -// repeated multiple times for multi-cluster mode. -func TestIMP_CLI_003_KubecontextRepeatable(t *testing.T) { +// TestIMP_CLI_003_NoContextMeansAll verifies that omitting --context +// results in an empty Contexts slice (meaning "all contexts"). 
+func TestIMP_CLI_003_NoContextMeansAll(t *testing.T) { setenv(t, "ROX_API_TOKEN", "tok") cfg, err := ParseAndValidate([]string{ "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--kubecontext", "ctx1", - "--kubecontext", "ctx2", }) if err != nil { t.Fatalf("unexpected error: %v", err) } - if len(cfg.Kubecontexts) != 2 { - t.Errorf("expected 2 kubecontexts, got %d", len(cfg.Kubecontexts)) - } - if cfg.Kubecontexts[0] != "ctx1" { - t.Errorf("expected first context, got %q", cfg.Kubecontexts[0]) - } - if cfg.Kubecontexts[1] != "ctx2" { - t.Errorf("expected second context, got %q", cfg.Kubecontexts[1]) - } -} - -// TestIMP_CLI_003_KubecontextAll verifies that --kubecontext all signals -// iteration of all contexts in the active kubeconfig. -func TestIMP_CLI_003_KubecontextAll(t *testing.T) { - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate([]string{ - "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--kubecontext", "all", - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(cfg.Kubecontexts) != 1 || cfg.Kubecontexts[0] != "all" { - t.Errorf("expected kubecontext 'all', got %v", cfg.Kubecontexts) - } -} - -// TestIMP_CLI_003_ClusterOverrideRepeatable verifies that --cluster ctx=value -// can be repeated for manual cluster name mappings in multi-cluster mode. 
-func TestIMP_CLI_003_ClusterOverrideRepeatable(t *testing.T) { - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate([]string{ - "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--kubecontext", "ctx1", - "--cluster", "ctx1=acs-cluster-1", - "--cluster", "ctx2=acs-cluster-2", - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(cfg.ClusterOverrides) != 2 { - t.Errorf("expected 2 cluster overrides, got %d", len(cfg.ClusterOverrides)) - } - if cfg.ClusterOverrides[0] != "ctx1=acs-cluster-1" { - t.Errorf("expected first override, got %q", cfg.ClusterOverrides[0]) + if len(cfg.Contexts) != 0 { + t.Errorf("expected empty contexts (all), got %v", cfg.Contexts) } } -// TestIMP_CLI_003_KubeconfigContextMutuallyExclusive verifies that -// --kubeconfig and --kubecontext cannot be used together. -func TestIMP_CLI_003_KubeconfigContextMutuallyExclusive(t *testing.T) { +// TestIMP_CLI_003_RemovedFlagsRejected verifies that removed multi-cluster +// flags are not accepted. +func TestIMP_CLI_003_RemovedFlagsRejected(t *testing.T) { setenv(t, "ROX_API_TOKEN", "tok") - _, err := ParseAndValidate([]string{ - "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--kubeconfig", "/path/to/kube1.yaml", - "--kubecontext", "ctx1", - }) - if err == nil { - t.Fatal("expected error for both --kubeconfig and --kubecontext, got nil") - } -} - -// TestIMP_CLI_003_DefaultSingleClusterMode verifies that when no multi-cluster -// flags are provided, the importer uses the current context. 
-func TestIMP_CLI_003_DefaultSingleClusterMode(t *testing.T) { - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate([]string{ - "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23b", - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(cfg.Kubeconfigs) != 0 { - t.Errorf("expected no kubeconfigs in single-cluster mode, got %d", len(cfg.Kubeconfigs)) - } - if len(cfg.Kubecontexts) != 0 { - t.Errorf("expected no kubecontexts in single-cluster mode, got %d", len(cfg.Kubecontexts)) - } - if cfg.ACSClusterID != "65640fbb-ac7c-42a8-9e65-883c3f35f23b" { - t.Errorf("expected ACSClusterID, got %q", cfg.ACSClusterID) - } -} - -// TestSingleClusterAutoDiscoveryWhenNoCluster verifies that omitting -// --cluster in single-cluster mode enables auto-discovery. -func TestSingleClusterAutoDiscoveryWhenNoCluster(t *testing.T) { - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate([]string{ - "--endpoint", "https://central.example.com", - "--co-namespace", "openshift-compliance", - // No --cluster and no multi-cluster flags - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - if !cfg.AutoDiscoverClusterID { - t.Fatal("expected AutoDiscoverClusterID to be true") + for _, flag := range []string{"--kubeconfig", "--kubecontext", "--cluster"} { + t.Run(flag, func(t *testing.T) { + _, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + flag, "some-value", + }) + if err == nil { + t.Errorf("expected error for %s, got nil", flag) + } + }) } } diff --git a/scripts/compliance-operator-importer/internal/config/config_test.go b/scripts/compliance-operator-importer/internal/config/config_test.go index fcf41f1aa5584..ef1b2d1972039 100644 --- a/scripts/compliance-operator-importer/internal/config/config_test.go +++ b/scripts/compliance-operator-importer/internal/config/config_test.go @@ -429,185 +429,6 @@ 
func TestIMP_CLI_025_NeitherTokenNorPasswordErrors(t *testing.T) { } } -// =========================================================================== -// IMP-CLI-005 / IMP-MAP-022 / IMP-MAP-023: unified --cluster flag -// =========================================================================== - -func TestIMP_CLI_005_ClusterUUID(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23b", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if cfg.ACSClusterID != "65640fbb-ac7c-42a8-9e65-883c3f35f23b" { - t.Errorf("IMP-MAP-023: expected UUID used directly, got %q", cfg.ACSClusterID) - } - if cfg.AutoDiscoverClusterID { - t.Error("IMP-MAP-023: auto-discover should be disabled when UUID given") - } -} - -func TestIMP_CLI_005_ClusterUUID_UpperCase(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--cluster", "65640FBB-AC7C-42A8-9E65-883C3F35F23B", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if cfg.ACSClusterID != "65640FBB-AC7C-42A8-9E65-883C3F35F23B" { - t.Errorf("expected uppercase UUID accepted, got %q", cfg.ACSClusterID) - } -} - -func TestIMP_CLI_005_ClusterName(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--cluster", "production-cluster", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if cfg.ClusterNameLookup != "production-cluster" { - t.Errorf("IMP-MAP-022: expected ClusterNameLookup='production-cluster', got %q", cfg.ClusterNameLookup) - } - if cfg.ACSClusterID != "" { - t.Errorf("expected empty ACSClusterID for name lookup, got %q", cfg.ACSClusterID) - } - if cfg.AutoDiscoverClusterID { - t.Error("auto-discover should be disabled when cluster name given") - } -} - -func TestIMP_CLI_005_ClusterOverride(t *testing.T) { - 
clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--kubecontext", "ctx1", - "--cluster", "ctx1=my-cluster", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(cfg.ClusterOverrides) != 1 || cfg.ClusterOverrides[0] != "ctx1=my-cluster" { - t.Errorf("expected cluster override, got %v", cfg.ClusterOverrides) - } -} - -func TestIMP_CLI_005_MultipleOverrides(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--kubecontext", "ctx1", - "--kubecontext", "ctx2", - "--cluster", "ctx1=cluster-a", - "--cluster", "ctx2=cluster-b", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(cfg.ClusterOverrides) != 2 { - t.Errorf("expected 2 overrides, got %d", len(cfg.ClusterOverrides)) - } -} - -func TestIMP_CLI_005_MultipleShorthandsRejected(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - _, err := ParseAndValidate(minimalValidArgs( - "--cluster", "cluster-a", - "--cluster", "cluster-b", - )) - if err == nil { - t.Fatal("expected error for multiple --cluster shorthands, got nil") - } -} - -func TestIMP_CLI_005_MixedShorthandAndOverride(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - // One shorthand (name) + one override should work: - // shorthand applies to current context, override applies to named context. 
- cfg, err := ParseAndValidate(minimalValidArgs( - "--cluster", "production", - "--cluster", "ctx1=staging", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if cfg.ClusterNameLookup != "production" { - t.Errorf("expected shorthand name, got %q", cfg.ClusterNameLookup) - } - if len(cfg.ClusterOverrides) != 1 || cfg.ClusterOverrides[0] != "ctx1=staging" { - t.Errorf("expected override, got %v", cfg.ClusterOverrides) - } -} - -func TestIMP_CLI_005_NoClusterAutoDiscover(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !cfg.AutoDiscoverClusterID { - t.Fatal("expected AutoDiscoverClusterID=true when no --cluster given") - } -} - -func TestIMP_CLI_005_OverrideWithUUIDValue(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - cfg, err := ParseAndValidate(minimalValidArgs( - "--kubecontext", "ctx1", - "--cluster", "ctx1=65640fbb-ac7c-42a8-9e65-883c3f35f23b", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - // Should be stored as an override, not as a UUID shorthand. - if len(cfg.ClusterOverrides) != 1 { - t.Errorf("expected 1 override, got %d", len(cfg.ClusterOverrides)) - } - if cfg.ACSClusterID != "" { - t.Errorf("expected empty ACSClusterID (UUID in override, not shorthand), got %q", cfg.ACSClusterID) - } -} - -func TestIMP_CLI_005_NotUUID_LooksLikeUUID(t *testing.T) { - clearAuthEnv(t) - setenv(t, "ROX_API_TOKEN", "tok") - - // Missing one hex digit in the last segment — not a valid UUID. - cfg, err := ParseAndValidate(minimalValidArgs( - "--cluster", "65640fbb-ac7c-42a8-9e65-883c3f35f23", - )) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - // Should be treated as a name, not UUID. 
- if cfg.ACSClusterID != "" { - t.Errorf("expected empty ACSClusterID for invalid UUID, got %q", cfg.ACSClusterID) - } - if cfg.ClusterNameLookup != "65640fbb-ac7c-42a8-9e65-883c3f35f23" { - t.Errorf("expected name lookup for invalid UUID, got %q", cfg.ClusterNameLookup) - } -} - // =========================================================================== // Defaults and other flags (IMP-CLI-004, IMP-CLI-006..012) // =========================================================================== diff --git a/scripts/compliance-operator-importer/internal/discover/discover.go b/scripts/compliance-operator-importer/internal/discover/discover.go new file mode 100644 index 0000000000000..1537c8157d11e --- /dev/null +++ b/scripts/compliance-operator-importer/internal/discover/discover.go @@ -0,0 +1,150 @@ +// Package discover handles auto-discovery of ACS cluster IDs from Kubernetes +// clusters using multiple fallback methods. +package discover + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/models" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +// k8sResourceReader abstracts Kubernetes resource lookups for testing. +type k8sResourceReader interface { + getAdmissionControlClusterID(ctx context.Context) (string, error) + getOpenShiftClusterID(ctx context.Context) (string, error) + getHelmSecretClusterName(ctx context.Context) (string, error) +} + +// k8sDiscoveryClient is the production implementation using a dynamic k8s client. +type k8sDiscoveryClient struct { + dynamic dynamic.Interface +} + +// NewK8sDiscoveryClient creates a k8sResourceReader from a dynamic k8s client. +func NewK8sDiscoveryClient(dynClient dynamic.Interface) k8sResourceReader { + return &k8sDiscoveryClient{dynamic: dynClient} +} + +// IMP-MAP-016: admission-control ConfigMap in stackrox namespace. 
+func (c *k8sDiscoveryClient) getAdmissionControlClusterID(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + obj, err := c.dynamic.Resource(gvr).Namespace("stackrox").Get(ctx, "admission-control", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get admission-control ConfigMap: %w", err) + } + + data, found, err := unstructured.NestedStringMap(obj.Object, "data") + if err != nil || !found { + return "", fmt.Errorf("parse ConfigMap data: %w", err) + } + + clusterID, ok := data["cluster-id"] + if !ok || clusterID == "" { + return "", errors.New("cluster-id not found in admission-control ConfigMap") + } + return clusterID, nil +} + +// IMP-MAP-017: OpenShift ClusterVersion resource. +func (c *k8sDiscoveryClient) getOpenShiftClusterID(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{ + Group: "config.openshift.io", + Version: "v1", + Resource: "clusterversions", + } + obj, err := c.dynamic.Resource(gvr).Get(ctx, "version", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get ClusterVersion: %w", err) + } + + clusterID, found, err := unstructured.NestedString(obj.Object, "spec", "clusterID") + if err != nil || !found { + return "", fmt.Errorf("parse ClusterVersion.spec.clusterID: %w", err) + } + if clusterID == "" { + return "", errors.New("ClusterVersion.spec.clusterID is empty") + } + return clusterID, nil +} + +// IMP-MAP-018: helm-effective-cluster-name secret in stackrox namespace. 
+func (c *k8sDiscoveryClient) getHelmSecretClusterName(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + obj, err := c.dynamic.Resource(gvr).Namespace("stackrox").Get(ctx, "helm-effective-cluster-name", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get helm-effective-cluster-name Secret: %w", err) + } + + data, found, err := unstructured.NestedStringMap(obj.Object, "data") + if err != nil || !found { + return "", fmt.Errorf("parse Secret data: %w", err) + } + + encodedName, ok := data["cluster-name"] + if !ok || encodedName == "" { + return "", errors.New("cluster-name not found in helm-effective-cluster-name Secret") + } + + // Kubernetes secrets are base64-encoded. + decoded, err := base64.StdEncoding.DecodeString(encodedName) + if err != nil { + return "", fmt.Errorf("decode cluster-name: %w", err) + } + return string(decoded), nil +} + +// DiscoverClusterID attempts to resolve the ACS cluster ID for the given source cluster. +// +// Discovery chain (try in order, use first success): +// 1. admission-control ConfigMap: direct ACS cluster UUID (IMP-MAP-016). +// 2. OpenShift ClusterVersion: match providerMetadata.cluster.id (IMP-MAP-017). +// 3. helm-effective-cluster-name secret: match by cluster name (IMP-MAP-018). +// +// Returns error if all methods fail. +func DiscoverClusterID( + ctx context.Context, + k8s k8sResourceReader, + acs models.ACSClient, +) (string, error) { + // IMP-MAP-016: admission-control ConfigMap. + if clusterID, err := k8s.getAdmissionControlClusterID(ctx); err == nil { + return clusterID, nil + } + + // IMP-MAP-017: OpenShift ClusterVersion. 
+ if ocpClusterID, err := k8s.getOpenShiftClusterID(ctx); err == nil { + clusters, err := acs.ListClusters(ctx) + if err != nil { + return "", fmt.Errorf("list ACS clusters for OpenShift ID match: %w", err) + } + for _, c := range clusters { + if c.ProviderClusterID == ocpClusterID { + return c.ID, nil + } + } + return "", fmt.Errorf("OpenShift cluster ID %q not found in ACS clusters", ocpClusterID) + } + + // IMP-MAP-018: helm-effective-cluster-name secret. + if clusterName, err := k8s.getHelmSecretClusterName(ctx); err == nil { + clusters, err := acs.ListClusters(ctx) + if err != nil { + return "", fmt.Errorf("list ACS clusters for helm cluster name match: %w", err) + } + for _, c := range clusters { + if c.Name == clusterName { + return c.ID, nil + } + } + return "", fmt.Errorf("helm cluster name %q not found in ACS clusters", clusterName) + } + + return "", errors.New("all discovery methods failed to resolve ACS cluster ID") +} diff --git a/scripts/compliance-operator-importer/internal/discover/discover_test.go b/scripts/compliance-operator-importer/internal/discover/discover_test.go new file mode 100644 index 0000000000000..695f4fd3267b9 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/discover/discover_test.go @@ -0,0 +1,169 @@ +package discover + +import ( + "context" + "errors" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// mockK8sClient implements the k8sResourceReader interface for testing. 
+type mockK8sClient struct { + admissionControlCM map[string]string + admissionControlErr error + clusterVersionID string + clusterVersionErr error + helmSecretClusterName string + helmSecretErr error +} + +func (m *mockK8sClient) getAdmissionControlClusterID(ctx context.Context) (string, error) { + if m.admissionControlErr != nil { + return "", m.admissionControlErr + } + return m.admissionControlCM["cluster-id"], nil +} + +func (m *mockK8sClient) getOpenShiftClusterID(ctx context.Context) (string, error) { + if m.clusterVersionErr != nil { + return "", m.clusterVersionErr + } + return m.clusterVersionID, nil +} + +func (m *mockK8sClient) getHelmSecretClusterName(ctx context.Context) (string, error) { + if m.helmSecretErr != nil { + return "", m.helmSecretErr + } + return m.helmSecretClusterName, nil +} + +// mockACSClient implements the models.ACSClient interface for testing. +type mockACSClient struct { + clusters []models.ACSClusterInfo + err error +} + +func (m *mockACSClient) Preflight(ctx context.Context) error { + return nil +} + +func (m *mockACSClient) ListScanConfigurations(ctx context.Context) ([]models.ACSConfigSummary, error) { + return nil, nil +} + +func (m *mockACSClient) CreateScanConfiguration(ctx context.Context, payload models.ACSCreatePayload) (string, error) { + return "", nil +} + +func (m *mockACSClient) UpdateScanConfiguration(ctx context.Context, id string, payload models.ACSCreatePayload) error { + return nil +} + +func (m *mockACSClient) ListClusters(ctx context.Context) ([]models.ACSClusterInfo, error) { + return m.clusters, m.err +} + +// TestIMP_MAP_016_AdmissionControlConfigMap verifies discovery via admission-control ConfigMap. 
+func TestIMP_MAP_016_AdmissionControlConfigMap(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlCM: map[string]string{"cluster-id": "acs-uuid-12345"}, + } + acs := &mockACSClient{} + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-12345" { + t.Errorf("expected cluster ID from admission-control CM, got %q", clusterID) + } +} + +// TestIMP_MAP_017_OpenShiftClusterVersion verifies discovery via OpenShift ClusterVersion. +func TestIMP_MAP_017_OpenShiftClusterVersion(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionID: "ocp-cluster-abc", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-1", Name: "cluster-1", ProviderClusterID: "ocp-cluster-abc"}, + {ID: "acs-uuid-2", Name: "cluster-2", ProviderClusterID: "ocp-cluster-xyz"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-1" { + t.Errorf("expected cluster ID from OpenShift ClusterVersion match, got %q", clusterID) + } +} + +// TestIMP_MAP_018_HelmSecretClusterName verifies discovery via helm-effective-cluster-name secret. 
+func TestIMP_MAP_018_HelmSecretClusterName(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretClusterName: "production", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-1", Name: "production"}, + {ID: "acs-uuid-2", Name: "staging"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-1" { + t.Errorf("expected cluster ID from helm secret match, got %q", clusterID) + } +} + +// TestDiscoveryFallbackChain verifies the discovery chain tries methods in order. +func TestDiscoveryFallbackChain(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretClusterName: "fallback-cluster", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-fallback", Name: "fallback-cluster"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-fallback" { + t.Errorf("expected fallback cluster ID, got %q", clusterID) + } +} + +// TestDiscoveryAllMethodsFail verifies error when all discovery methods fail. 
+func TestDiscoveryAllMethodsFail(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretErr: errors.New("not found"), + } + acs := &mockACSClient{} + + _, err := DiscoverClusterID(ctx, k8s, acs) + if err == nil { + t.Fatal("expected error when all discovery methods fail, got nil") + } +} diff --git a/scripts/compliance-operator-importer/internal/models/models.go b/scripts/compliance-operator-importer/internal/models/models.go index 98cee36ce0edb..9c83f97215acc 100644 --- a/scripts/compliance-operator-importer/internal/models/models.go +++ b/scripts/compliance-operator-importer/internal/models/models.go @@ -15,25 +15,20 @@ const ( // Config holds all resolved configuration for a single importer run. type Config struct { - ACSEndpoint string // from --endpoint or ROX_ENDPOINT - AuthMode AuthMode // auto-inferred from env vars (ROX_API_TOKEN / ROX_ADMIN_PASSWORD) - Username string // from --username or ROX_ADMIN_USER (default "admin") - CONamespace string // empty when COAllNamespaces=true - COAllNamespaces bool - ACSClusterID string // resolved from --cluster UUID or auto-discovered - ClusterNameLookup string // non-UUID --cluster value for runtime resolution via ACS API - DryRun bool - ReportJSON string - RequestTimeout time.Duration - MaxRetries int - CACertFile string - InsecureSkipVerify bool - OverwriteExisting bool - AutoDiscoverClusterID bool // set by validate() when no --cluster in single-cluster mode - // Multi-cluster mode fields - Kubeconfigs []string // repeatable --kubeconfig paths - Kubecontexts []string // repeatable --kubecontext names - ClusterOverrides []string // repeatable --cluster ctx=acs-name-or-uuid + ACSEndpoint string // from --endpoint or ROX_ENDPOINT + AuthMode AuthMode // auto-inferred from env vars (ROX_API_TOKEN / ROX_ADMIN_PASSWORD) + Username string // from --username or ROX_ADMIN_USER (default "admin") + 
CONamespace string // empty when COAllNamespaces=true + COAllNamespaces bool + ACSClusterID string // auto-discovered per context; set at runtime during iteration + DryRun bool + ReportJSON string + RequestTimeout time.Duration + MaxRetries int + CACertFile string + InsecureSkipVerify bool + OverwriteExisting bool + Contexts []string // opt-in --context filter; empty means all contexts } // Severity classifies how severe a Problem is. diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source.go b/scripts/compliance-operator-importer/internal/run/cluster_source.go index cd0cb1093c8c2..a4cd817b716b2 100644 --- a/scripts/compliance-operator-importer/internal/run/cluster_source.go +++ b/scripts/compliance-operator-importer/internal/run/cluster_source.go @@ -14,158 +14,74 @@ import ( // ClusterSource represents a single source cluster with its CO client and ACS cluster ID. type ClusterSource struct { - Label string // kubeconfig path or context name, for logging + Label string // context name, for logging COClient cofetch.COClient ACSClusterID string } -// BuildClusterSources creates ClusterSource entries from the config. -// -// Logic: -// - If no multi-cluster flags: single-cluster mode using current context and cfg.ACSClusterID. -// - If --kubeconfig flags: one source per kubeconfig file, discover cluster ID. -// - If --kubecontext flags: one source per context (or all contexts if "all"), discover cluster ID. -// - Manual overrides from --cluster apply to matched contexts. +// BuildClusterSources creates ClusterSource entries by iterating all contexts +// in the merged kubeconfig. If cfg.Contexts is non-empty, only those contexts +// are used. func BuildClusterSources(ctx context.Context, cfg *models.Config, acsClient models.ACSClient) ([]ClusterSource, error) { - isMultiClusterMode := len(cfg.Kubeconfigs) > 0 || len(cfg.Kubecontexts) > 0 - - if !isMultiClusterMode { - // Single-cluster mode with auto-discovery. 
- coClient, err := cofetch.NewClient(cfg) - if err != nil { - return nil, fmt.Errorf("create CO client: %w", err) - } - - clusterID := cfg.ACSClusterID - if clusterID == "" { - // Auto-discover using default kubeconfig context. - dynClient, err := buildDynamicClientForContext("") - if err != nil { - return nil, fmt.Errorf("build dynamic client for current context: %w", err) - } - clusterID, err = discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, "") - if err != nil { - return nil, fmt.Errorf("discover cluster ID for current context: %w", err) - } - } - - return []ClusterSource{{ - Label: "current-context", - COClient: coClient, - ACSClusterID: clusterID, - }}, nil - } - - // Parse manual cluster overrides into a map: contextName -> acsClusterName. - overrides, err := parseClusterOverrides(cfg.ClusterOverrides) + allContexts, err := listAllContexts() if err != nil { return nil, err } - var sources []ClusterSource - - // Handle --kubeconfig mode. - if len(cfg.Kubeconfigs) > 0 { - for _, kubeconfigPath := range cfg.Kubeconfigs { - coClient, err := cofetch.NewClientForKubeconfig(kubeconfigPath, cfg.CONamespace, cfg.COAllNamespaces) - if err != nil { - return nil, fmt.Errorf("create CO client for kubeconfig %q: %w", kubeconfigPath, err) - } - - // Build dynamic client for discovery. - dynClient, err := buildDynamicClientForKubeconfig(kubeconfigPath) - if err != nil { - return nil, fmt.Errorf("build dynamic client for kubeconfig %q: %w", kubeconfigPath, err) - } - - // Check for manual override (match by kubeconfig path? Not practical. Skip for kubeconfig mode). 
- acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, "") - if err != nil { - return nil, fmt.Errorf("discover cluster ID for kubeconfig %q: %w", kubeconfigPath, err) - } - - sources = append(sources, ClusterSource{ - Label: kubeconfigPath, - COClient: coClient, - ACSClusterID: acsClusterID, - }) + contexts := allContexts + if len(cfg.Contexts) > 0 { + contexts = filterContexts(allContexts, cfg.Contexts) + if len(contexts) == 0 { + return nil, fmt.Errorf("none of the requested --context values match available contexts %v", allContexts) } - return sources, nil } - // Handle --kubecontext mode. - if len(cfg.Kubecontexts) > 0 { - contexts := cfg.Kubecontexts - if len(contexts) == 1 && contexts[0] == "all" { - // Expand "all" to all contexts in the active kubeconfig. - allContexts, err := listAllContexts() - if err != nil { - return nil, fmt.Errorf("list all contexts: %w", err) - } - contexts = allContexts + var sources []ClusterSource + for _, contextName := range contexts { + coClient, err := cofetch.NewClientForContext(contextName, cfg.CONamespace, cfg.COAllNamespaces) + if err != nil { + return nil, fmt.Errorf("create CO client for context %q: %w", contextName, err) } - for _, contextName := range contexts { - coClient, err := cofetch.NewClientForContext(contextName, cfg.CONamespace, cfg.COAllNamespaces) - if err != nil { - return nil, fmt.Errorf("create CO client for context %q: %w", contextName, err) - } - - // Build dynamic client for discovery. - dynClient, err := buildDynamicClientForContext(contextName) - if err != nil { - return nil, fmt.Errorf("build dynamic client for context %q: %w", contextName, err) - } - - // Check for manual override. 
- manualName := overrides[contextName] - acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient, manualName) - if err != nil { - return nil, fmt.Errorf("discover cluster ID for context %q: %w", contextName, err) - } + dynClient, err := buildDynamicClientForContext(contextName) + if err != nil { + return nil, fmt.Errorf("build dynamic client for context %q: %w", contextName, err) + } - sources = append(sources, ClusterSource{ - Label: contextName, - COClient: coClient, - ACSClusterID: acsClusterID, - }) + acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient) + if err != nil { + return nil, fmt.Errorf("discover cluster ID for context %q: %w", contextName, err) } - return sources, nil - } - return nil, errors.New("no cluster sources configured") -} + sources = append(sources, ClusterSource{ + Label: contextName, + COClient: coClient, + ACSClusterID: acsClusterID, + }) + } -// parseClusterOverrides parses --cluster flags into a map: contextName -> acsClusterName. -// Format: ctx=acs-name -func parseClusterOverrides(overrides []string) (map[string]string, error) { - result := make(map[string]string) - for _, override := range overrides { - parts := splitOnce(override, "=") - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return nil, fmt.Errorf("invalid --cluster format %q: expected ctx=acs-name", override) - } - result[parts[0]] = parts[1] + if len(sources) == 0 { + return nil, errors.New("no contexts found in kubeconfig") } - return result, nil + return sources, nil } -// splitOnce splits s on the first occurrence of sep. -func splitOnce(s, sep string) []string { - idx := -1 - for i := 0; i < len(s); i++ { - if s[i:i+len(sep)] == sep { - idx = i - break - } +// filterContexts returns only contexts whose names appear in the wanted set. 
+func filterContexts(all []string, wanted []string) []string { + set := make(map[string]bool, len(wanted)) + for _, w := range wanted { + set[w] = true } - if idx == -1 { - return []string{s} + var result []string + for _, c := range all { + if set[c] { + result = append(result, c) + } } - return []string{s[:idx], s[idx+len(sep):]} + return result } -// listAllContexts returns all context names from the active kubeconfig. +// listAllContexts returns all context names from the merged kubeconfig. func listAllContexts() ([]string, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() config, err := loadingRules.Load() @@ -183,17 +99,6 @@ func listAllContexts() ([]string, error) { return contexts, nil } -// buildDynamicClientForKubeconfig creates a dynamic k8s client for the given kubeconfig file. -func buildDynamicClientForKubeconfig(kubeconfigPath string) (dynamic.Interface, error) { - loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath} - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) - restConfig, err := kubeConfig.ClientConfig() - if err != nil { - return nil, fmt.Errorf("build rest config: %w", err) - } - return dynamic.NewForConfig(restConfig) -} - // buildDynamicClientForContext creates a dynamic k8s client for the given context. func buildDynamicClientForContext(contextName string) (dynamic.Interface, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md index 7257b29fca6d9..38e2091fd6f83 100644 --- a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -8,42 +8,26 @@ Define the importer interface so it can be implemented and tested predictably. 
### Required inputs -Note: flag names and environment variables are aligned with `roxctl` conventions. - - **IMP-CLI-001**: importer MUST accept Central endpoint (`--endpoint` or `ROX_ENDPOINT`). - if the value does not contain a scheme, importer MUST prepend `https://`. - if the value starts with `http://`, importer MUST error. - **IMP-CLI-002**: importer MUST support auth modes, auto-inferred from available credentials - (aligned with `roxctl` behavior — no explicit `--auth-mode` flag, no env-var-name indirection): + (no explicit `--auth-mode` flag, no env-var-name indirection): - token mode: when `ROX_API_TOKEN` is set, - basic mode: when `ROX_ADMIN_PASSWORD` is set, - if both are set: error ("ambiguous auth"), - if neither is set: error with help text listing both options. -- **IMP-CLI-003**: importer MUST support multi-cluster source selection via two mechanisms: - - `--kubeconfig ` (repeatable): each path is a separate source cluster, using that file's - current context. This is the primary mechanism for users with one kubeconfig file per cluster. - - `--kubecontext ` (repeatable): selects contexts within the active kubeconfig - (set via `KUBECONFIG` env var or `~/.kube/config`). Use when a single merged kubeconfig - contains unique context names for all clusters. - - `--kubecontext all`: iterates all contexts in the active kubeconfig. - - `--kubeconfig` and `--kubecontext` are mutually exclusive (error if both given). - - when neither `--kubeconfig` nor `--kubecontext` is given, importer MUST use - the current kubeconfig context (single-cluster mode, backward compatible). - - help text MUST suggest: - - using `--kubeconfig` (repeatable) when clusters have separate kubeconfig files, or - - merging kubeconfigs (`KUBECONFIG=a.yaml:b.yaml`) with unique context names - and using `--kubecontext`. 
+- **IMP-CLI-003**: importer MUST use all contexts from the merged kubeconfig: + - kubeconfig loading follows standard kubectl rules: `KUBECONFIG` env var (colon-separated + paths) or `~/.kube/config`. + - by default, the importer iterates **all contexts** in the merged kubeconfig, treating + each context as a separate source cluster. + - `--context ` (repeatable, optional): filters which contexts to use. When given, + only the named contexts are processed; all others are skipped. + - for each context, the ACS cluster ID is auto-discovered (see IMP-MAP-016..018). - **IMP-CLI-004**: importer MUST support namespace scope: - `--co-namespace ` (default `openshift-compliance`) for single namespace, or - `--co-all-namespaces` for cluster-wide scan. -- **IMP-CLI-005**: importer MUST support ACS cluster identification via `--cluster`: - - by default (no `--cluster` flag), auto-discover the ACS cluster ID for each source - cluster (see IMP-MAP-016..018). - - `--cluster ` accepts three forms: - - UUID: used directly as the ACS cluster ID (single-cluster shorthand). - - name: resolved to an ACS cluster ID via `GET /v1/clusters` (single-cluster shorthand). - - `=` (repeatable): maps a specific kubeconfig context to - an ACS cluster, overriding auto-discovery for that context. ### Optional inputs @@ -54,10 +38,9 @@ Note: flag names and environment variables are aligned with `roxctl` conventions - **IMP-CLI-010**: `--max-retries ` default `5`, min `0`. - **IMP-CLI-011**: `--ca-cert-file ` optional. - **IMP-CLI-012**: `--insecure-skip-verify` default false; MUST require explicit flag. -- **IMP-CLI-023**: (removed — auth mode is auto-inferred, see IMP-CLI-002). - **IMP-CLI-024**: for basic mode: - username is read from `--username` flag or `ROX_ADMIN_USER` env var (default `admin`). - - password is read from `ROX_ADMIN_PASSWORD` env var (no flag; aligned with roxctl). + - password is read from `ROX_ADMIN_PASSWORD` env var (no flag). 
- **IMP-CLI-025**: importer MUST reject ambiguous auth config: - both `ROX_API_TOKEN` and `ROX_ADMIN_PASSWORD` are set → error, - neither is set → error with help text. @@ -80,7 +63,6 @@ Note: flag names and environment variables are aligned with `roxctl` conventions - using selected auth mode, - success only on HTTP 200. - **IMP-CLI-016**: HTTP 401/403 at preflight MUST fail-fast with remediation message. -- **IMP-CLI-026**: (removed — auth mode is auto-inferred, see IMP-CLI-002). ## Output contract diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature index 45b0821f6582b..e68b392b5728f 100644 --- a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -72,26 +72,6 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat When the importer resolves the ACS cluster ID for "ctx-c" Then the resolved ACS cluster ID MUST be "acs-uuid-c" # IMP-MAP-018 - @mapping @clusters - Scenario: Manual override via --cluster flag (context=name form) - Given importer flag --cluster is "ctx-a=my-acs-cluster" - And ACS cluster list contains a cluster named "my-acs-cluster" with ACS ID "acs-uuid-a" - When the importer resolves the ACS cluster ID for "ctx-a" - Then the resolved ACS cluster ID MUST be "acs-uuid-a" # IMP-MAP-007 - - @mapping @clusters - Scenario: Single-cluster shorthand via --cluster with name - Given importer flag --cluster is "my-acs-cluster" (no = separator) - And ACS cluster list contains a cluster named "my-acs-cluster" with ACS ID "acs-uuid-a" - When the importer resolves the ACS cluster ID - Then the resolved ACS cluster ID MUST be "acs-uuid-a" # IMP-MAP-022 - - @mapping @clusters - Scenario: Single-cluster shorthand via --cluster with UUID - Given importer flag --cluster is "acs-uuid-a" (no = separator, valid UUID) - When the importer 
resolves the ACS cluster ID - Then the resolved ACS cluster ID MUST be "acs-uuid-a" (used directly) # IMP-MAP-023 - @mapping @clusters @multicluster Scenario: Merge SSBs with same name across clusters Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" diff --git a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md index feeb5bd31f49d..1a65a92e07cc1 100644 --- a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md +++ b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md @@ -156,7 +156,7 @@ Procedure (overwrite): ### A9 - Auto-discovery - **IMP-ACC-017**: importer MUST auto-discover the ACS cluster ID from the admission-control - ConfigMap's `cluster-id` key when no `--cluster` override is given. + ConfigMap's `cluster-id` key. ### A7 - Failure paths diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md index 94672cf4f47e9..32049923197c0 100644 --- a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -5,7 +5,7 @@ Use this matrix to ensure complete implementation coverage. 
|Requirement ID|Spec source|Test level|Notes| |---|---|---|---| |IMP-CLI-001..027|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, auth modes, multi-cluster, --overwrite-existing| -|IMP-MAP-001..023|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging, --cluster shorthand| +|IMP-MAP-001..021|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging| |IMP-IDEM-001..009|`03-idempotency-dry-run-retries.feature`|Unit + integration|Idempotency, overwrite mode (PUT), dry-run reporting| |IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| |IMP-ACC-001..017|`04-validation-and-acceptance.md`|Acceptance|Real cluster, ACS verification, multi-cluster merge, auto-discovery| diff --git a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md index f92c22aa04407..e383bf849c2af 100644 --- a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md +++ b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md @@ -22,7 +22,7 @@ Provide a reliable entrypoint with strict validation and preflight checks. ### A Requirement IDs - `IMP-CLI-001..016` -- `IMP-CLI-023..026` +- `IMP-CLI-024..025` ### A Implementation targets (suggested) @@ -44,7 +44,7 @@ Provide a reliable entrypoint with strict validation and preflight checks. ### A Agent prompt seed -- "Implement Slice A for create-only importer. Start with tests for IMP-CLI-001..016 and IMP-CLI-023..026, then implement CLI/config/preflight with HTTPS and both token/basic auth mode support." +- "Implement Slice A for create-only importer. Start with tests for IMP-CLI-001..016 and IMP-CLI-024..025, then implement CLI/config/preflight with HTTPS and both token/basic auth mode support." 
## Slice B - CO discovery and mapping core @@ -145,13 +145,13 @@ Support multiple source clusters, auto-discover ACS cluster IDs, merge SSBs acro ### E Requirement IDs -- `IMP-CLI-003` (updated), `IMP-CLI-005` (updated), `IMP-CLI-027` +- `IMP-CLI-003`, `IMP-CLI-027` - `IMP-MAP-016..021` - `IMP-ACC-015..017` ### E Implementation targets (suggested) -- `scripts/compliance-operator-importer/internal/config/config.go` (new flags) +- `scripts/compliance-operator-importer/internal/config/config.go` (--context filter) - `scripts/compliance-operator-importer/internal/discover/discover.go` (new package: ACS cluster ID auto-discovery) - `scripts/compliance-operator-importer/internal/cofetch/client.go` (multi-context support) - `scripts/compliance-operator-importer/internal/merge/merge.go` (new package: SSB merging + mismatch detection) @@ -172,7 +172,7 @@ Support multiple source clusters, auto-discover ACS cluster IDs, merge SSBs acro ### E Agent prompt seed -- "Implement Slice E: multi-cluster support. Add --kubecontext (repeatable), auto-discover ACS cluster ID via admission-control ConfigMap (fallback: ClusterVersion, helm-effective-cluster-name), merge SSBs by name across clusters, error on profile/schedule mismatch." +- "Implement Slice E: multi-cluster support. Iterate all contexts from merged kubeconfig, auto-discover ACS cluster ID via admission-control ConfigMap (fallback: ClusterVersion, helm-effective-cluster-name), merge SSBs by name across clusters, error on profile/schedule mismatch." ## Slice F - Overwrite-existing mode (PUT support) @@ -235,57 +235,46 @@ Make real-cluster validation repeatable and scriptable. - "Implement Slice G automation helpers for IMP-ACC-001..017 and produce run artifacts paths for dry-run/apply/second-run/multi-cluster/overwrite checks." -## Slice H - UX alignment with roxctl conventions +## Slice H - UX conventions -- DONE ### H Goal -Rename all flags and env vars to match roxctl conventions. Remove unnecessary -indirection flags. 
Simplify auth inference and endpoint handling. +Ensure all flags and env vars follow consistent conventions. Auth mode is +auto-inferred from available credentials. Endpoint handling prepends `https://` +when no scheme is provided. ### H Requirement IDs -- `IMP-CLI-001` (updated: `--endpoint` / `ROX_ENDPOINT`, auto-prepend `https://`) -- `IMP-CLI-002` (updated: auto-infer auth from env vars, no `--auth-mode`) -- `IMP-CLI-005` (updated: unified `--cluster` flag accepting UUID, name, or ctx=value) -- `IMP-CLI-013` (updated: bare hostnames get `https://` prepended) -- `IMP-CLI-023` (removed: `--auth-mode`) -- `IMP-CLI-024` (updated: `--username` / `ROX_ADMIN_USER`, default `admin`; password from `ROX_ADMIN_PASSWORD`) -- `IMP-CLI-025` (updated: ambiguous = both token+password set) -- `IMP-CLI-026` (removed: auth mode inferred) -- `IMP-MAP-022..023` (new: `--cluster` single-value shorthand with name or UUID) - -### H Changes summary - -| Old | New | Notes | -|-----|-----|-------| -| `--acs-endpoint` / `ACS_ENDPOINT` | `--endpoint` / `ROX_ENDPOINT` | aligned with roxctl | -| `--acs-auth-mode` | (removed) | auto-inferred | -| `--acs-token-env` | (removed) | always reads `ROX_API_TOKEN` | -| `--acs-password-env` | (removed) | always reads `ROX_ADMIN_PASSWORD` | -| `--acs-username` / `ACS_USERNAME` | `--username` / `ROX_ADMIN_USER` (default `admin`) | aligned with roxctl | -| `--acs-cluster-id` | `--cluster` (UUID, name, or ctx=value) | unified | -| `--source-kubecontext` | (removed) | redundant with `--kubecontext` | - -### H Implementation targets - -- `internal/config/config.go` (flag renames, auth inference, endpoint normalization) -- `internal/models/models.go` (remove AuthMode, TokenEnv, PasswordEnv fields) -- `internal/preflight/preflight.go` (auth inference) -- `internal/acs/client.go` (read from fixed env vars) -- `internal/run/cluster_source.go` (unified `--cluster` parsing) -- `cmd/importer/main.go` - -### H Tests to update +- `IMP-CLI-001` +- `IMP-CLI-002` +- 
`IMP-CLI-013` +- `IMP-CLI-024` +- `IMP-CLI-025` +## Slice I - Simplify cluster access model + +### I Goal + +Iterate all contexts from the merged kubeconfig by default, with an +opt-in `--context` filter. ACS cluster ID is always auto-discovered. + +### I Requirement IDs + +- `IMP-CLI-003` + +### I Implementation targets + +- `internal/models/models.go` (remove Kubeconfigs, Kubecontexts, ClusterOverrides, ClusterNameLookup, AutoDiscoverClusterID; add Contexts) +- `internal/config/config.go` (remove old flags, add --context, remove classifyClusterValues) +- `internal/run/cluster_source.go` (simplify: always load all contexts, filter by Contexts) +- `internal/cofetch/client.go` (remove NewClientForKubeconfig) +- `cmd/importer/main.go` (simplify: always BuildClusterSources + RunMultiCluster) - `internal/config/config_test.go` - `internal/config/config_multicluster_test.go` -- `internal/preflight/preflight_test.go` -- `internal/acs/client_test.go` -- `internal/run/run_test.go` -### H Agent prompt seed +### I Agent prompt seed -- "Implement Slice H: rename all ACS-prefixed flags/env vars to roxctl conventions per the table above. Remove --auth-mode, --token-env, --password-env, --source-kubecontext. Auto-infer auth from env vars. Auto-prepend https:// for bare hostnames. Unify --cluster to accept UUID, name, or ctx=value." +- "Implement Slice I: drop --kubeconfig, --kubecontext, --cluster. Default to all contexts from merged kubeconfig. Add --context (repeatable) as opt-in filter. Always auto-discover ACS cluster ID. Simplify BuildClusterSources and main.go accordingly." ## Cross-slice conventions @@ -303,9 +292,11 @@ indirection flags. Simplify auth inference and endpoint handling. 2. Slice B (domain mapping) -- DONE 3. Slice C (ACS reconciliation) -- DONE 4. Slice D (reporting + run orchestration) -- DONE -5. Slice E (multi-cluster + auto-discovery) -6. Slice F (overwrite-existing / PUT support) -7. Slice G (acceptance automation) +5. 
Slice E (multi-cluster + auto-discovery) -- DONE +6. Slice F (overwrite-existing / PUT support) -- DONE +7. Slice G (acceptance automation) -- DONE +8. Slice H (UX conventions) -- DONE +9. Slice I (simplify cluster access model) Slices E and F are independent and can be implemented in parallel. One agent per slice is ideal; if sequential, complete one slice fully before next. From 2f7003533208235c6ffd341648ee4393fdeb0a5e Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 09:54:47 +0100 Subject: [PATCH 08/24] fix(co-importer): show TLS-specific hints for self-signed cert errors (IMP-CLI-016a) When preflight fails due to a TLS certificate verification error (e.g. self-signed cert), the error message now suggests --ca-cert-file or --insecure-skip-verify instead of the generic "check network connectivity" hint, which was misleading. Adds spec requirement IMP-CLI-016a and a test that verifies the hint content for untrusted certificates. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/preflight/preflight.go | 11 +++++-- .../internal/preflight/preflight_test.go | 30 +++++++++++++++++++ .../specs/01-cli-and-config-contract.md | 3 ++ 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight.go b/scripts/compliance-operator-importer/internal/preflight/preflight.go index e082a995c134f..dbf2fef57822f 100644 --- a/scripts/compliance-operator-importer/internal/preflight/preflight.go +++ b/scripts/compliance-operator-importer/internal/preflight/preflight.go @@ -55,10 +55,15 @@ func Run(ctx context.Context, cfg *models.Config) error { resp, err := client.Do(req) if err != nil { + fix := "Fix: check network connectivity and that --endpoint is correct" + var certErr *tls.CertificateVerificationError + if errors.As(err, &certErr) { + fix = "Fix: if ACS uses a self-signed certificate, supply the CA with --ca-cert-file " + + "or pass --insecure-skip-verify to disable TLS 
verification" + } return fmt.Errorf( - "preflight failed: could not reach ACS at %s: %w\n"+ - "Fix: check network connectivity and that --endpoint is correct", - cfg.ACSEndpoint, err, + "preflight failed: could not reach ACS at %s: %w\n%s", + cfg.ACSEndpoint, err, fix, ) } defer resp.Body.Close() diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go index a52d5b63f6c5e..ba2bee9524e97 100644 --- a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go +++ b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go @@ -96,6 +96,36 @@ func TestIMP_CLI_016_403ReturnsRemediationError(t *testing.T) { } } +// TestTLSCertErrorHintsSelfSigned verifies that when the server presents a +// certificate not trusted by the client, the error message hints at +// --ca-cert-file and --insecure-skip-verify rather than network connectivity. +func TestTLSCertErrorHintsSelfSigned(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "tok") + + cfg := minimalTokenConfig(srv.URL) + // InsecureSkipVerify defaults to false — the self-signed cert will fail. 
+ + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected TLS error, got nil") + } + msg := err.Error() + if !strings.Contains(msg, "--ca-cert-file") { + t.Errorf("expected hint about --ca-cert-file, got: %q", msg) + } + if !strings.Contains(msg, "--insecure-skip-verify") { + t.Errorf("expected hint about --insecure-skip-verify, got: %q", msg) + } + if strings.Contains(msg, "check network connectivity") { + t.Errorf("should not suggest network connectivity for TLS cert error, got: %q", msg) + } +} + // TestIMP_CLI_013_NonHTTPSEndpointRejected verifies that a non-https endpoint // is rejected before any network call is made (IMP-CLI-013). func TestIMP_CLI_013_NonHTTPSEndpointRejected(t *testing.T) { diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md index 38e2091fd6f83..cf98bd8d70c3e 100644 --- a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -63,6 +63,9 @@ Define the importer interface so it can be implemented and tested predictably. - using selected auth mode, - success only on HTTP 200. - **IMP-CLI-016**: HTTP 401/403 at preflight MUST fail-fast with remediation message. +- **IMP-CLI-016a**: TLS certificate verification failures at preflight MUST hint at + `--ca-cert-file` and `--insecure-skip-verify` (not generic network connectivity), + since the most common cause is a self-signed or internal CA certificate. ## Output contract From 7a9b5c7345a4ab1315e5a8af0d27124442201fae Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 09:55:27 +0100 Subject: [PATCH 09/24] docs(co-importer): mark Slice I done, add Slice J (container image) to backlog Update traceability matrix with IMP-IMG-001..006 from the container image spec. Add Slice J definition to the implementation backlog. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../specs/05-traceability-matrix.md | 1 + .../specs/06-implementation-backlog.md | 24 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md index 32049923197c0..b6c58f4f3fa77 100644 --- a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -9,6 +9,7 @@ Use this matrix to ensure complete implementation coverage. |IMP-IDEM-001..009|`03-idempotency-dry-run-retries.feature`|Unit + integration|Idempotency, overwrite mode (PUT), dry-run reporting| |IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| |IMP-ACC-001..017|`04-validation-and-acceptance.md`|Acceptance|Real cluster, ACS verification, multi-cluster merge, auto-discovery| +|IMP-IMG-001..006|`07-container-image.md`|Build + smoke|Dockerfile, static binary, multi-arch manifest, image size| ## Coverage rule diff --git a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md index e383bf849c2af..2272d0f6f9ebb 100644 --- a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md +++ b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md @@ -276,6 +276,27 @@ opt-in `--context` filter. ACS cluster ID is always auto-discovered. - "Implement Slice I: drop --kubeconfig, --kubecontext, --cluster. Default to all contexts from merged kubeconfig. Add --context (repeatable) as opt-in filter. Always auto-discover ACS cluster ID. Simplify BuildClusterSources and main.go accordingly." +## Slice J - Container image packaging + +### J Goal + +Package the importer as a minimal, multi-arch container image for distribution. 
+
+### J Requirement IDs
+
+- `IMP-IMG-001..006`
+
+### J Implementation targets
+
+- `scripts/compliance-operator-importer/Dockerfile`
+- `scripts/compliance-operator-importer/Makefile` (image, image-multiarch, image-push targets)
+- `scripts/compliance-operator-importer/.dockerignore`
+
+### J Acceptance signal
+
+- `make image` builds and `docker run --rm $IMAGE --help` prints usage.
+- `make image-push` builds per-arch images and creates manifest list.
+
 ## Cross-slice conventions
 
 - Requirement IDs must appear in test names or comments.
@@ -296,7 +317,8 @@ opt-in `--context` filter. ACS cluster ID is always auto-discovered.
 6. Slice F (overwrite-existing / PUT support) -- DONE
 7. Slice G (acceptance automation) -- DONE
 8. Slice H (UX conventions) -- DONE
-9. Slice I (simplify cluster access model)
+9. Slice I (simplify cluster access model) -- DONE
+10. Slice J (container image packaging)
 
 Slices E and F are independent and can be implemented in parallel.
 One agent per slice is ideal; if sequential, complete one slice fully before next.

From 34fad871a214b241b734029a6a49f561fc6140ec Mon Sep 17 00:00:00 2001
From: Guzman
Date: Thu, 26 Mar 2026 09:55:33 +0100
Subject: [PATCH 10/24] docs(co-importer): rewrite README with usage, flags, and mapping reference

Replace the spec-process-only README with a practical user guide
covering quick start, authentication, multi-cluster usage, flags
reference, mapping rules, exit codes, JSON report shape, and demo
instructions.

Also ignore the built binary in .gitignore.

Partially generated by AI.
Co-Authored-By: Claude Opus 4.6 --- .../compliance-operator-importer/.gitignore | 1 + .../compliance-operator-importer/README.md | 164 ++++++++++++++++-- 2 files changed, 146 insertions(+), 19 deletions(-) diff --git a/scripts/compliance-operator-importer/.gitignore b/scripts/compliance-operator-importer/.gitignore index e660fd93d3196..4deffd5466199 100644 --- a/scripts/compliance-operator-importer/.gitignore +++ b/scripts/compliance-operator-importer/.gitignore @@ -1 +1,2 @@ bin/ +/compliance-operator-importer diff --git a/scripts/compliance-operator-importer/README.md b/scripts/compliance-operator-importer/README.md index 7860b08d75525..071691fba9cc3 100644 --- a/scripts/compliance-operator-importer/README.md +++ b/scripts/compliance-operator-importer/README.md @@ -1,26 +1,152 @@ -# Compliance Operator -> ACS Importer (Spec Set) +# CO → ACS Scheduled Scan Importer -This directory contains **specifications only** for a standalone importer that reads existing Compliance Operator resources and creates equivalent ACS compliance scan configurations via ACS API. +Reads Compliance Operator `ScanSettingBinding` resources from one or more +Kubernetes clusters and creates equivalent scan configurations in Red Hat +Advanced Cluster Security (ACS) via the v2 API. -No runtime changes to Sensor/Central are in scope for this work item. -Phase 1 mode is **create-only** (no ACS updates). +## Quick start -## Spec-driven workflow +```bash +# Build +go build -o compliance-operator-importer ./cmd/importer -Implement in this order: +# Dry run (preview, no changes) +ROX_API_TOKEN= ./compliance-operator-importer \ + --endpoint central.example.com \ + --dry-run -1. Read `DECISIONS.md` (frozen v1 scope and non-goals). -2. Read `specs/00-spec-process.md` (process and quality gates). -3. Use `specs/06-implementation-backlog.md` to execute slice-by-slice. -4. Implement CLI contract from `specs/01-cli-and-config-contract.md`. -5. 
Implement behavior scenarios in: - - `specs/02-co-to-acs-mapping.feature` - - `specs/03-idempotency-dry-run-retries.feature` -6. Validate with `specs/04-validation-and-acceptance.md`. +# Import for real +ROX_API_TOKEN= ./compliance-operator-importer \ + --endpoint central.example.com +``` -Definition of done: +## Authentication -- every MUST statement in spec docs is implemented, -- every `Scenario` in `.feature` files has an automated test, -- resource-level issues are skipped and captured in `problems[]` with fix hints, -- acceptance commands in `specs/04-validation-and-acceptance.md` pass on a real cluster. +Auth mode is auto-inferred from environment variables: + +| Variable | Mode | Typical use | +|----------|------|-------------| +| `ROX_API_TOKEN` | API token (Bearer) | Production | +| `ROX_ADMIN_PASSWORD` | Basic auth | Development/testing | + +Setting both is an error. Setting neither is an error. + +For basic auth the username defaults to `admin`; override with `--username` +or `ROX_ADMIN_USER`. + +## Multi-cluster + +By default all contexts in the merged kubeconfig are processed. Merge +multiple kubeconfig files via the standard `KUBECONFIG` variable: + +```bash +KUBECONFIG=cluster-a.yaml:cluster-b.yaml ./compliance-operator-importer --endpoint central.example.com +``` + +Use `--context` (repeatable) to limit processing to specific contexts: + +```bash +./compliance-operator-importer --endpoint central.example.com \ + --context prod-east \ + --context prod-west +``` + +When the same `ScanSettingBinding` name appears across multiple clusters, +the importer merges them into a single ACS scan configuration targeting all +matched clusters (profiles and schedules must match). + +## Cluster ID auto-discovery + +The ACS cluster ID for each context is auto-discovered using the first +successful method: + +1. `admission-control` ConfigMap → `cluster-id` key (namespace: `stackrox`) +2. 
OpenShift `ClusterVersion` `spec.clusterID` → matched against ACS provider metadata +3. `helm-effective-cluster-name` Secret → matched against ACS cluster name + +## Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `--endpoint` | `ROX_ENDPOINT` | ACS Central URL (bare hostnames get `https://` prepended) | +| `--username` | `admin` | Basic auth username (`ROX_ADMIN_USER`) | +| `--context` | all | Kubeconfig context to process (repeatable) | +| `--co-namespace` | `openshift-compliance` | Namespace for CO resources | +| `--co-all-namespaces` | `false` | Read CO resources from all namespaces | +| `--dry-run` | `false` | Preview actions without changes | +| `--overwrite-existing` | `false` | Update existing ACS configs instead of skipping | +| `--report-json` | — | Write structured JSON report to file | +| `--max-retries` | `5` | Retry attempts for transient API errors (429, 502–504) | +| `--request-timeout` | `30s` | Per-request HTTP timeout | +| `--ca-cert-file` | — | PEM CA bundle for TLS | +| `--insecure-skip-verify` | `false` | Skip TLS verification | + +## Behaviour + +- **Create-only (default):** existing ACS scan configs with the same name + are skipped with a warning. +- **Overwrite mode** (`--overwrite-existing`): existing configs are updated + via PUT to match the cluster SSBs. +- **Idempotent:** re-running produces the same result; no duplicates. +- **Dry run:** all discovery and mapping runs normally; no POST/PUT issued. 
+ +## Exit codes + +| Code | Meaning | +|------|---------| +| `0` | All bindings processed (or nothing to do) | +| `1` | Fatal error (config, auth, connectivity) | +| `2` | Partial success (some bindings failed; see report) | + +## Mapping rules + +Each `ScanSettingBinding` maps to one ACS scan configuration: + +| ACS field | Source | +|-----------|--------| +| `scanName` | `ScanSettingBinding.metadata.name` | +| `profiles` | Sorted, deduplicated profile names from the binding | +| `scanSchedule` | Converted from the referenced `ScanSetting.schedule` (cron) | +| `clusters` | Auto-discovered ACS cluster ID(s) | +| `description` | `"Imported from CO ScanSettingBinding / (ScanSetting: )"` | + +Supported cron patterns: daily (`M H * * *`), weekly (`M H * * DOW`), +monthly (`M H DOM * *`). Step and range notation are not supported. + +## JSON report + +When `--report-json` is set, a structured report is written: + +```json +{ + "meta": { "timestamp": "...", "dryRun": false, "mode": "create-only" }, + "counts": { "discovered": 3, "create": 2, "update": 0, "skip": 1, "failed": 0 }, + "items": [ { "source": {...}, "action": "create", ... } ], + "problems": [] +} +``` + +## Demo / testing + +Seed demo fixtures (2 ACS scans + 3 SSBs, 1 conflicting): + +```bash +ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./hack/demo-seed.sh up +./hack/demo-seed.sh status +./hack/demo-seed.sh down +``` + +Interactive walkthrough: + +```bash +ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./hack/demo.sh +``` + +## Development + +Specs live in `specs/` and are the source of truth. Tests reference spec IDs +(`IMP-*`). Run tests: + +```bash +go test ./... 
+``` From 11e593233b1614c14994d574ab96e0802da37f43 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 10:03:34 +0100 Subject: [PATCH 11/24] fix(co-importer): show per-method errors when cluster ID discovery fails When all three discovery methods fail (admission-control ConfigMap, OpenShift ClusterVersion, helm-effective-cluster-name Secret), the error now lists each method's failure reason instead of a generic "all methods failed" message. This makes it immediately clear whether the issue is auth (e.g. expired kubeconfig credentials), missing resources, or something else. Before: all discovery methods failed to resolve ACS cluster ID After: all discovery methods failed to resolve ACS cluster ID: - admission-control ConfigMap: ... Unauthorized - OpenShift ClusterVersion: ... Unauthorized - helm-effective-cluster-name Secret: ... Unauthorized Adds spec scenario IMP-MAP-016a for the detailed error contract. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/discover/discover.go | 45 ++++++++++++++----- .../specs/02-co-to-acs-mapping.feature | 9 ++++ 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/discover/discover.go b/scripts/compliance-operator-importer/internal/discover/discover.go index 1537c8157d11e..777149e4582bb 100644 --- a/scripts/compliance-operator-importer/internal/discover/discover.go +++ b/scripts/compliance-operator-importer/internal/discover/discover.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "errors" "fmt" + "strings" "github.com/stackrox/co-acs-importer/internal/models" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -113,38 +114,58 @@ func DiscoverClusterID( k8s k8sResourceReader, acs models.ACSClient, ) (string, error) { + var errs []error + // IMP-MAP-016: admission-control ConfigMap. 
- if clusterID, err := k8s.getAdmissionControlClusterID(ctx); err == nil { + clusterID, err := k8s.getAdmissionControlClusterID(ctx) + if err == nil { return clusterID, nil } + errs = append(errs, fmt.Errorf("admission-control ConfigMap: %w", err)) // IMP-MAP-017: OpenShift ClusterVersion. - if ocpClusterID, err := k8s.getOpenShiftClusterID(ctx); err == nil { - clusters, err := acs.ListClusters(ctx) - if err != nil { - return "", fmt.Errorf("list ACS clusters for OpenShift ID match: %w", err) + ocpClusterID, err := k8s.getOpenShiftClusterID(ctx) + if err == nil { + clusters, listErr := acs.ListClusters(ctx) + if listErr != nil { + return "", fmt.Errorf("list ACS clusters for OpenShift ID match: %w", listErr) } for _, c := range clusters { if c.ProviderClusterID == ocpClusterID { return c.ID, nil } } - return "", fmt.Errorf("OpenShift cluster ID %q not found in ACS clusters", ocpClusterID) + errs = append(errs, fmt.Errorf("OpenShift cluster ID %q not found in ACS clusters", ocpClusterID)) + } else { + errs = append(errs, fmt.Errorf("OpenShift ClusterVersion: %w", err)) } // IMP-MAP-018: helm-effective-cluster-name secret. 
- if clusterName, err := k8s.getHelmSecretClusterName(ctx); err == nil { - clusters, err := acs.ListClusters(ctx) - if err != nil { - return "", fmt.Errorf("list ACS clusters for helm cluster name match: %w", err) + clusterName, err := k8s.getHelmSecretClusterName(ctx) + if err == nil { + clusters, listErr := acs.ListClusters(ctx) + if listErr != nil { + return "", fmt.Errorf("list ACS clusters for helm cluster name match: %w", listErr) } for _, c := range clusters { if c.Name == clusterName { return c.ID, nil } } - return "", fmt.Errorf("helm cluster name %q not found in ACS clusters", clusterName) + errs = append(errs, fmt.Errorf("helm cluster name %q not found in ACS clusters", clusterName)) + } else { + errs = append(errs, fmt.Errorf("helm-effective-cluster-name Secret: %w", err)) } - return "", errors.New("all discovery methods failed to resolve ACS cluster ID") + return "", fmt.Errorf("all discovery methods failed to resolve ACS cluster ID:\n - %s", + joinErrors(errs)) +} + +// joinErrors formats a slice of errors as a newline+bullet list. 
+func joinErrors(errs []error) string { + parts := make([]string, len(errs)) + for i, e := range errs { + parts[i] = e.Error() + } + return strings.Join(parts, "\n - ") } diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature index e68b392b5728f..a98a270c60cea 100644 --- a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -72,6 +72,15 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat When the importer resolves the ACS cluster ID for "ctx-c" Then the resolved ACS cluster ID MUST be "acs-uuid-c" # IMP-MAP-018 + @mapping @clusters + Scenario: All discovery methods fail with detailed per-method errors + Given kubecontext "ctx-d" points to a cluster + And ConfigMap "admission-control" is not readable (returns "Unauthorized") + And ClusterVersion is not available (returns "Unauthorized") + And Secret "helm-effective-cluster-name" is not readable (returns "Unauthorized") + When the importer resolves the ACS cluster ID for "ctx-d" + Then the error MUST list each method's failure reason # IMP-MAP-016a + @mapping @clusters @multicluster Scenario: Merge SSBs with same name across clusters Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" From 2d4a60e6aac886b19a0cdd9c9c2bd1cde1e13553 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 10:19:50 +0100 Subject: [PATCH 12/24] feat(co-importer): load kubeconfig files independently to avoid credential collisions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When KUBECONFIG lists multiple files (e.g. config:config-secured-cluster), kubectl merges them into a single view. 
If both files define a user named "admin" with different certificates, the merge silently picks one and the other cluster gets wrong credentials — causing confusing auth failures. The importer now loads each kubeconfig file in isolation via ExplicitPath, so user/cluster/context entries in one file never interfere with another. Duplicate context names across files are allowed and both are processed with their own credentials. Changes: - cluster_source.go: split KUBECONFIG into individual files, load each independently via clientcmd.ExplicitPath, enumerate contexts per file - cofetch/client.go: add NewClientFromRestConfig to accept a pre-built rest.Config (avoids re-loading merged kubeconfig) - spec IMP-CLI-003: updated to specify per-file isolation semantics - cluster_source_test.go: new tests for per-file loading, credential isolation, duplicate context handling, and --context filtering Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/cofetch/client.go | 10 +- .../internal/run/cluster_source.go | 144 ++++++++++---- .../internal/run/cluster_source_test.go | 183 ++++++++++++++++++ .../specs/01-cli-and-config-contract.md | 17 +- 4 files changed, 306 insertions(+), 48 deletions(-) create mode 100644 scripts/compliance-operator-importer/internal/run/cluster_source_test.go diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go index f3e3d96f07337..eed2160c15ce1 100644 --- a/scripts/compliance-operator-importer/internal/cofetch/client.go +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -9,6 +9,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -71,9 +72,16 @@ func NewClientForContext(contextName string, namespace string, allNamespaces boo return nil, fmt.Errorf("build kubeconfig for context %q: 
%w", contextName, err) } + return NewClientFromRestConfig(restConfig, namespace, allNamespaces) +} + +// NewClientFromRestConfig creates a COClient from an existing rest.Config. +// This avoids kubeconfig merging, preventing credential collisions when +// multiple kubeconfig files define the same user name. +func NewClientFromRestConfig(restConfig *rest.Config, namespace string, allNamespaces bool) (COClient, error) { dynClient, err := dynamic.NewForConfig(restConfig) if err != nil { - return nil, fmt.Errorf("create dynamic client for context %q: %w", contextName, err) + return nil, fmt.Errorf("create dynamic client: %w", err) } ns := namespace diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source.go b/scripts/compliance-operator-importer/internal/run/cluster_source.go index a4cd817b716b2..1796aaca501a1 100644 --- a/scripts/compliance-operator-importer/internal/run/cluster_source.go +++ b/scripts/compliance-operator-importer/internal/run/cluster_source.go @@ -4,11 +4,15 @@ import ( "context" "errors" "fmt" + "os" + "path/filepath" + "strings" "github.com/stackrox/co-acs-importer/internal/cofetch" "github.com/stackrox/co-acs-importer/internal/discover" "github.com/stackrox/co-acs-importer/internal/models" "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -19,42 +23,53 @@ type ClusterSource struct { ACSClusterID string } -// BuildClusterSources creates ClusterSource entries by iterating all contexts -// in the merged kubeconfig. If cfg.Contexts is non-empty, only those contexts -// are used. +// contextRef pairs a kubeconfig file with one of its contexts. +type contextRef struct { + Context string + KubeconfigFile string +} + +// BuildClusterSources creates ClusterSource entries by loading each kubeconfig +// file independently (no merging). If cfg.Contexts is non-empty, only matching +// contexts are used. 
func BuildClusterSources(ctx context.Context, cfg *models.Config, acsClient models.ACSClient) ([]ClusterSource, error) { - allContexts, err := listAllContexts() + allRefs, err := listContextRefs() if err != nil { return nil, err } - contexts := allContexts + refs := allRefs if len(cfg.Contexts) > 0 { - contexts = filterContexts(allContexts, cfg.Contexts) - if len(contexts) == 0 { - return nil, fmt.Errorf("none of the requested --context values match available contexts %v", allContexts) + refs = filterRefs(allRefs, cfg.Contexts) + if len(refs) == 0 { + return nil, fmt.Errorf("none of the requested --context values match available contexts %v", contextNames(allRefs)) } } var sources []ClusterSource - for _, contextName := range contexts { - coClient, err := cofetch.NewClientForContext(contextName, cfg.CONamespace, cfg.COAllNamespaces) + for _, ref := range refs { + restCfg, err := restConfigForRef(ref) if err != nil { - return nil, fmt.Errorf("create CO client for context %q: %w", contextName, err) + return nil, fmt.Errorf("build rest config for context %q: %w", ref.Context, err) } - dynClient, err := buildDynamicClientForContext(contextName) + coClient, err := cofetch.NewClientFromRestConfig(restCfg, cfg.CONamespace, cfg.COAllNamespaces) if err != nil { - return nil, fmt.Errorf("build dynamic client for context %q: %w", contextName, err) + return nil, fmt.Errorf("create CO client for context %q: %w", ref.Context, err) + } + + dynClient, err := dynamic.NewForConfig(restCfg) + if err != nil { + return nil, fmt.Errorf("build dynamic client for context %q: %w", ref.Context, err) } acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient) if err != nil { - return nil, fmt.Errorf("discover cluster ID for context %q: %w", contextName, err) + return nil, fmt.Errorf("discover cluster ID for context %q: %w", ref.Context, err) } sources = append(sources, ClusterSource{ - Label: contextName, + Label: ref.Context, COClient: 
coClient, ACSClusterID: acsClusterID, }) @@ -66,47 +81,92 @@ func BuildClusterSources(ctx context.Context, cfg *models.Config, acsClient mode return sources, nil } -// filterContexts returns only contexts whose names appear in the wanted set. -func filterContexts(all []string, wanted []string) []string { +// filterRefs returns refs whose context name appears in the wanted set. +func filterRefs(all []contextRef, wanted []string) []contextRef { set := make(map[string]bool, len(wanted)) for _, w := range wanted { set[w] = true } - var result []string - for _, c := range all { - if set[c] { - result = append(result, c) + var result []contextRef + for _, r := range all { + if set[r.Context] { + result = append(result, r) } } return result } -// listAllContexts returns all context names from the merged kubeconfig. -func listAllContexts() ([]string, error) { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - config, err := loadingRules.Load() - if err != nil { - return nil, fmt.Errorf("load kubeconfig: %w", err) +func contextNames(refs []contextRef) []string { + names := make([]string, len(refs)) + for i, r := range refs { + names[i] = r.Context + } + return names +} + +// listContextRefs enumerates contexts from each kubeconfig file independently. +// Each file is loaded in isolation so that user/cluster entries with the same +// name in different files don't collide. 
+func listContextRefs() ([]contextRef, error) { + files := kubeconfigFiles() + if len(files) == 0 { + return nil, errors.New("no kubeconfig files found (check KUBECONFIG or ~/.kube/config)") } - var contexts []string - for name := range config.Contexts { - contexts = append(contexts, name) + var refs []contextRef + for _, file := range files { + cfg, err := clientcmd.LoadFromFile(file) + if err != nil { + return nil, fmt.Errorf("load kubeconfig %q: %w", file, err) + } + for ctxName := range cfg.Contexts { + refs = append(refs, contextRef{Context: ctxName, KubeconfigFile: file}) + } } - if len(contexts) == 0 { - return nil, errors.New("no contexts found in kubeconfig") + + if len(refs) == 0 { + return nil, errors.New("no contexts found in kubeconfig files") } - return contexts, nil + return refs, nil } -// buildDynamicClientForContext creates a dynamic k8s client for the given context. -func buildDynamicClientForContext(contextName string) (dynamic.Interface, error) { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName} - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) - restConfig, err := kubeConfig.ClientConfig() - if err != nil { - return nil, fmt.Errorf("build rest config: %w", err) +// kubeconfigFiles returns the list of kubeconfig file paths from the KUBECONFIG +// env var, or falls back to ~/.kube/config. 
+func kubeconfigFiles() []string { + env := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if env == "" { + home, err := os.UserHomeDir() + if err != nil { + return nil + } + defaultPath := filepath.Join(home, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName) + if _, err := os.Stat(defaultPath); err == nil { + return []string{defaultPath} + } + return nil } - return dynamic.NewForConfig(restConfig) + + parts := filepath.SplitList(env) + var files []string + for _, p := range parts { + p = strings.TrimSpace(p) + if p == "" { + continue + } + if _, err := os.Stat(p); err == nil { + files = append(files, p) + } + } + return files +} + +// restConfigForRef builds a rest.Config from a specific kubeconfig file and context, +// without merging with other kubeconfig files. +func restConfigForRef(ref contextRef) (*rest.Config, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{ + ExplicitPath: ref.KubeconfigFile, + } + overrides := &clientcmd.ConfigOverrides{CurrentContext: ref.Context} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + return kubeConfig.ClientConfig() } diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source_test.go b/scripts/compliance-operator-importer/internal/run/cluster_source_test.go new file mode 100644 index 0000000000000..4d591d7fad107 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/cluster_source_test.go @@ -0,0 +1,183 @@ +package run + +import ( + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/tools/clientcmd" +) + +// writeMinimalKubeconfig writes a kubeconfig with the given contexts to a file. +// Each context gets a unique cluster and user entry within the file. 
+func writeMinimalKubeconfig(t *testing.T, dir, filename string, contextNames []string) string { + t.Helper() + + var clusters, contexts, users string + for _, name := range contextNames { + clusterName := "cluster-" + name + userName := "user-" + name + clusters += ` +- cluster: + server: https://` + name + `.example.com:6443 + name: ` + clusterName + contexts += ` +- context: + cluster: ` + clusterName + ` + user: ` + userName + ` + name: ` + name + users += ` +- name: ` + userName + ` + user: + token: token-` + filename + `-` + name + } + + content := `apiVersion: v1 +kind: Config +clusters:` + clusters + ` +contexts:` + contexts + ` +current-context: ` + contextNames[0] + ` +users:` + users + ` +` + path := filepath.Join(dir, filename) + if err := os.WriteFile(path, []byte(content), 0600); err != nil { + t.Fatalf("write kubeconfig %s: %v", path, err) + } + return path +} + +// TestIMP_CLI_003_SingleFileAllContexts verifies that all contexts from a +// single kubeconfig file are discovered. +func TestIMP_CLI_003_SingleFileAllContexts(t *testing.T) { + dir := t.TempDir() + path := writeMinimalKubeconfig(t, dir, "config", []string{"ctx-a", "ctx-b"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs, got %d", len(refs)) + } + names := contextNames(refs) + for _, want := range []string{"ctx-a", "ctx-b"} { + found := false + for _, n := range names { + if n == want { + found = true + } + } + if !found { + t.Errorf("expected context %q in %v", want, names) + } + } +} + +// TestIMP_CLI_003_MultiFileUniqueContexts verifies that contexts from multiple +// kubeconfig files are all discovered when names are unique. 
+func TestIMP_CLI_003_MultiFileUniqueContexts(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config-a", []string{"ctx-a"}) + path2 := writeMinimalKubeconfig(t, dir, "config-b", []string{"ctx-b"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs, got %d", len(refs)) + } +} + +// TestIMP_CLI_003_MultiFileDuplicateContextsBothProcessed verifies that when +// the same context name appears in multiple files, both are returned. +func TestIMP_CLI_003_MultiFileDuplicateContextsBothProcessed(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin"}) + path2 := writeMinimalKubeconfig(t, dir, "config-secured-cluster", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs (one per file), got %d", len(refs)) + } + if refs[0].KubeconfigFile == refs[1].KubeconfigFile { + t.Error("expected refs from different files") + } +} + +// TestIMP_CLI_003_PerFileIsolation verifies that each file is loaded +// independently: a user named "user-admin" in file A gets its own credentials, +// not file B's. +func TestIMP_CLI_003_PerFileIsolation(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin"}) + path2 := writeMinimalKubeconfig(t, dir, "config-cluster-2", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + + // Build rest.Config for each ref and verify they use their own file's token. 
+ for _, ref := range refs { + cfg, err := restConfigForRef(ref) + if err != nil { + t.Fatalf("restConfigForRef(%s from %s): %v", ref.Context, ref.KubeconfigFile, err) + } + expectedToken := "token-" + filepath.Base(ref.KubeconfigFile) + "-admin" + if cfg.BearerToken != expectedToken { + t.Errorf("ref from %s: expected token %q, got %q (credential isolation broken)", + filepath.Base(ref.KubeconfigFile), expectedToken, cfg.BearerToken) + } + } +} + +// TestIMP_CLI_003_FilterByContextName verifies that --context filtering matches +// context names across all files. +func TestIMP_CLI_003_FilterByContextName(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin", "staging"}) + path2 := writeMinimalKubeconfig(t, dir, "config-cluster-2", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + + // Filter by "admin" — should match both files. + filtered := filterRefs(refs, []string{"admin"}) + if len(filtered) != 2 { + t.Errorf("filter by 'admin': expected 2 matches, got %d", len(filtered)) + } + + // Filter by "staging" — should match one. + filtered = filterRefs(refs, []string{"staging"}) + if len(filtered) != 1 { + t.Errorf("filter by 'staging': expected 1 match, got %d", len(filtered)) + } + + // Filter by nonexistent — should match none. + filtered = filterRefs(refs, []string{"nonexistent"}) + if len(filtered) != 0 { + t.Errorf("filter by 'nonexistent': expected 0 matches, got %d", len(filtered)) + } +} + +// TestIMP_CLI_003_NoKubeconfigFiles verifies clear error when no files exist. 
+func TestIMP_CLI_003_NoKubeconfigFiles(t *testing.T) { + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, "/nonexistent/path") + t.Setenv("HOME", t.TempDir()) + + _, err := listContextRefs() + if err == nil { + t.Fatal("expected error when no kubeconfig files exist") + } +} diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md index cf98bd8d70c3e..d23626d9b83e8 100644 --- a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -17,13 +17,20 @@ Define the importer interface so it can be implemented and tested predictably. - basic mode: when `ROX_ADMIN_PASSWORD` is set, - if both are set: error ("ambiguous auth"), - if neither is set: error with help text listing both options. -- **IMP-CLI-003**: importer MUST use all contexts from the merged kubeconfig: - - kubeconfig loading follows standard kubectl rules: `KUBECONFIG` env var (colon-separated +- **IMP-CLI-003**: importer MUST load each kubeconfig file independently (no merging): + - file discovery follows standard kubectl rules: `KUBECONFIG` env var (colon-separated paths) or `~/.kube/config`. - - by default, the importer iterates **all contexts** in the merged kubeconfig, treating + - each file in the `KUBECONFIG` path is loaded in isolation. Contexts, users, and + clusters defined in one file never interact with entries in another file. This + prevents credential collisions when multiple files define the same user name + (e.g. `admin`) with different certificates. + - by default, the importer iterates **all contexts** across all files, treating each context as a separate source cluster. - - `--context ` (repeatable, optional): filters which contexts to use. When given, - only the named contexts are processed; all others are skipped. 
+ - when the same context name appears in multiple files, both are processed + independently with their own credentials. + - `--context ` (repeatable, optional): filters which contexts to use. Matches + against context names across all files. When given, only matching contexts are + processed; all others are skipped. - for each context, the ACS cluster ID is auto-discovered (see IMP-MAP-016..018). - **IMP-CLI-004**: importer MUST support namespace scope: - `--co-namespace ` (default `openshift-compliance`) for single namespace, or From 35d45f4d21ba3483fc1b0bd04bf113f93cde5e65 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:12:10 +0100 Subject: [PATCH 13/24] fix(co-importer): print merge conflict warnings to console (IMP-MAP-020a) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Merge conflict problems (e.g. same-name SSBs with different profiles across clusters) were collected into the report but never printed to the console, leaving users with a cryptic "merged into 0" message and no explanation. Add r.status.Warnf() for each merge problem so the conflict reason is visible inline. Also includes RunMultiCluster orchestrator (multi_cluster.go) and updates specs: - Fix merge conflict scenarios: category "mapping" → "conflict" (matches code and models.CategoryConflict) - Add IMP-MAP-020a console output requirement - Add adopt and wire-format spec scenarios - Update traceability matrix with IMP-MAP-020a Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../internal/run/multi_cluster.go | 228 ++++++++++++++++++ .../specs/02-co-to-acs-mapping.feature | 78 +++++- .../specs/05-traceability-matrix.md | 2 +- 3 files changed, 303 insertions(+), 5 deletions(-) create mode 100644 scripts/compliance-operator-importer/internal/run/multi_cluster.go diff --git a/scripts/compliance-operator-importer/internal/run/multi_cluster.go b/scripts/compliance-operator-importer/internal/run/multi_cluster.go new file mode 100644 index 0000000000000..8c5e6b8e6a099 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/multi_cluster.go @@ -0,0 +1,228 @@ +package run + +import ( + "context" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/adopt" + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/mapping" + "github.com/stackrox/co-acs-importer/internal/merge" + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/problems" + "github.com/stackrox/co-acs-importer/internal/reconcile" + "github.com/stackrox/co-acs-importer/internal/report" +) + +// RunMultiCluster executes the importer in multi-cluster mode. +// +// Steps: +// 1. List existing ACS scan configs to build the existingNames set. +// 2. For each cluster source: +// a. List ScanSettingBindings. +// b. Map each SSB to an ACS payload, using the cluster's ACS ID. +// 3. Merge SSBs across clusters by name. +// 4. Reconcile merged payloads against ACS. +// 5. Build and write report. +// 6. Print console summary. +// 7. Return exit code. +func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) int { + collector := problems.NewCollector() + builder := report.NewBuilder(r.cfg) + + // Step 1: list existing ACS scan configs. 
+ r.status.Stage("Inventory", "listing existing ACS scan configurations") + summaries, err := r.acsClient.ListScanConfigurations(ctx) + if err != nil { + r.status.Failf("failed to list ACS scan configurations: %v", err) + return ExitFatalError + } + existingNames := make(map[string]string, len(summaries)) + for _, s := range summaries { + existingNames[s.ScanName] = s.ID + } + r.status.OKf("found %d existing scan configurations", len(summaries)) + + // ssbClusterInfo tracks per-SSB per-cluster metadata needed for adoption. + type ssbClusterInfo struct { + namespace string + oldSettingRef string + clusterLabel string + coClient cofetch.COClient + } + // Key: SSB name, value: list of cluster infos (one per cluster that has the SSB). + ssbAdoptionMap := make(map[string][]ssbClusterInfo) + + // Step 2: collect SSBs from all clusters and map them. + clusterSSBs := make(map[string][]merge.MappedSSB) + + for _, source := range sources { + r.status.Stagef("Scan", "cluster %s (ACS ID: %s)", source.Label, source.ACSClusterID) + + bindings, err := source.COClient.ListScanSettingBindings(ctx) + if err != nil { + r.status.Warnf("failed to list ScanSettingBindings from %s: %v", source.Label, err) + collector.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryInput, + ResourceRef: source.Label, + Description: fmt.Sprintf("Failed to list ScanSettingBindings from cluster %q: %v", source.Label, err), + FixHint: "Check cluster connectivity and permissions.", + Skipped: true, + }) + continue + } + + r.status.OKf("found %d ScanSettingBindings", len(bindings)) + + for _, binding := range bindings { + // Fetch the ScanSetting. 
+ ss, err := source.COClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) + if err != nil { + collector.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryInput, + ResourceRef: fmt.Sprintf("%s:%s/%s", source.Label, binding.Namespace, binding.Name), + Description: fmt.Sprintf("ScanSetting %q referenced by binding %q in cluster %q could not be fetched: %v", binding.ScanSettingName, binding.Name, source.Label, err), + FixHint: fmt.Sprintf("Ensure ScanSetting %q exists in namespace %q on cluster %q.", binding.ScanSettingName, binding.Namespace, source.Label), + Skipped: true, + }) + continue + } + + // Map the binding to an ACS payload. + // Create a temporary config with the cluster ID for this source. + tempCfg := *r.cfg + tempCfg.ACSClusterID = source.ACSClusterID + + result := mapping.MapBinding(binding, ss, &tempCfg) + if result.Problem != nil { + collector.Add(*result.Problem) + continue + } + + // Track metadata for adoption. + ssbAdoptionMap[binding.Name] = append(ssbAdoptionMap[binding.Name], ssbClusterInfo{ + namespace: binding.Namespace, + oldSettingRef: binding.ScanSettingName, + clusterLabel: source.Label, + coClient: source.COClient, + }) + + // Add to the cluster's SSB list for merging. + clusterSSBs[source.ACSClusterID] = append(clusterSSBs[source.ACSClusterID], merge.MappedSSB{ + Name: binding.Name, + Profiles: extractProfileNames(binding), + Payload: *result.Payload, + }) + } + } + + // Step 3: merge SSBs across clusters. + r.status.Stage("Merge", "combining ScanSettingBindings across clusters") + mergeResult := merge.MergeSSBs(clusterSSBs) + + for _, problem := range mergeResult.Problems { + collector.Add(problem) + r.status.Warnf("%s: %s", problem.ResourceRef, problem.Description) + } + + r.status.OKf("merged into %d unique scan configurations", len(mergeResult.Merged)) + + // Step 4: reconcile merged payloads. 
+ r.status.Stage("Reconcile", "applying scan configurations to ACS") + maxRetries := r.cfg.MaxRetries + if maxRetries < 1 { + maxRetries = 1 + } + rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun, r.cfg.OverwriteExisting) + + var adoptRequests []adopt.Request + + for _, merged := range mergeResult.Merged { + source := models.ReportItemSource{ + BindingName: merged.Name, + // For multi-cluster, namespace and scanSettingName are per-cluster, so we leave them generic. + Namespace: "multi-cluster", + ScanSettingName: "merged", + } + + action := rec.Apply(ctx, merged.Payload, source, existingNames) + + switch action.ActionType { + case "create": + r.status.OKf("%s → created (%d clusters)", merged.Name, len(merged.Payload.Clusters)) + case "update": + r.status.OKf("%s → updated (%d clusters)", merged.Name, len(merged.Payload.Clusters)) + case "skip": + r.status.Detailf("%s → skipped (already exists)", merged.Name) + case "fail": + r.status.Failf("%s → %s", merged.Name, action.Reason) + } + + item := models.ReportItem{ + Source: action.Source, + Action: action.ActionType, + Reason: action.Reason, + Attempts: action.Attempts, + ACSScanConfigID: action.ACSScanConfigID, + } + if action.Err != nil { + item.Error = action.Err.Error() + } + builder.RecordItem(item) + + if action.Problem != nil { + collector.Add(*action.Problem) + } + + // Collect adoption requests for successfully created scan configs. + if action.ActionType == "create" && !r.cfg.DryRun { + for _, info := range ssbAdoptionMap[merged.Name] { + adoptRequests = append(adoptRequests, adopt.Request{ + SSBName: merged.Name, + SSBNamespace: info.namespace, + OldSettingRef: info.oldSettingRef, + ClusterLabel: info.clusterLabel, + COClient: info.coClient, + }) + } + } + } + + // Step 4b: adopt SSBs whose scan configs were just created. + if len(adoptRequests) > 0 { + r.runAdoption(ctx, adoptRequests) + } + + // Step 5: build and write report. 
+ finalReport := builder.Build(collector.All()) + + if r.cfg.ReportJSON != "" { + r.status.Stage("Report", "writing JSON report") + if err := builder.WriteJSON(r.cfg.ReportJSON, finalReport); err != nil { + r.status.Warnf("failed to write JSON report to %q: %v", r.cfg.ReportJSON, err) + } else { + r.status.OKf("report written to %s", r.cfg.ReportJSON) + } + } + + // Step 6: print console summary. + r.printf("\n") + r.printSummary(finalReport) + + // Step 7: determine exit code. + if finalReport.Counts.Failed > 0 || collector.HasErrors() { + return ExitPartialError + } + return ExitSuccess +} + +// extractProfileNames extracts profile names from a binding. +func extractProfileNames(binding cofetch.ScanSettingBinding) []string { + var names []string + for _, p := range binding.Profiles { + names = append(names, p.Name) + } + return names +} diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature index a98a270c60cea..5f5b4fef3d574 100644 --- a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -39,6 +39,25 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat Then payload.scanConfig.oneTimeScan MUST be false # IMP-MAP-003 And payload.scanConfig.scanSchedule MUST be present # IMP-MAP-004 + @mapping @schedule @wire-format + Scenario Outline: Schedule JSON wire format matches ACS API proto + Given ScanSetting "sched" has schedule "<cron>" + And ScanSettingBinding "binding" references "sched" + When the importer builds the ACS payload and serializes it to JSON + Then the JSON scanSchedule object MUST contain only fields defined in proto/api/v2/common.proto Schedule: + intervalType, hour, minute, daysOfWeek, daysOfMonth # IMP-MAP-004a + And the JSON scanSchedule.intervalType MUST be "<intervalType>" + And for WEEKLY: scanSchedule.daysOfWeek.days MUST be present 
# IMP-MAP-004b + And for MONTHLY: scanSchedule.daysOfMonth.days MUST be present # IMP-MAP-004c + And the full payload JSON field names MUST match ComplianceScanConfiguration proto: + id, scanName, scanConfig, clusters # IMP-MAP-004d + + Examples: + | cron | intervalType | + | 0 2 * * * | DAILY | + | 0 2 * * 0 | WEEKLY | + | 0 2 1 * * | MONTHLY | + @mapping @description Scenario: Build helpful description without ownership marker Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" @@ -100,8 +119,9 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis", "ocp4-moderate"] When the importer merges SSBs across clusters Then "cis-weekly" MUST be marked failed # IMP-MAP-020 - And problems list MUST include category "mapping" - And problem description MUST mention profile mismatch across clusters + And problems list MUST include category "conflict" + And problem description MUST mention mismatch across clusters + And the console MUST print a warning with the conflict reason # IMP-MAP-020a @mapping @clusters @multicluster @error Scenario: Error when same-name SSBs have mismatched schedules @@ -109,8 +129,9 @@ Feature: Map Compliance Operator scheduled scan resources to ACS scan configurat And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with schedule "0 3 * * 1" When the importer merges SSBs across clusters Then "cis-weekly" MUST be marked failed # IMP-MAP-020 - And problems list MUST include category "mapping" - And problem description MUST mention schedule mismatch across clusters + And problems list MUST include category "conflict" + And problem description MUST mention mismatch across clusters + And the console MUST print a warning with the conflict reason # IMP-MAP-020a @validation @mapping Scenario: Missing ScanSetting reference fails only that binding @@ -121,6 +142,55 @@ Feature: Map Compliance Operator scheduled scan 
resources to ACS scan configurat And that problem entry MUST include a fix hint # IMP-MAP-010 And other valid bindings MUST still be processed # IMP-MAP-011 + @mapping @adopt + Scenario: Adopt SSB after successful ACS scan config creation + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "my-old-setting" + And the importer successfully creates ACS scan config "cis-weekly" + And ACS creates ScanSetting "cis-weekly" on the cluster + When the importer runs the adoption step + Then SSB "cis-weekly" settingsRef.name MUST be patched to "cis-weekly" # IMP-ADOPT-001 + And the importer MUST log an info message about the adoption # IMP-ADOPT-002 + + @mapping @adopt + Scenario: Skip adoption when SSB already references the correct ScanSetting + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "cis-weekly" + And the importer successfully creates ACS scan config "cis-weekly" + When the importer runs the adoption step + Then SSB "cis-weekly" settingsRef.name MUST NOT be modified # IMP-ADOPT-003 + + @mapping @adopt @timeout + Scenario: Adoption warns on timeout waiting for ScanSetting + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "my-old-setting" + And the importer successfully creates ACS scan config "cis-weekly" + And ACS has NOT yet created ScanSetting "cis-weekly" on the cluster + When the adoption poll times out + Then the importer MUST log a warning # IMP-ADOPT-004 + And the SSB MUST NOT be modified # IMP-ADOPT-005 + And the importer MUST NOT exit with an error # IMP-ADOPT-006 + + @mapping @adopt @multicluster + Scenario: Adoption patches SSBs independently per cluster + Given kubecontext "ctx-a" has SSB "cis-weekly" referencing ScanSetting "setting-a" + And kubecontext "ctx-b" has SSB "cis-weekly" referencing ScanSetting "setting-b" + And the importer creates one ACS scan config 
"cis-weekly" for both clusters + And ACS creates ScanSetting "cis-weekly" on both clusters + When the importer runs the adoption step + Then SSB "cis-weekly" on ctx-a MUST be patched to reference "cis-weekly" # IMP-ADOPT-007 + And SSB "cis-weekly" on ctx-b MUST be patched to reference "cis-weekly" # IMP-ADOPT-007 + + @mapping @adopt @multicluster @partial + Scenario: Partial adoption succeeds when one cluster times out + Given kubecontext "ctx-a" has SSB "cis-weekly" referencing ScanSetting "setting-a" + And kubecontext "ctx-b" has SSB "cis-weekly" referencing ScanSetting "setting-b" + And ACS creates ScanSetting "cis-weekly" on ctx-a but NOT on ctx-b + When the importer runs the adoption step + Then SSB "cis-weekly" on ctx-a MUST be patched # IMP-ADOPT-008 + And the importer MUST warn about ctx-b timeout # IMP-ADOPT-008 + And the importer MUST NOT exit with an error # IMP-ADOPT-006 + @mapping @schedule @problems Scenario: Invalid schedule is collected as problem and skipped Given ScanSetting "bad-schedule" has schedule "every day at noon" diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md index b6c58f4f3fa77..9e24a1ca5da18 100644 --- a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -5,7 +5,7 @@ Use this matrix to ensure complete implementation coverage. 
|Requirement ID|Spec source|Test level|Notes| |---|---|---|---| |IMP-CLI-001..027|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, auth modes, multi-cluster, --overwrite-existing| -|IMP-MAP-001..021|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging| +|IMP-MAP-001..021, IMP-MAP-020a|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging, merge conflict console output| |IMP-IDEM-001..009|`03-idempotency-dry-run-retries.feature`|Unit + integration|Idempotency, overwrite mode (PUT), dry-run reporting| |IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| |IMP-ACC-001..017|`04-validation-and-acceptance.md`|Acceptance|Real cluster, ACS verification, multi-cluster merge, auto-discovery| From fee330178f3d1b7a1246beae2285f2c0e8f9db3c Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:21:55 +0100 Subject: [PATCH 14/24] fix(co-importer): demo conflict scenario uses k8s-side drift not ACS-side MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous demo modified the ACS scan config schedule directly via API, but after SSB adoption ACS pushes its schedule back down to the cluster ScanSetting — so there was no real conflict visible. Changed to simulate drift from the Kubernetes side: update the original ScanSetting schedule (02:00 → 05:00) and patch the SSB's settingsRef back to it. Now the importer reads 05:00 from k8s while ACS has 02:00, showing a genuine drift that --overwrite-existing resolves. Also fixed cleanup to delete ACS-created ScanSettings (named after SSBs). Tested end-to-end in non-interactive mode (DEMO_AUTO=1 DEMO_PAUSE=0). Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../compliance-operator-importer/hack/demo.sh | 498 ++++++++++++++++++ 1 file changed, 498 insertions(+) create mode 100755 scripts/compliance-operator-importer/hack/demo.sh diff --git a/scripts/compliance-operator-importer/hack/demo.sh b/scripts/compliance-operator-importer/hack/demo.sh new file mode 100755 index 0000000000000..546a119997d15 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/demo.sh @@ -0,0 +1,498 @@ +#!/usr/bin/env bash +# demo.sh — Interactive demo of the CO → ACS scheduled scan importer. +# +# Prerequisites: +# - kubectl configured with at least one context pointing to an OCP cluster +# with the Compliance Operator installed +# - ACS Central reachable from this machine +# - ROX_ADMIN_PASSWORD or ROX_API_TOKEN set +# - ROX_ENDPOINT set (or passed via --endpoint) +# - The importer binary built: +# cd scripts/compliance-operator-importer && go build -o compliance-operator-importer ./cmd/importer +# +# Usage: +# ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./demo.sh +# +# Non-interactive mode (for CI/testing): +# DEMO_AUTO=1 ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./demo.sh +# DEMO_AUTO=1 DEMO_PAUSE=0 ... # no pauses at all + +set -euo pipefail + +# ───────────────────────────────────────────────────────────────────────────── +# Configuration +# ───────────────────────────────────────────────────────────────────────────── + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPORTER="${SCRIPT_DIR}/../compliance-operator-importer" +CO_NS="openshift-compliance" + +# Resolve ACS endpoint — strip scheme, the importer adds it back. +ACS_ENDPOINT="${ROX_ENDPOINT:?ROX_ENDPOINT must be set}" +ACS_URL="https://${ACS_ENDPOINT#https://}" +ACS_URL="${ACS_URL#http://}" +ACS_URL="https://${ACS_URL#https://}" + +# Auth for curl calls (basic auth only for this demo). +CURL_AUTH=(-u "admin:${ROX_ADMIN_PASSWORD:?ROX_ADMIN_PASSWORD must be set}") + +# Importer flags. 
+IMPORTER_FLAGS=(--endpoint "$ACS_ENDPOINT" --insecure-skip-verify) + +# Demo SSB names — prefixed to avoid collisions with real workloads. +DEMO_PREFIX="demo-import" +SSB_CIS="${DEMO_PREFIX}-cis-scan" +SSB_MODERATE="${DEMO_PREFIX}-moderate-scan" +SSB_PCI="${DEMO_PREFIX}-pci-dss-scan" + +# ───────────────────────────────────────────────────────────────────────────── +# Helpers +# ───────────────────────────────────────────────────────────────────────────── + +# Terminal colours. +BOLD='\033[1m' +DIM='\033[2m' +CYAN='\033[36m' +GREEN='\033[32m' +YELLOW='\033[33m' +RED='\033[31m' +MAGENTA='\033[35m' +RESET='\033[0m' + +banner() { + local width=72 + echo "" + echo -e "${CYAN}${BOLD}$(printf '═%.0s' $(seq 1 $width))${RESET}" + echo "$1" + echo -e "${CYAN}${BOLD}$(printf '═%.0s' $(seq 1 $width))${RESET}" + echo "" +} + +section() { + echo "" + echo -e "${MAGENTA}${BOLD}── $1 ──${RESET}" + echo "" +} + +info() { + echo -e "${DIM}$1${RESET}" +} + +narrate() { + echo -e "${YELLOW}$1${RESET}" +} + +success() { + echo -e "${GREEN} ✓ $1${RESET}" +} + +fail_msg() { + echo -e "${RED} ✗ $1${RESET}" +} + +pause() { + echo "" + if [[ "${DEMO_AUTO:-}" == "1" ]]; then + sleep "${DEMO_PAUSE:-2}" + else + echo -ne "${DIM}Press ENTER to continue...${RESET}" + read -r + fi + echo "" +} + +run_cmd() { + echo -e "${BOLD}\$ $*${RESET}" + "$@" 2>&1 || true + echo "" +} + +acs_api() { + local method="$1" path="$2" + shift 2 + curl -sk "${CURL_AUTH[@]}" -X "$method" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + "${ACS_URL}${path}" "$@" +} + +# ───────────────────────────────────────────────────────────────────────────── +# Cleanup helper — removes all demo resources +# ───────────────────────────────────────────────────────────────────────────── + +cleanup_demo_resources() { + local quiet="${1:-}" + + [[ -z "$quiet" ]] && info "Cleaning up demo resources..." + + # Delete demo SSBs from the cluster. 
+ for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + kubectl delete scansettingbinding "$ssb" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + done + + # Delete demo ScanSettings (original + ACS-created ones named after SSBs). + kubectl delete scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + kubectl delete scansetting "$ssb" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + done + + # Delete demo scan configs from ACS. + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + local config_id + config_id=$(echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '$ssb': + print(c['id']) + break +" 2> /dev/null || true) + if [[ -n "$config_id" ]]; then + acs_api DELETE "/v2/compliance/scan/configurations/$config_id" > /dev/null 2>&1 || true + fi + done + + [[ -z "$quiet" ]] && success "Done" + return 0 +} + +# ───────────────────────────────────────────────────────────────────────────── +# Trap — clean up on exit or interrupt +# ───────────────────────────────────────────────────────────────────────────── + +trap 'echo ""; cleanup_demo_resources' EXIT + +# ═════════════════════════════════════════════════════════════════════════════ +# DEMO START +# ═════════════════════════════════════════════════════════════════════════════ + +clear +banner "CO → ACS Scheduled Scan Importer — Interactive Demo" + +narrate "This demo walks through the importer tool that reads Compliance Operator" +narrate "ScanSettingBinding resources from Kubernetes and creates equivalent scan" +narrate "configurations in Red Hat Advanced Cluster Security (ACS)." +echo "" +narrate "We will:" +narrate " 1. Create demo ScanSettingBindings on the cluster" +narrate " 2. 
Run the importer in dry-run mode" +narrate " 3. Run the importer for real (happy path)" +narrate " 4. Run again to see skip behaviour (idempotency)" +narrate " 5. Simulate schedule drift on the Kubernetes side" +narrate " 6. Run without --overwrite-existing (drift preserved)" +narrate " 7. Run with --overwrite-existing (drift resolved)" +echo "" +info "Cluster: $(kubectl config current-context)" +info "ACS: $ACS_URL" +info "CO NS: $CO_NS" + +pause + +# Pre-clean: silently remove leftovers from a previous run. +cleanup_demo_resources quiet + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 1: Create demo ScanSetting and ScanSettingBindings +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 1: Create Demo Resources" + +narrate "First, we create a ScanSetting with a daily schedule (02:00 UTC)," +narrate "then three ScanSettingBindings that reference it — each binding" +narrate "targets a different compliance profile." 
+ +pause + +section "Creating ScanSetting: ${DEMO_PREFIX}-setting" +info "Schedule: 0 2 * * * (daily at 02:00)" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSetting +metadata: + name: ${DEMO_PREFIX}-setting + namespace: ${CO_NS} +schedule: "0 2 * * *" +roles: + - worker + - master +rawResultStorage: + rotation: 3 + size: 1Gi +EOF + +section "Creating ScanSettingBinding: ${SSB_CIS}" +info "Profile: ocp4-cis" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_CIS} + namespace: ${CO_NS} +profiles: + - name: ocp4-cis + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Creating ScanSettingBinding: ${SSB_MODERATE}" +info "Profile: ocp4-moderate" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_MODERATE} + namespace: ${CO_NS} +profiles: + - name: ocp4-moderate + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Creating ScanSettingBinding: ${SSB_PCI}" +info "Profile: ocp4-pci-dss" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_PCI} + namespace: ${CO_NS} +profiles: + - name: ocp4-pci-dss + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Verify: resources on the cluster" +run_cmd kubectl get scansettingbindings.compliance.openshift.io -n "$CO_NS" \ + -l '!app.kubernetes.io/managed-by' \ + -o 
custom-columns='NAME:.metadata.name,SETTING:.settingsRef.name,PROFILES:.profiles[*].name' + +narrate "Three ScanSettingBindings created, each referencing the demo ScanSetting." +narrate "The importer will read these and create matching ACS scan configurations." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 2: Dry run +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 2: Dry Run" + +narrate "Before making any changes, let's preview what the importer would do." +narrate "The --dry-run flag shows planned actions without touching ACS." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" --dry-run + +narrate "The importer discovered our 3 demo SSBs, mapped them to ACS scan" +narrate "configurations, and reported that it would create all three." +narrate "No changes were made to ACS." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 3: Happy path — real import +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 3: Import (Happy Path)" + +narrate "Now let's run the importer for real. It will create three scan" +narrate "configurations in ACS, one for each ScanSettingBinding." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +section "Verify: scan configurations in ACS" +info "Querying ACS API for our demo scan configs..." 
+echo "" + +for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + local_configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + found=$(echo "$local_configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '$ssb': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + profiles = c.get('scanConfig', {}).get('profiles', []) + print(f\" Name: {c['scanName']}\") + print(f\" ID: {c['id']}\") + print(f\" Schedule: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + print(f\" Profiles: {', '.join(profiles)}\") + break +" 2> /dev/null || true) + if [[ -n "$found" ]]; then + success "Found in ACS:" + echo "$found" + echo "" + fi +done + +narrate "All three scan configurations were created successfully in ACS." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 4: Idempotency — run again, expect skips +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 4: Idempotency" + +narrate "What happens if we run the importer again? Since the scan configurations" +narrate "already exist in ACS, the importer should skip them gracefully." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +narrate "All three were skipped — the importer is idempotent by default." +narrate "It detects existing scan configs by name and does not create duplicates." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 5: Simulate schedule drift on the Kubernetes side +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 5: Simulate Schedule Drift" + +narrate "After the initial import, each SSB was adopted — its settingsRef now" +narrate "points to an ACS-managed ScanSetting (same name as the scan config)." 
+narrate "" +narrate "Let's simulate a real-world scenario: a cluster admin changes the" +narrate "schedule on the Kubernetes side. We'll:" +narrate " 1. Update the original ScanSetting schedule from 02:00 → 05:00" +narrate " 2. Patch the CIS SSB's settingsRef back to the original ScanSetting" +narrate "" +narrate "Now the cluster says DAILY 05:00 but ACS still has DAILY 02:00." + +pause + +section "Updating ScanSetting schedule on the cluster" +info "Changing schedule: 0 2 * * * → 0 5 * * * (daily at 05:00)" + +run_cmd kubectl patch scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" \ + --type merge -p '{"schedule": "0 5 * * *"}' + +section "Pointing CIS SSB back to the original ScanSetting" +info "Changing settingsRef: ${SSB_CIS} → ${DEMO_PREFIX}-setting" + +run_cmd kubectl patch scansettingbinding "${SSB_CIS}" -n "$CO_NS" \ + --type merge -p "{\"settingsRef\": {\"name\": \"${DEMO_PREFIX}-setting\"}}" + +section "Verify: cluster vs ACS" +echo "" +echo -e "${BOLD}On the cluster:${RESET}" +kubectl get scansettingbinding "${SSB_CIS}" -n "$CO_NS" \ + -o custom-columns='SSB:.metadata.name,SETTINGS_REF:.settingsRef.name' --no-headers +kubectl get scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" \ + -o custom-columns='SCANSETTING:.metadata.name,SCHEDULE:.schedule' --no-headers +echo "" +echo -e "${BOLD}In ACS:${RESET}" +acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '${SSB_CIS}': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + print(f\" {c['scanName']}: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + break +" 2> /dev/null +echo "" + +narrate "The cluster admin changed the schedule to 05:00, but ACS still has 02:00." +narrate "This is schedule drift — the importer can detect and fix it." 
+ +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 6: Run without --overwrite-existing (skip conflict) +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 6: Default Behaviour (Skip Conflicts)" + +narrate "Running the importer without --overwrite-existing. The scan config" +narrate "already exists in ACS, so the importer will skip it — even though" +narrate "the schedule has drifted on the cluster." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +narrate "All three were skipped — the importer found existing configs by name" +narrate "and left them untouched. The drifted CIS config was NOT updated." +narrate "This is the safe default: no surprises, no overwrites." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 7: Run with --overwrite-existing (resolve drift) +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 7: Overwrite Mode (Resolve Drift)" + +narrate "Now let's run with --overwrite-existing. This tells the importer to" +narrate "update existing ACS scan configs to match what's on the cluster." +narrate "The CIS config in ACS will be updated from 02:00 → 05:00." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" --overwrite-existing + +section "Verify: ACS now matches the cluster" +acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '${SSB_CIS}': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + print(f\" Name: {c['scanName']}\") + print(f\" Schedule: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + break +" 2> /dev/null +echo "" + +narrate "The CIS scan config has been updated to DAILY 05:00 — matching the" +narrate "cluster's ScanSetting. 
The --overwrite-existing flag ensures ACS" +narrate "stays in sync with the Compliance Operator source of truth." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# Done — EXIT trap handles cleanup automatically +# ───────────────────────────────────────────────────────────────────────────── + +banner "Demo Complete" + +narrate "Summary of what we demonstrated:" +echo "" +echo -e " ${GREEN}1.${RESET} Created CO resources (ScanSetting + 3 ScanSettingBindings)" +echo -e " ${GREEN}2.${RESET} Dry-run mode: preview without side effects" +echo -e " ${GREEN}3.${RESET} Happy path: imported all SSBs into ACS scan configs + adoption" +echo -e " ${GREEN}4.${RESET} Idempotency: re-run skips existing configs safely" +echo -e " ${GREEN}5.${RESET} Schedule drift: changed ScanSetting schedule on the cluster" +echo -e " ${GREEN}6.${RESET} Default skip: drift preserved without --overwrite-existing" +echo -e " ${GREEN}7.${RESET} Overwrite mode: drift resolved, ACS re-synced to cluster" +echo "" + +# The EXIT trap handles cleanup automatically. From f63097737fa87776335146622853cc8b401a82f8 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:48:46 +0100 Subject: [PATCH 15/24] fix(co-importer): demo drift edits ACS-managed ScanSetting on cluster MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The drift scenario now patches the ACS-managed ScanSetting directly on the cluster (kubectl patch scansetting ). ACS does not detect this change, so the UI still shows the original schedule while scans actually run on the new one — a silent drift. This is more realistic than the previous version (which patched the SSB's settingsRef back to a different ScanSetting) and demonstrates the exact gap the importer fills: reading the actual cluster state and syncing ACS to match. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../compliance-operator-importer/hack/demo.sh | 35 +++++++------------ 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/scripts/compliance-operator-importer/hack/demo.sh b/scripts/compliance-operator-importer/hack/demo.sh index 546a119997d15..b4f5d59584629 100755 --- a/scripts/compliance-operator-importer/hack/demo.sh +++ b/scripts/compliance-operator-importer/hack/demo.sh @@ -379,36 +379,27 @@ banner "Step 5: Simulate Schedule Drift" narrate "After the initial import, each SSB was adopted — its settingsRef now" narrate "points to an ACS-managed ScanSetting (same name as the scan config)." narrate "" -narrate "Let's simulate a real-world scenario: a cluster admin changes the" -narrate "schedule on the Kubernetes side. We'll:" -narrate " 1. Update the original ScanSetting schedule from 02:00 → 05:00" -narrate " 2. Patch the CIS SSB's settingsRef back to the original ScanSetting" -narrate "" -narrate "Now the cluster says DAILY 05:00 but ACS still has DAILY 02:00." +narrate "Let's simulate a real-world scenario: someone edits the ACS-managed" +narrate "ScanSetting directly on the cluster (e.g. via kubectl). ACS does NOT" +narrate "detect this change — the UI still shows the original schedule, but" +narrate "scans actually run on the new schedule. A silent drift." 
pause -section "Updating ScanSetting schedule on the cluster" -info "Changing schedule: 0 2 * * * → 0 5 * * * (daily at 05:00)" +section "Editing ACS-managed ScanSetting directly on the cluster" +info "ScanSetting '${SSB_CIS}' was created by ACS with schedule 0 2 * * *" +info "Patching it to 0 5 * * * (daily at 05:00)" -run_cmd kubectl patch scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" \ +run_cmd kubectl patch scansetting "${SSB_CIS}" -n "$CO_NS" \ --type merge -p '{"schedule": "0 5 * * *"}' -section "Pointing CIS SSB back to the original ScanSetting" -info "Changing settingsRef: ${SSB_CIS} → ${DEMO_PREFIX}-setting" - -run_cmd kubectl patch scansettingbinding "${SSB_CIS}" -n "$CO_NS" \ - --type merge -p "{\"settingsRef\": {\"name\": \"${DEMO_PREFIX}-setting\"}}" - section "Verify: cluster vs ACS" echo "" -echo -e "${BOLD}On the cluster:${RESET}" -kubectl get scansettingbinding "${SSB_CIS}" -n "$CO_NS" \ - -o custom-columns='SSB:.metadata.name,SETTINGS_REF:.settingsRef.name' --no-headers -kubectl get scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" \ +echo -e "${BOLD}On the cluster (actual behaviour):${RESET}" +kubectl get scansetting "${SSB_CIS}" -n "$CO_NS" \ -o custom-columns='SCANSETTING:.metadata.name,SCHEDULE:.schedule' --no-headers echo "" -echo -e "${BOLD}In ACS:${RESET}" +echo -e "${BOLD}In ACS (what the UI shows):${RESET}" acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null | python3 -c " import sys, json data = json.load(sys.stdin) @@ -420,8 +411,8 @@ for c in data.get('configurations', []): " 2> /dev/null echo "" -narrate "The cluster admin changed the schedule to 05:00, but ACS still has 02:00." -narrate "This is schedule drift — the importer can detect and fix it." +narrate "The cluster now scans at 05:00, but ACS still thinks it's 02:00." +narrate "This silent drift is exactly what the importer can detect and fix." 
pause From b91157e5c5601a42d117ea99742ad33c58af362a Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:51:17 +0100 Subject: [PATCH 16/24] fix(co-importer): use DaysOfWeek instead of Weekly to match ACS proto (IMP-MAP-004a) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ACS v2 Schedule proto defines daysOfWeek (repeated int32) not a "weekly" wrapper. The previous ACSWeekly struct serialized to {"weekly":{"day":0}} which the gRPC gateway silently ignored — weekly scans were treated as daily. Replace ACSWeekly with ACSDaysOfWeek{Days []int32} matching the proto. Add wire-format tests (IMP-MAP-004a..d) that serialize payloads to JSON and assert field names match proto/api/v2/common.proto. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/mapping/mapping_test.go | 197 ++++++++++++++++++ .../internal/mapping/schedule.go | 2 +- .../internal/mapping/schedule_test.go | 20 +- .../internal/models/models.go | 9 +- 4 files changed, 210 insertions(+), 18 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/mapping/mapping_test.go b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go index 64136c337bf71..a179cf6fa93ea 100644 --- a/scripts/compliance-operator-importer/internal/mapping/mapping_test.go +++ b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go @@ -1,6 +1,7 @@ package mapping import ( + "encoding/json" "strings" "testing" @@ -305,3 +306,199 @@ func TestIMP_MAP_015_InvalidCronFixHintMentionsCron(t *testing.T) { t.Errorf("Problem.FixHint: want it to mention %q, got %q", "cron", result.Problem.FixHint) } } + +// ─── Wire-format tests (IMP-MAP-004a..d) ───────────────────────────────────── +// +// These tests serialize the ACS payload to JSON and verify that field names +// match the proto/api/v2 schema. 
They would have caught the Weekly vs DaysOfWeek +// bug: the ACS API proto has "daysOfWeek" but no "weekly" field, so a JSON +// containing "weekly" would be silently ignored by the gRPC gateway. + +// allowedScheduleKeys are the JSON keys allowed in a serialized ACSSchedule, +// matching proto/api/v2/common.proto message Schedule. +var allowedScheduleKeys = map[string]bool{ + "intervalType": true, + "hour": true, + "minute": true, + "daysOfWeek": true, + "daysOfMonth": true, +} + +// allowedPayloadKeys are the top-level JSON keys allowed in a serialized +// ACSCreatePayload, matching proto ComplianceScanConfiguration. +var allowedPayloadKeys = map[string]bool{ + "scanName": true, + "scanConfig": true, + "clusters": true, +} + +// allowedScanConfigKeys are the JSON keys allowed in a serialized +// ACSBaseScanConfig, matching proto BaseComplianceScanConfigurationSettings. +var allowedScanConfigKeys = map[string]bool{ + "oneTimeScan": true, + "profiles": true, + "scanSchedule": true, + "description": true, +} + +// TestIMP_MAP_004a_PayloadWireFormat_AllScheduleTypes verifies that the full +// ACS payload serializes to JSON with only proto-valid field names for each +// schedule type: DAILY, WEEKLY, MONTHLY. 
+func TestIMP_MAP_004a_PayloadWireFormat_AllScheduleTypes(t *testing.T) { + cases := []struct { + name string + cron string + wantInterval string + wantDOW bool // expect daysOfWeek present + wantDOM bool // expect daysOfMonth present + }{ + {name: "DAILY", cron: "0 2 * * *", wantInterval: "DAILY"}, + {name: "WEEKLY_Sunday", cron: "0 2 * * 0", wantInterval: "WEEKLY", wantDOW: true}, + {name: "WEEKLY_Friday", cron: "30 14 * * 5", wantInterval: "WEEKLY", wantDOW: true}, + {name: "MONTHLY_1st", cron: "0 2 1 * *", wantInterval: "MONTHLY", wantDOM: true}, + {name: "MONTHLY_15th", cron: "0 6 15 * *", wantInterval: "MONTHLY", wantDOM: true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + binding := cofetch.ScanSettingBinding{ + Namespace: "ns", + Name: "b", + ScanSettingName: "ss", + Profiles: []cofetch.ProfileRef{{Name: "ocp4-cis", Kind: "Profile"}}, + } + ss := &cofetch.ScanSetting{Namespace: "ns", Name: "ss", Schedule: tc.cron} + cfg := &models.Config{ACSClusterID: "cluster-1"} + + result := MapBinding(binding, ss, cfg) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + + // Serialize to JSON — this is the wire format sent to the ACS API. + data, err := json.Marshal(result.Payload) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + + // Parse back to a generic map to inspect field names. + var raw map[string]json.RawMessage + if err := json.Unmarshal(data, &raw); err != nil { + t.Fatalf("unmarshal payload: %v", err) + } + + // IMP-MAP-004d: top-level payload keys must match proto. + for key := range raw { + if !allowedPayloadKeys[key] { + t.Errorf("payload contains unexpected JSON key %q (not in ComplianceScanConfiguration proto)", key) + } + } + + // Parse scanConfig. 
+ var scanConfig map[string]json.RawMessage + if err := json.Unmarshal(raw["scanConfig"], &scanConfig); err != nil { + t.Fatalf("unmarshal scanConfig: %v", err) + } + for key := range scanConfig { + if !allowedScanConfigKeys[key] { + t.Errorf("scanConfig contains unexpected JSON key %q (not in BaseComplianceScanConfigurationSettings proto)", key) + } + } + + // Parse scanSchedule. + schedRaw, ok := scanConfig["scanSchedule"] + if !ok { + t.Fatal("scanSchedule missing from JSON") + } + var sched map[string]json.RawMessage + if err := json.Unmarshal(schedRaw, &sched); err != nil { + t.Fatalf("unmarshal scanSchedule: %v", err) + } + + // IMP-MAP-004a: schedule keys must only be proto-valid. + for key := range sched { + if !allowedScheduleKeys[key] { + t.Errorf("scanSchedule contains unexpected JSON key %q (not in Schedule proto; would be silently ignored by gRPC gateway)", key) + } + } + + // Verify intervalType. + var intervalType string + if err := json.Unmarshal(sched["intervalType"], &intervalType); err != nil { + t.Fatalf("unmarshal intervalType: %v", err) + } + if intervalType != tc.wantInterval { + t.Errorf("intervalType: want %q, got %q", tc.wantInterval, intervalType) + } + + // IMP-MAP-004b: WEEKLY must have daysOfWeek. + if tc.wantDOW { + if _, ok := sched["daysOfWeek"]; !ok { + t.Error("WEEKLY schedule missing daysOfWeek in JSON (API would have no day-of-week info)") + } + } + + // IMP-MAP-004c: MONTHLY must have daysOfMonth. + if tc.wantDOM { + if _, ok := sched["daysOfMonth"]; !ok { + t.Error("MONTHLY schedule missing daysOfMonth in JSON (API would have no day-of-month info)") + } + } + + // DAILY should NOT have daysOfWeek or daysOfMonth. 
+ if !tc.wantDOW && !tc.wantDOM { + if _, ok := sched["daysOfWeek"]; ok { + t.Error("DAILY schedule should not have daysOfWeek in JSON") + } + if _, ok := sched["daysOfMonth"]; ok { + t.Error("DAILY schedule should not have daysOfMonth in JSON") + } + } + }) + } +} + +// TestIMP_MAP_004b_WeeklyDaysOfWeekValue verifies the daysOfWeek.days array +// contains the correct day-of-week integer for weekly schedules. +func TestIMP_MAP_004b_WeeklyDaysOfWeekValue(t *testing.T) { + cases := []struct { + cron string + wantDay int32 + }{ + {"0 0 * * 0", 0}, // Sunday + {"0 0 * * 1", 1}, // Monday + {"0 0 * * 6", 6}, // Saturday + } + + for _, tc := range cases { + t.Run(string(rune('0'+tc.wantDay)), func(t *testing.T) { + ss := &cofetch.ScanSetting{Namespace: "ns", Name: "s", Schedule: tc.cron} + binding := cofetch.ScanSettingBinding{ + Namespace: "ns", Name: "b", ScanSettingName: "s", + Profiles: []cofetch.ProfileRef{{Name: "p", Kind: "Profile"}}, + } + result := MapBinding(binding, ss, &models.Config{ACSClusterID: "c"}) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + + data, _ := json.Marshal(result.Payload) + var parsed struct { + ScanConfig struct { + ScanSchedule struct { + DaysOfWeek struct { + Days []int32 `json:"days"` + } `json:"daysOfWeek"` + } `json:"scanSchedule"` + } `json:"scanConfig"` + } + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("unmarshal: %v", err) + } + days := parsed.ScanConfig.ScanSchedule.DaysOfWeek.Days + if len(days) != 1 || days[0] != tc.wantDay { + t.Errorf("daysOfWeek.days: want [%d], got %v", tc.wantDay, days) + } + }) + } +} diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule.go b/scripts/compliance-operator-importer/internal/mapping/schedule.go index 58a2e5211f312..004a07939f7d5 100644 --- a/scripts/compliance-operator-importer/internal/mapping/schedule.go +++ b/scripts/compliance-operator-importer/internal/mapping/schedule.go @@ -94,7 +94,7 @@ func 
ConvertCronToACSSchedule(cron string) (*models.ACSSchedule, error) { IntervalType: "WEEKLY", Hour: hourVal, Minute: minVal, - Weekly: &models.ACSWeekly{Day: dowVal}, + DaysOfWeek: &models.ACSDaysOfWeek{Days: []int32{dowVal}}, }, nil default: diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule_test.go b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go index 0dd28f8f17938..462a1bc4890bd 100644 --- a/scripts/compliance-operator-importer/internal/mapping/schedule_test.go +++ b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go @@ -25,8 +25,8 @@ func TestIMP_MAP_003_IMP_MAP_004_DailySchedule(t *testing.T) { if got.Minute != 0 { t.Errorf("Minute: want 0, got %d", got.Minute) } - if got.Weekly != nil { - t.Errorf("Weekly: want nil for DAILY, got %+v", got.Weekly) + if got.DaysOfWeek != nil { + t.Errorf("DaysOfWeek: want nil for DAILY, got %+v", got.DaysOfWeek) } if got.DaysOfMonth != nil { t.Errorf("DaysOfMonth: want nil for DAILY, got %+v", got.DaysOfMonth) @@ -67,11 +67,11 @@ func TestIMP_MAP_003_IMP_MAP_004_WeeklySchedule(t *testing.T) { if got.Minute != 0 { t.Errorf("Minute: want 0, got %d", got.Minute) } - if got.Weekly == nil { - t.Fatal("Weekly: want non-nil for WEEKLY schedule") + if got.DaysOfWeek == nil { + t.Fatal("DaysOfWeek: want non-nil for WEEKLY schedule") } - if got.Weekly.Day != 0 { - t.Errorf("Weekly.Day: want 0 (Sunday), got %d", got.Weekly.Day) + if len(got.DaysOfWeek.Days) != 1 || got.DaysOfWeek.Days[0] != 0 { + t.Errorf("DaysOfWeek.Days: want [0] (Sunday), got %v", got.DaysOfWeek.Days) } } @@ -84,11 +84,11 @@ func TestIMP_MAP_003_IMP_MAP_004_WeeklyScheduleSaturday(t *testing.T) { if got.IntervalType != "WEEKLY" { t.Errorf("IntervalType: want WEEKLY, got %q", got.IntervalType) } - if got.Weekly == nil { - t.Fatal("Weekly: want non-nil") + if got.DaysOfWeek == nil { + t.Fatal("DaysOfWeek: want non-nil for WEEKLY schedule") } - if got.Weekly.Day != 6 { - t.Errorf("Weekly.Day: want 6 
(Saturday), got %d", got.Weekly.Day) + if len(got.DaysOfWeek.Days) != 1 || got.DaysOfWeek.Days[0] != 6 { + t.Errorf("DaysOfWeek.Days: want [6] (Saturday), got %v", got.DaysOfWeek.Days) } } diff --git a/scripts/compliance-operator-importer/internal/models/models.go b/scripts/compliance-operator-importer/internal/models/models.go index 9c83f97215acc..9dd1a2b617e2b 100644 --- a/scripts/compliance-operator-importer/internal/models/models.go +++ b/scripts/compliance-operator-importer/internal/models/models.go @@ -63,21 +63,16 @@ type Problem struct { } // ACSSchedule is the schedule portion of an ACS scan configuration. +// Fields map to the v2.Schedule proto message in proto/api/v2/common.proto. type ACSSchedule struct { IntervalType string `json:"intervalType,omitempty"` Hour int32 `json:"hour"` Minute int32 `json:"minute"` - Weekly *ACSWeekly `json:"weekly,omitempty"` DaysOfWeek *ACSDaysOfWeek `json:"daysOfWeek,omitempty"` DaysOfMonth *ACSDaysOfMonth `json:"daysOfMonth,omitempty"` } -// ACSWeekly holds the day-of-week for a weekly ACS schedule. -type ACSWeekly struct { - Day int32 `json:"day"` -} - -// ACSDaysOfWeek holds multiple days for a multi-day-of-week ACS schedule. +// ACSDaysOfWeek holds days for a weekly ACS schedule (Sunday=0 .. Saturday=6). type ACSDaysOfWeek struct { Days []int32 `json:"days"` } From 9a532e6a1e04b9d293d657df5c07a2da751388fd Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:51:27 +0100 Subject: [PATCH 17/24] fix(co-importer): include response body in ACS HTTP error messages When CreateScanConfiguration or UpdateScanConfiguration returns a non-2xx status, the error message now includes a snippet of the response body. This makes it clear *why* the ACS API rejected the request (e.g. "Unable to find all profiles for scan configuration"). Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../internal/acs/client.go | 29 ++++++++++- .../internal/acs/client_test.go | 50 +++++++++++++++++++ 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/acs/client.go b/scripts/compliance-operator-importer/internal/acs/client.go index 749e6693f1255..3d9e40049d717 100644 --- a/scripts/compliance-operator-importer/internal/acs/client.go +++ b/scripts/compliance-operator-importer/internal/acs/client.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "os" "time" @@ -196,7 +197,7 @@ func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACS defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - return "", &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("POST /v2/compliance/scan/configurations returned HTTP %d", resp.StatusCode)} + return "", &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("POST /v2/compliance/scan/configurations returned HTTP %d: %s", resp.StatusCode, readBodySnippet(resp))} } var created complianceScanConfigurationResponse @@ -237,7 +238,7 @@ func (c *client) UpdateScanConfiguration(ctx context.Context, id string, payload defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("PUT /v2/compliance/scan/configurations/%s returned HTTP %d", id, resp.StatusCode)} + return &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("PUT /v2/compliance/scan/configurations/%s returned HTTP %d: %s", id, resp.StatusCode, readBodySnippet(resp))} } return nil @@ -306,6 +307,30 @@ func (c *client) ListClusters(ctx context.Context) ([]models.ACSClusterInfo, err return result, nil } +// readBodySnippet reads up to 512 bytes from the response body for error reporting. 
+func readBodySnippet(resp *http.Response) string { + const maxBytes = 512 + body, err := io.ReadAll(io.LimitReader(resp.Body, maxBytes)) + if err != nil || len(body) == 0 { + return "(no response body)" + } + snippet := string(body) + // Try to extract a cleaner message from JSON error responses. + var parsed struct { + Message string `json:"message"` + Error string `json:"error"` + } + if json.Unmarshal(body, &parsed) == nil { + if parsed.Message != "" { + return parsed.Message + } + if parsed.Error != "" { + return parsed.Error + } + } + return snippet +} + // HTTPError is returned by CreateScanConfiguration and UpdateScanConfiguration when the server responds with // a non-success HTTP status. The reconciler uses StatusCode() to decide whether // to retry (transient: 429,502,503,504) or abort (non-transient: 400,401,403,404). diff --git a/scripts/compliance-operator-importer/internal/acs/client_test.go b/scripts/compliance-operator-importer/internal/acs/client_test.go index a12ef7b2d7522..99d8258cd862d 100644 --- a/scripts/compliance-operator-importer/internal/acs/client_test.go +++ b/scripts/compliance-operator-importer/internal/acs/client_test.go @@ -6,6 +6,7 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -204,3 +205,52 @@ func TestNoPUTMethodOnInterface(t *testing.T) { // IMP-IDEM-003: This test documents the invariant. t.Log("IMP-IDEM-003: ACSClient interface has no PUT method - enforced by interface definition") } + +// TestCreateScanConfiguration_400_IncludesResponseBody verifies that HTTP 400 +// errors include the server's response body in the error message. +func TestCreateScanConfiguration_400_IncludesResponseBody(t *testing.T) { + const apiError = "Unable to find all profiles for scan configuration named \"cis-weekly\"." 
+ srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + _ = json.NewEncoder(w).Encode(map[string]string{"message": apiError}) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + payload := models.ACSCreatePayload{ + ScanName: "cis-weekly", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } + + _, err = client.CreateScanConfiguration(context.Background(), payload) + if err == nil { + t.Fatal("expected error for HTTP 400, got nil") + } + + // Verify the error message contains the API error text. + errMsg := err.Error() + if !strings.Contains(errMsg, apiError) { + t.Errorf("error message should contain API error %q, got: %s", apiError, errMsg) + } + + // Verify it's an HTTPError with correct status code. + type statusCoder interface{ StatusCode() int } + sc, ok := err.(statusCoder) + if !ok { + t.Fatal("expected error to satisfy StatusCode() interface") + } + if sc.StatusCode() != 400 { + t.Errorf("expected status code 400, got %d", sc.StatusCode()) + } +} From 9b4327ddc7d2e9ae91a4054e3fc5ab8e43ace6d6 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:54:00 +0100 Subject: [PATCH 18/24] feat(co-importer): adopt SSBs after ACS scan config creation (IMP-ADOPT-001..008) After the importer creates a scan config in ACS, ACS pushes a ScanSetting to the cluster with the same name. The SSB still references the old ScanSetting, so it's not managed by ACS yet. The adoption step: 1. Polls for the ACS-created ScanSetting to appear on the cluster 2. Patches the SSB's settingsRef.name to the new ScanSetting 3. 
Handles timeouts (warning, not error) and partial multi-cluster success independently per cluster New packages: - internal/adopt: adoption logic with poll/timeout/patch - internal/merge: SSB merging across clusters with conflict detection - internal/status: stage-by-stage console progress output Also adds PatchSSBSettingsRef to the COClient interface and wires adoption into both single-cluster (Run) and multi-cluster (RunMultiCluster) paths. Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/adopt/adopt.go | 134 +++++++ .../internal/adopt/adopt_test.go | 342 ++++++++++++++++++ .../internal/cofetch/client.go | 22 ++ .../internal/cofetch/types.go | 3 +- .../internal/merge/merge.go | 191 ++++++++++ .../internal/merge/merge_test.go | 295 +++++++++++++++ .../internal/run/run.go | 51 ++- .../internal/run/run_test.go | 8 + .../internal/status/status.go | 74 ++++ 9 files changed, 1114 insertions(+), 6 deletions(-) create mode 100644 scripts/compliance-operator-importer/internal/adopt/adopt.go create mode 100644 scripts/compliance-operator-importer/internal/adopt/adopt_test.go create mode 100644 scripts/compliance-operator-importer/internal/merge/merge.go create mode 100644 scripts/compliance-operator-importer/internal/merge/merge_test.go create mode 100644 scripts/compliance-operator-importer/internal/status/status.go diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt.go b/scripts/compliance-operator-importer/internal/adopt/adopt.go new file mode 100644 index 0000000000000..58f88a4a761c1 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/adopt/adopt.go @@ -0,0 +1,134 @@ +// Package adopt patches ScanSettingBinding resources on each cluster to +// reference the ScanSetting that ACS creates when a scan configuration is +// pushed to Sensor. This completes the "handover" so the SSB is fully +// managed by ACS going forward. 
+package adopt + +import ( + "context" + "fmt" + "time" + + "github.com/stackrox/co-acs-importer/internal/cofetch" +) + +// DefaultPollInterval is how often to check for the ScanSetting. +const DefaultPollInterval = 3 * time.Second + +// DefaultPollTimeout is the maximum time to wait for the ScanSetting to appear. +const DefaultPollTimeout = 60 * time.Second + +// Request describes one SSB that should be adopted after an ACS scan config +// was successfully created. +type Request struct { + SSBName string // ScanSettingBinding name (= ACS scan config name) + SSBNamespace string + OldSettingRef string // current settingsRef.name on the SSB + ClusterLabel string // kubeconfig context name, for logging + COClient cofetch.COClient // k8s client scoped to this cluster +} + +// Result records the outcome for one adoption request. +type Result struct { + SSBName string + ClusterLabel string + Adopted bool // true if the SSB was patched + Skipped bool // true if settingsRef already correct + TimedOut bool // true if the ScanSetting didn't appear in time + Err error // non-nil on unexpected failure + Message string // human-readable description of what happened +} + +// Adopter runs the adoption step for a batch of requests. +type Adopter struct { + PollInterval time.Duration + PollTimeout time.Duration +} + +// New creates an Adopter with default poll settings. +func New() *Adopter { + return &Adopter{ + PollInterval: DefaultPollInterval, + PollTimeout: DefaultPollTimeout, + } +} + +// Adopt processes a list of adoption requests. Each request is handled +// independently — a failure or timeout on one cluster does not block others. 
+func (a *Adopter) Adopt(ctx context.Context, requests []Request) []Result { + results := make([]Result, 0, len(requests)) + for _, req := range requests { + results = append(results, a.adoptOne(ctx, req)) + } + return results +} + +func (a *Adopter) adoptOne(ctx context.Context, req Request) Result { + newSettingName := req.SSBName // ACS creates a ScanSetting with the same name as the scan config + + // IMP-ADOPT-003: skip if already pointing to the right ScanSetting. + if req.OldSettingRef == newSettingName { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Skipped: true, + Message: fmt.Sprintf("SSB %s/%s already references ScanSetting %q, no patch needed", req.SSBNamespace, req.SSBName, newSettingName), + } + } + + // Poll for the ACS-created ScanSetting to appear on the cluster. + if err := a.waitForScanSetting(ctx, req.COClient, req.SSBNamespace, newSettingName); err != nil { + // IMP-ADOPT-004, IMP-ADOPT-005, IMP-ADOPT-006: timeout is a warning, not an error. + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + TimedOut: true, + Message: fmt.Sprintf("timed out waiting for ScanSetting %q to appear on cluster %s; SSB %s/%s was NOT patched (settingsRef still %q)", + newSettingName, req.ClusterLabel, req.SSBNamespace, req.SSBName, req.OldSettingRef), + } + } + + // IMP-ADOPT-001: patch the SSB's settingsRef to point to the new ScanSetting. 
+ if err := req.COClient.PatchSSBSettingsRef(ctx, req.SSBNamespace, req.SSBName, newSettingName); err != nil { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Err: err, + Message: fmt.Sprintf("failed to patch SSB %s/%s settingsRef on cluster %s: %v", + req.SSBNamespace, req.SSBName, req.ClusterLabel, err), + } + } + + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Adopted: true, + Message: fmt.Sprintf("adopted SSB %s/%s on cluster %s: settingsRef changed from %q to %q", + req.SSBNamespace, req.SSBName, req.ClusterLabel, req.OldSettingRef, newSettingName), + } +} + +// waitForScanSetting polls until the named ScanSetting exists or the timeout expires. +func (a *Adopter) waitForScanSetting(ctx context.Context, client cofetch.COClient, namespace, name string) error { + deadline := time.After(a.PollTimeout) + ticker := time.NewTicker(a.PollInterval) + defer ticker.Stop() + + // Check immediately before first tick. + if _, err := client.GetScanSetting(ctx, namespace, name); err == nil { + return nil + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-deadline: + return fmt.Errorf("ScanSetting %q not found after %s", name, a.PollTimeout) + case <-ticker.C: + if _, err := client.GetScanSetting(ctx, namespace, name); err == nil { + return nil + } + } + } +} diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt_test.go b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go new file mode 100644 index 0000000000000..081b9ea6a9c3d --- /dev/null +++ b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go @@ -0,0 +1,342 @@ +package adopt + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/cofetch" +) + +// mockCOClient is a test double for cofetch.COClient that supports: +// - controllable ScanSetting existence (via scanSettings map) +// - tracking PatchSSBSettingsRef calls +// - simulating 
errors +type mockCOClient struct { + mu sync.Mutex + scanSettings map[string]*cofetch.ScanSetting // key: "namespace/name" + patches []patchCall // recorded PatchSSBSettingsRef calls + patchErr error // if non-nil, PatchSSBSettingsRef returns this +} + +type patchCall struct { + Namespace string + SSBName string + NewSettingsRefName string +} + +func newMockCOClient() *mockCOClient { + return &mockCOClient{ + scanSettings: make(map[string]*cofetch.ScanSetting), + } +} + +func (m *mockCOClient) addScanSetting(namespace, name string) { + m.mu.Lock() + defer m.mu.Unlock() + m.scanSettings[namespace+"/"+name] = &cofetch.ScanSetting{ + Namespace: namespace, + Name: name, + } +} + +func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.ScanSettingBinding, error) { + return nil, nil +} + +func (m *mockCOClient) GetScanSetting(_ context.Context, namespace, name string) (*cofetch.ScanSetting, error) { + m.mu.Lock() + defer m.mu.Unlock() + ss, ok := m.scanSettings[namespace+"/"+name] + if !ok { + return nil, fmt.Errorf("ScanSetting %q not found in namespace %q", name, namespace) + } + return ss, nil +} + +func (m *mockCOClient) PatchSSBSettingsRef(_ context.Context, namespace, ssbName, newSettingsRefName string) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.patchErr != nil { + return m.patchErr + } + m.patches = append(m.patches, patchCall{ + Namespace: namespace, + SSBName: ssbName, + NewSettingsRefName: newSettingsRefName, + }) + return nil +} + +// Compile-time check. +var _ cofetch.COClient = (*mockCOClient)(nil) + +// TestIMP_ADOPT_001_PatchSettingsRef verifies that the SSB's settingsRef is +// patched to the scan config name after ACS creates the ScanSetting. 
func TestIMP_ADOPT_001_PatchSettingsRef(t *testing.T) {
	client := newMockCOClient()
	client.addScanSetting("openshift-compliance", "cis-weekly")

	// Fast polling keeps every adoption test comfortably under a second.
	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "my-old-setting",
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	if len(results) != 1 {
		t.Fatalf("expected 1 result, got %d", len(results))
	}
	r := results[0]
	if !r.Adopted {
		t.Errorf("expected Adopted=true, got false; message: %s", r.Message)
	}
	// Exactly one patch is expected, retargeting the SSB at the ScanSetting
	// that shares its name (the ACS-managed one created for it).
	if len(client.patches) != 1 {
		t.Fatalf("expected 1 patch call, got %d", len(client.patches))
	}
	p := client.patches[0]
	if p.NewSettingsRefName != "cis-weekly" {
		t.Errorf("patch newSettingsRefName: want %q, got %q", "cis-weekly", p.NewSettingsRefName)
	}
	if p.SSBName != "cis-weekly" {
		t.Errorf("patch SSBName: want %q, got %q", "cis-weekly", p.SSBName)
	}
}

// TestIMP_ADOPT_002_LogMessage verifies the result message mentions the adoption.
func TestIMP_ADOPT_002_LogMessage(t *testing.T) {
	client := newMockCOClient()
	client.addScanSetting("openshift-compliance", "cis-weekly")

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "my-old-setting",
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	r := results[0]
	if r.Message == "" {
		t.Error("expected non-empty message for adopted SSB")
	}
	// Message should mention old and new setting names, so an operator can
	// audit exactly which reference was rewritten.
	for _, want := range []string{"my-old-setting", "cis-weekly", "adopted"} {
		if !containsStr(r.Message, want) {
			t.Errorf("message should contain %q, got %q", want, r.Message)
		}
	}
}

// TestIMP_ADOPT_003_SkipAlreadyAdopted verifies that no patch is issued when
// the SSB already references the correct ScanSetting.
func TestIMP_ADOPT_003_SkipAlreadyAdopted(t *testing.T) {
	client := newMockCOClient()
	// ScanSetting doesn't even need to exist — we skip before polling.

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "cis-weekly", // already correct!
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	r := results[0]
	if !r.Skipped {
		t.Error("expected Skipped=true when settingsRef already matches")
	}
	if r.Adopted {
		t.Error("expected Adopted=false when skipped")
	}
	if len(client.patches) != 0 {
		t.Errorf("expected 0 patch calls, got %d", len(client.patches))
	}
}

// TestIMP_ADOPT_004_005_006_Timeout verifies that a timeout waiting for the
// ScanSetting results in a warning (not an error), and no patch.
func TestIMP_ADOPT_004_005_006_Timeout(t *testing.T) {
	client := newMockCOClient()
	// Don't add the ScanSetting — it never appears.

	// PollTimeout of 50ms with a 10ms interval ⇒ the adopter gives up after
	// a handful of polls, keeping the test fast.
	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 50 * time.Millisecond}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "my-old-setting",
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	r := results[0]
	if !r.TimedOut {
		t.Error("expected TimedOut=true")
	}
	if r.Adopted {
		t.Error("expected Adopted=false on timeout")
	}
	if r.Err != nil {
		t.Errorf("expected Err=nil on timeout (warning, not error), got %v", r.Err)
	}
	if len(client.patches) != 0 {
		t.Errorf("expected 0 patch calls on timeout, got %d", len(client.patches))
	}
}

// TestIMP_ADOPT_007_MultiClusterIndependent verifies that adoption patches
// SSBs on each cluster independently.
func TestIMP_ADOPT_007_MultiClusterIndependent(t *testing.T) {
	clientA := newMockCOClient()
	clientA.addScanSetting("openshift-compliance", "cis-weekly")

	clientB := newMockCOClient()
	clientB.addScanSetting("openshift-compliance", "cis-weekly")

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{
		{
			SSBName:       "cis-weekly",
			SSBNamespace:  "openshift-compliance",
			OldSettingRef: "setting-a",
			ClusterLabel:  "ctx-a",
			COClient:      clientA,
		},
		{
			SSBName:       "cis-weekly",
			SSBNamespace:  "openshift-compliance",
			OldSettingRef: "setting-b",
			ClusterLabel:  "ctx-b",
			COClient:      clientB,
		},
	})

	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	for i, r := range results {
		if !r.Adopted {
			t.Errorf("results[%d]: expected Adopted=true, got false; message: %s", i, r.Message)
		}
	}
	// Each cluster's client must receive exactly one patch — no cross-talk.
	if len(clientA.patches) != 1 {
		t.Errorf("clientA: expected 1 patch, got %d", len(clientA.patches))
	}
	if len(clientB.patches) != 1 {
		t.Errorf("clientB: expected 1 patch, got %d", len(clientB.patches))
	}
}

// TestIMP_ADOPT_008_PartialSuccess verifies that a timeout on one cluster
// does not block adoption on another.
func TestIMP_ADOPT_008_PartialSuccess(t *testing.T) {
	clientA := newMockCOClient()
	clientA.addScanSetting("openshift-compliance", "cis-weekly")

	clientB := newMockCOClient()
	// Don't add ScanSetting on B — it times out.

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 50 * time.Millisecond}
	results := adopter.Adopt(context.Background(), []Request{
		{
			SSBName:       "cis-weekly",
			SSBNamespace:  "openshift-compliance",
			OldSettingRef: "setting-a",
			ClusterLabel:  "ctx-a",
			COClient:      clientA,
		},
		{
			SSBName:       "cis-weekly",
			SSBNamespace:  "openshift-compliance",
			OldSettingRef: "setting-b",
			ClusterLabel:  "ctx-b",
			COClient:      clientB,
		},
	})

	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}

	// ctx-a should succeed.
	if !results[0].Adopted {
		t.Errorf("ctx-a: expected Adopted=true; message: %s", results[0].Message)
	}
	// ctx-b should time out without error.
	if !results[1].TimedOut {
		t.Errorf("ctx-b: expected TimedOut=true; message: %s", results[1].Message)
	}
	if results[1].Err != nil {
		t.Errorf("ctx-b: expected Err=nil on timeout, got %v", results[1].Err)
	}
}

// TestIMP_ADOPT_PatchError verifies that a patch failure is recorded as an error.
func TestIMP_ADOPT_PatchError(t *testing.T) {
	client := newMockCOClient()
	client.addScanSetting("openshift-compliance", "cis-weekly")
	// Simulate an RBAC-style failure on the patch call.
	client.patchErr = errors.New("permission denied")

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "my-old-setting",
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	r := results[0]
	if r.Adopted {
		t.Error("expected Adopted=false on patch error")
	}
	if r.Err == nil {
		t.Error("expected non-nil Err on patch failure")
	}
}

// TestIMP_ADOPT_DelayedScanSetting verifies that the adopter polls and
// succeeds when the ScanSetting appears after a delay.
func TestIMP_ADOPT_DelayedScanSetting(t *testing.T) {
	client := newMockCOClient()

	// Add the ScanSetting after a short delay (about three poll intervals).
	// NOTE(review): this assumes mockCOClient is safe for concurrent use —
	// the adopter polls while this goroutine mutates the mock; verify the
	// mock guards its state with a mutex (run with -race).
	go func() {
		time.Sleep(30 * time.Millisecond)
		client.addScanSetting("openshift-compliance", "cis-weekly")
	}()

	adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second}
	results := adopter.Adopt(context.Background(), []Request{{
		SSBName:       "cis-weekly",
		SSBNamespace:  "openshift-compliance",
		OldSettingRef: "my-old-setting",
		ClusterLabel:  "ctx-a",
		COClient:      client,
	}})

	r := results[0]
	if !r.Adopted {
		t.Errorf("expected Adopted=true after delayed ScanSetting; message: %s", r.Message)
	}
}

// containsStr reports whether substr occurs in s.
// NOTE(review): this (with searchStr) duplicates strings.Contains — consider
// replacing both with the stdlib call and dropping the helpers.
func containsStr(s, substr string) bool {
	return len(s) >= len(substr) && searchStr(s, substr)
}

// searchStr performs a naive linear substring scan; an empty substr matches.
func searchStr(s, substr string) bool {
	for i := 0; i <= len(s)-len(substr); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
a/scripts/compliance-operator-importer/internal/cofetch/client.go +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -2,12 +2,14 @@ package cofetch import ( "context" + "encoding/json" "errors" "fmt" "github.com/stackrox/co-acs-importer/internal/models" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -203,6 +205,26 @@ func parseScanSetting(obj map[string]interface{}) (*ScanSetting, error) { }, nil } +// PatchSSBSettingsRef patches the settingsRef.name of a ScanSettingBinding. +func (c *k8sClient) PatchSSBSettingsRef(ctx context.Context, namespace, ssbName, newSettingsRefName string) error { + patch := map[string]interface{}{ + "settingsRef": map[string]interface{}{ + "name": newSettingsRefName, + }, + } + patchData, err := json.Marshal(patch) + if err != nil { + return fmt.Errorf("marshal patch: %w", err) + } + _, err = c.dynamic.Resource(scanSettingBindingGVR).Namespace(namespace).Patch( + ctx, ssbName, types.MergePatchType, patchData, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("patch SSB %q settingsRef in namespace %q: %w", ssbName, namespace, err) + } + return nil +} + // stringField safely extracts a string value from an unstructured map. func stringField(m map[string]interface{}, key string) string { v, _ := m[key].(string) diff --git a/scripts/compliance-operator-importer/internal/cofetch/types.go b/scripts/compliance-operator-importer/internal/cofetch/types.go index bb04563b71332..0c7430f8fbd0b 100644 --- a/scripts/compliance-operator-importer/internal/cofetch/types.go +++ b/scripts/compliance-operator-importer/internal/cofetch/types.go @@ -45,10 +45,11 @@ type ScanSetting struct { } // COClient abstracts Compliance Operator resource discovery. -// All methods are context-aware and must not mutate cluster state. 
type COClient interface { // ListScanSettingBindings returns all ScanSettingBindings in the configured namespace(s). ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) // GetScanSetting fetches a named ScanSetting from the given namespace. GetScanSetting(ctx context.Context, namespace, name string) (*ScanSetting, error) + // PatchSSBSettingsRef patches the settingsRef.name of a ScanSettingBinding. + PatchSSBSettingsRef(ctx context.Context, namespace, ssbName, newSettingsRefName string) error } diff --git a/scripts/compliance-operator-importer/internal/merge/merge.go b/scripts/compliance-operator-importer/internal/merge/merge.go new file mode 100644 index 0000000000000..f58dc17fedbf2 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/merge/merge.go @@ -0,0 +1,191 @@ +// Package merge handles merging of ScanSettingBindings across multiple clusters. +package merge + +import ( + "fmt" + "slices" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// MappedSSB represents a ScanSettingBinding that has been mapped to an ACS payload. +type MappedSSB struct { + Name string // SSB name + Profiles []string + Payload models.ACSCreatePayload +} + +// MergeResult holds the output of the merge operation. +type MergeResult struct { + Merged []MappedSSB + Problems []models.Problem +} + +// MergeSSBs merges ScanSettingBindings from multiple clusters. +// +// Input: map of clusterID → []MappedSSB +// Output: []MergedSSB (one per unique SSB name, with merged cluster IDs) +// +// Logic (IMP-MAP-019, IMP-MAP-020, IMP-MAP-021): +// - Group by SSB name across all clusters. +// - For each group: +// - If all SSBs have identical profiles (sorted) and identical schedule: merge into one, union cluster IDs. +// - If profiles or schedule differ: error for that SSB name, add problem entry. +func MergeSSBs(clusterSSBs map[string][]MappedSSB) MergeResult { + // Group SSBs by name. 
+ groups := make(map[string][]clusterSSBEntry) + for clusterID, ssbs := range clusterSSBs { + for _, ssb := range ssbs { + groups[ssb.Name] = append(groups[ssb.Name], clusterSSBEntry{ + clusterID: clusterID, + ssb: ssb, + }) + } + } + + var merged []MappedSSB + var problems []models.Problem + + for ssbName, entries := range groups { + if len(entries) == 1 { + // Only one cluster has this SSB; no merging needed. + merged = append(merged, entries[0].ssb) + continue + } + + // Check if all SSBs in the group are identical (same profiles and schedule). + first := entries[0].ssb + identical := true + var conflictClusters []string + + for _, entry := range entries[1:] { + if !ssbsAreIdentical(first, entry.ssb) { + identical = false + conflictClusters = append(conflictClusters, entry.clusterID) + } + } + + if !identical { + // IMP-MAP-020: profiles or schedule differ. + conflictClusters = append([]string{entries[0].clusterID}, conflictClusters...) + problems = append(problems, models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryConflict, + ResourceRef: ssbName, + Description: fmt.Sprintf( + "ScanSettingBinding %q exists in multiple clusters with different profiles or schedules: %s", + ssbName, strings.Join(conflictClusters, ", "), + ), + FixHint: "Ensure SSBs with the same name have identical profiles and schedules across all clusters, or rename them uniquely per cluster.", + Skipped: true, + }) + continue + } + + // IMP-MAP-019, IMP-MAP-021: merge clusters. + mergedSSB := first + var allClusters []string + for _, entry := range entries { + allClusters = append(allClusters, entry.clusterID) + } + slices.Sort(allClusters) + mergedSSB.Payload.Clusters = allClusters + merged = append(merged, mergedSSB) + } + + return MergeResult{ + Merged: merged, + Problems: problems, + } +} + +// clusterSSBEntry pairs a cluster ID with an SSB. 
+type clusterSSBEntry struct { + clusterID string + ssb MappedSSB +} + +// ssbsAreIdentical checks if two SSBs have the same profiles and schedule. +func ssbsAreIdentical(a, b MappedSSB) bool { + // Compare sorted profiles. + aProfiles := make([]string, len(a.Profiles)) + bProfiles := make([]string, len(b.Profiles)) + copy(aProfiles, a.Profiles) + copy(bProfiles, b.Profiles) + slices.Sort(aProfiles) + slices.Sort(bProfiles) + + if !stringSlicesEqual(aProfiles, bProfiles) { + return false + } + + // Compare schedules. + return schedulesEqual(a.Payload.ScanConfig.ScanSchedule, b.Payload.ScanConfig.ScanSchedule) +} + +// stringSlicesEqual checks if two string slices are equal. +func stringSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// schedulesEqual checks if two ACS schedules are equal. +func schedulesEqual(a, b *models.ACSSchedule) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + + if a.Hour != b.Hour || a.Minute != b.Minute { + return false + } + + if a.IntervalType != b.IntervalType { + return false + } + + // Compare DaysOfWeek. + if (a.DaysOfWeek == nil) != (b.DaysOfWeek == nil) { + return false + } + if a.DaysOfWeek != nil && b.DaysOfWeek != nil { + if !int32SlicesEqual(a.DaysOfWeek.Days, b.DaysOfWeek.Days) { + return false + } + } + + // Compare DaysOfMonth. + if (a.DaysOfMonth == nil) != (b.DaysOfMonth == nil) { + return false + } + if a.DaysOfMonth != nil && b.DaysOfMonth != nil { + if !int32SlicesEqual(a.DaysOfMonth.Days, b.DaysOfMonth.Days) { + return false + } + } + + return true +} + +// int32SlicesEqual checks if two int32 slices are equal. 
+func int32SlicesEqual(a, b []int32) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/scripts/compliance-operator-importer/internal/merge/merge_test.go b/scripts/compliance-operator-importer/internal/merge/merge_test.go new file mode 100644 index 0000000000000..4f1085fbc9632 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/merge/merge_test.go @@ -0,0 +1,295 @@ +package merge + +import ( + "slices" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// TestIMP_MAP_019_MergeSameProfilesSameSchedule verifies that SSBs with the +// same name, same profiles, and same schedule are merged across clusters. +func TestIMP_MAP_019_MergeSameProfilesSameSchedule(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-1": { + { + Name: "cis-benchmark", + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + Payload: models.ACSCreatePayload{ + ScanName: "cis-benchmark", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 2, + Minute: 30, + }, + }, + Clusters: []string{"cluster-1"}, + }, + }, + }, + "cluster-2": { + { + Name: "cis-benchmark", + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + Payload: models.ACSCreatePayload{ + ScanName: "cis-benchmark", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 2, + Minute: 30, + }, + }, + Clusters: []string{"cluster-2"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 1 { + t.Fatalf("expected 1 merged SSB, got %d", len(result.Merged)) + } + + merged := result.Merged[0] + if merged.Name != "cis-benchmark" { + t.Errorf("expected SSB name 'cis-benchmark', got %q", merged.Name) + } + + // Clusters should be merged. 
	// Clusters should be merged (sort first: cluster order is not part of the
	// contract being asserted here).
	slices.Sort(merged.Payload.Clusters)
	expected := []string{"cluster-1", "cluster-2"}
	if !stringSlicesEqual(merged.Payload.Clusters, expected) {
		t.Errorf("expected clusters %v, got %v", expected, merged.Payload.Clusters)
	}

	if len(result.Problems) != 0 {
		t.Errorf("expected no problems, got %d", len(result.Problems))
	}
}

// TestIMP_MAP_021_MergeIdenticalSSBsUnion verifies that identical SSBs are
// merged with a union of cluster IDs.
func TestIMP_MAP_021_MergeIdenticalSSBsUnion(t *testing.T) {
	// Three clusters, all carrying byte-identical "ssb-1" bindings.
	input := map[string][]MappedSSB{
		"cluster-a": {
			{
				Name:     "ssb-1",
				Profiles: []string{"profile-x"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-1",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-x"},
						ScanSchedule: &models.ACSSchedule{
							Hour:   10,
							Minute: 0,
						},
					},
					Clusters: []string{"cluster-a"},
				},
			},
		},
		"cluster-b": {
			{
				Name:     "ssb-1",
				Profiles: []string{"profile-x"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-1",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-x"},
						ScanSchedule: &models.ACSSchedule{
							Hour:   10,
							Minute: 0,
						},
					},
					Clusters: []string{"cluster-b"},
				},
			},
		},
		"cluster-c": {
			{
				Name:     "ssb-1",
				Profiles: []string{"profile-x"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-1",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-x"},
						ScanSchedule: &models.ACSSchedule{
							Hour:   10,
							Minute: 0,
						},
					},
					Clusters: []string{"cluster-c"},
				},
			},
		},
	}

	result := MergeSSBs(input)
	if len(result.Merged) != 1 {
		t.Fatalf("expected 1 merged SSB, got %d", len(result.Merged))
	}

	merged := result.Merged[0]
	slices.Sort(merged.Payload.Clusters)
	expected := []string{"cluster-a", "cluster-b", "cluster-c"}
	if !stringSlicesEqual(merged.Payload.Clusters, expected) {
		t.Errorf("expected clusters %v, got %v", expected, merged.Payload.Clusters)
	}
}

// TestIMP_MAP_020_DifferentProfilesError verifies that SSBs with the same name
// but different profiles produce an error.
func TestIMP_MAP_020_DifferentProfilesError(t *testing.T) {
	input := map[string][]MappedSSB{
		"cluster-1": {
			{
				Name:     "ssb-conflict",
				Profiles: []string{"profile-a"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-conflict",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-a"},
					},
					Clusters: []string{"cluster-1"},
				},
			},
		},
		"cluster-2": {
			{
				Name:     "ssb-conflict",
				Profiles: []string{"profile-b"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-conflict",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-b"},
					},
					Clusters: []string{"cluster-2"},
				},
			},
		},
	}

	result := MergeSSBs(input)
	// The conflicting group must be dropped entirely and surfaced as a problem.
	if len(result.Merged) != 0 {
		t.Errorf("expected no merged SSBs when profiles differ, got %d", len(result.Merged))
	}
	if len(result.Problems) != 1 {
		t.Fatalf("expected 1 problem, got %d", len(result.Problems))
	}

	problem := result.Problems[0]
	if problem.Severity != models.SeverityError {
		t.Errorf("expected error severity, got %v", problem.Severity)
	}
	if problem.Category != models.CategoryConflict {
		t.Errorf("expected conflict category, got %v", problem.Category)
	}
}

// TestIMP_MAP_020_DifferentScheduleError verifies that SSBs with the same name
// and profiles but different schedules produce an error.
func TestIMP_MAP_020_DifferentScheduleError(t *testing.T) {
	input := map[string][]MappedSSB{
		"cluster-1": {
			{
				Name:     "ssb-sched-conflict",
				Profiles: []string{"profile-x"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-sched-conflict",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-x"},
						ScanSchedule: &models.ACSSchedule{
							Hour:   10,
							Minute: 0,
						},
					},
					Clusters: []string{"cluster-1"},
				},
			},
		},
		"cluster-2": {
			{
				Name:     "ssb-sched-conflict",
				Profiles: []string{"profile-x"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-sched-conflict",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-x"},
						ScanSchedule: &models.ACSSchedule{
							Hour:   14,
							Minute: 30,
						},
					},
					Clusters: []string{"cluster-2"},
				},
			},
		},
	}

	result := MergeSSBs(input)
	if len(result.Merged) != 0 {
		t.Errorf("expected no merged SSBs when schedules differ, got %d", len(result.Merged))
	}
	if len(result.Problems) != 1 {
		t.Fatalf("expected 1 problem, got %d", len(result.Problems))
	}

	problem := result.Problems[0]
	if problem.Severity != models.SeverityError {
		t.Errorf("expected error severity, got %v", problem.Severity)
	}
}

// TestSSBsUniqueToEachCluster verifies that SSBs unique to each cluster are
// not merged.
func TestSSBsUniqueToEachCluster(t *testing.T) {
	input := map[string][]MappedSSB{
		"cluster-1": {
			{
				Name:     "ssb-unique-1",
				Profiles: []string{"profile-a"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-unique-1",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-a"},
					},
					Clusters: []string{"cluster-1"},
				},
			},
		},
		"cluster-2": {
			{
				Name:     "ssb-unique-2",
				Profiles: []string{"profile-b"},
				Payload: models.ACSCreatePayload{
					ScanName: "ssb-unique-2",
					ScanConfig: models.ACSBaseScanConfig{
						Profiles: []string{"profile-b"},
					},
					Clusters: []string{"cluster-2"},
				},
			},
		},
	}

	result := MergeSSBs(input)
	if len(result.Merged) != 2 {
		t.Fatalf("expected 2 merged SSBs (unique ones not merged), got %d", len(result.Merged))
	}

	// Sort before comparing — the output order of distinct SSBs is not part
	// of this test's contract.
	names := []string{result.Merged[0].Name, result.Merged[1].Name}
	slices.Sort(names)
	expected := []string{"ssb-unique-1", "ssb-unique-2"}
	if !stringSlicesEqual(names, expected) {
		t.Errorf("expected SSB names %v, got %v", expected, names)
	}

	// Each should have only one cluster.
	for _, merged := range result.Merged {
		if len(merged.Payload.Clusters) != 1 {
			t.Errorf("expected 1 cluster for unique SSB %q, got %d", merged.Name, len(merged.Payload.Clusters))
		}
	}
}
r.status.Stage("Reconcile", "applying scan configurations to ACS") + var adoptRequests []adopt.Request for _, binding := range bindings { - r.processBinding(ctx, binding, existingNames, rec, collector, builder) + action := r.processBinding(ctx, binding, existingNames, rec, collector, builder) + if action == "create" && !r.cfg.DryRun { + adoptRequests = append(adoptRequests, adopt.Request{ + SSBName: binding.Name, + SSBNamespace: binding.Namespace, + OldSettingRef: binding.ScanSettingName, + ClusterLabel: "default", + COClient: r.coClient, + }) + } + } + + // Step 3b: adopt SSBs whose scan configs were just created. + if len(adoptRequests) > 0 { + r.runAdoption(ctx, adoptRequests) } // Step 4: build the final report. @@ -135,6 +151,7 @@ func (r *Runner) Run(ctx context.Context) int { // processBinding handles a single ScanSettingBinding: fetches its ScanSetting, // maps it to an ACS payload, and calls the reconciler. All failures are recorded // as problems and do not abort processing of remaining bindings. +// Returns the action type ("create", "update", "skip", "fail", or "" on early return). func (r *Runner) processBinding( ctx context.Context, binding cofetch.ScanSettingBinding, @@ -142,7 +159,7 @@ func (r *Runner) processBinding( rec *reconcile.Reconciler, collector *problems.Collector, builder *report.Builder, -) { +) string { // Derive a stable resource reference for problem entries. resourceRef := fmt.Sprintf("%s/%s", binding.Namespace, binding.Name) @@ -171,7 +188,7 @@ func (r *Runner) processBinding( Reason: "ScanSetting not found", Error: err.Error(), }) - return + return "" } // Map the CO resources to an ACS create payload. @@ -185,7 +202,7 @@ func (r *Runner) processBinding( Reason: "mapping error", Error: result.Problem.Description, }) - return + return "" } // Reconcile: create, update, or skip. 
// runAdoption runs the SSB adoption step for all requests, logging results.
// Adoption is best-effort: every outcome is reported via the status printer
// and never aborts the run.
func (r *Runner) runAdoption(ctx context.Context, requests []adopt.Request) {
	r.status.Stage("Adopt", "patching SSB settingsRef to ACS-managed ScanSettings")
	adopter := adopt.New()
	results := adopter.Adopt(ctx, requests)
	for _, res := range results {
		// Exactly one outcome per result is expected; a result matching no
		// case is silently ignored (no default branch).
		// NOTE(review): patch errors (res.Err) surface as warnings, not
		// failures — confirm adoption failures are intentionally non-fatal.
		switch {
		case res.Adopted:
			r.status.OKf("%s", res.Message)
		case res.Skipped:
			r.status.Detailf("%s", res.Message)
		case res.TimedOut:
			r.status.Warnf("%s", res.Message)
		case res.Err != nil:
			r.status.Warnf("%s", res.Message)
		}
	}
}
// Printer writes structured status messages to an output writer.
type Printer struct {
	out io.Writer
}

// New creates a Printer that writes to os.Stderr.
func New() *Printer {
	return NewWithWriter(os.Stderr)
}

// NewWithWriter creates a Printer that writes to w.
func NewWithWriter(w io.Writer) *Printer {
	return &Printer{out: w}
}

// Stage prints a stage header: "▸ Stage: message".
func (p *Printer) Stage(stage, msg string) {
	fmt.Fprintf(p.out, "▸ %s: %s\n", stage, msg)
}

// Stagef prints a formatted stage header.
func (p *Printer) Stagef(stage, format string, args ...any) {
	p.Stage(stage, fmt.Sprintf(format, args...))
}

// line writes one indented line under the current stage, prefixed by the
// given result marker (empty for plain detail lines).
func (p *Printer) line(marker, msg string) {
	if marker == "" {
		fmt.Fprintf(p.out, " %s\n", msg)
		return
	}
	fmt.Fprintf(p.out, " %s %s\n", marker, msg)
}

// Detail prints an indented detail line under the current stage.
func (p *Printer) Detail(msg string) {
	p.line("", msg)
}

// Detailf prints a formatted detail line.
func (p *Printer) Detailf(format string, args ...any) {
	p.Detail(fmt.Sprintf(format, args...))
}

// OK prints a success result for the current stage.
func (p *Printer) OK(msg string) {
	p.line("✓", msg)
}

// OKf prints a formatted success result.
func (p *Printer) OKf(format string, args ...any) {
	p.OK(fmt.Sprintf(format, args...))
}

// Warn prints a warning result for the current stage.
func (p *Printer) Warn(msg string) {
	p.line("!", msg)
}

// Warnf prints a formatted warning.
func (p *Printer) Warnf(format string, args ...any) {
	p.Warn(fmt.Sprintf(format, args...))
}

// Fail prints a failure result for the current stage.
func (p *Printer) Fail(msg string) {
	p.line("✗", msg)
}

// Failf prints a formatted failure.
func (p *Printer) Failf(format string, args ...any) {
	p.Fail(fmt.Sprintf(format, args...))
}
Co-Authored-By: Claude Opus 4.6 --- .../.dockerignore | 2 + .../compliance-operator-importer/Dockerfile | 10 +++ scripts/compliance-operator-importer/Makefile | 69 +++++++++++++++++++ .../specs/07-container-image.md | 59 ++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 scripts/compliance-operator-importer/.dockerignore create mode 100644 scripts/compliance-operator-importer/Dockerfile create mode 100644 scripts/compliance-operator-importer/Makefile create mode 100644 scripts/compliance-operator-importer/specs/07-container-image.md diff --git a/scripts/compliance-operator-importer/.dockerignore b/scripts/compliance-operator-importer/.dockerignore new file mode 100644 index 0000000000000..c008207f5a26d --- /dev/null +++ b/scripts/compliance-operator-importer/.dockerignore @@ -0,0 +1,2 @@ +* +!compliance-operator-importer diff --git a/scripts/compliance-operator-importer/Dockerfile b/scripts/compliance-operator-importer/Dockerfile new file mode 100644 index 0000000000000..6ec55de4a1c4b --- /dev/null +++ b/scripts/compliance-operator-importer/Dockerfile @@ -0,0 +1,10 @@ +FROM registry.access.redhat.com/ubi9-micro:latest + +LABEL org.opencontainers.image.title="co-acs-importer" \ + org.opencontainers.image.description="Compliance Operator to ACS scan configuration importer" \ + org.opencontainers.image.source="https://github.com/stackrox/stackrox" + +COPY compliance-operator-importer /compliance-operator-importer + +USER 65534:65534 +ENTRYPOINT ["/compliance-operator-importer"] diff --git a/scripts/compliance-operator-importer/Makefile b/scripts/compliance-operator-importer/Makefile new file mode 100644 index 0000000000000..cdef54c9eed74 --- /dev/null +++ b/scripts/compliance-operator-importer/Makefile @@ -0,0 +1,69 @@ +BINARY := compliance-operator-importer +MODULE := ./cmd/importer +IMAGE ?= localhost/compliance-operator-importer +TAG ?= latest +ARCHS ?= amd64 arm64 +GOARCH ?= $(shell go env GOARCH) +CONTAINER ?= $(shell command -v podman 
2>/dev/null || echo docker)

.PHONY: build test image image-push image-multiarch clean \
	demo-seed demo-seed-down demo-seed-status demo help test-v

## ── Build ──────────────────────────────────────────────────────────────────

build: ## Build the importer binary
	CGO_ENABLED=0 go build -o $(BINARY) $(MODULE)

## ── Test ───────────────────────────────────────────────────────────────────

test: ## Run all unit tests
	go test ./...

test-v: ## Run all unit tests (verbose)
	go test -v ./...

## ── Container image ────────────────────────────────────────────────────────

# The binary is cross-compiled on the host and COPY'd in by the Dockerfile;
# the container build itself never runs the Go toolchain. Note this target
# overwrites any host-arch binary produced by `make build` with a linux one.
image: ## Build container image for host arch
	CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o $(BINARY) $(MODULE)
	$(CONTAINER) build -t $(IMAGE):$(TAG) .

image-multiarch: ## Build per-arch images (use before image-push)
	@for arch in $(ARCHS); do \
		echo "── Building $(IMAGE):$(TAG)-$$arch ──"; \
		CGO_ENABLED=0 GOOS=linux GOARCH=$$arch go build -o $(BINARY) $(MODULE) && \
		$(CONTAINER) build --platform linux/$$arch -t $(IMAGE):$(TAG)-$$arch . ; \
	done

# NOTE(review): `manifest create` fails if the manifest list already exists
# from a previous run — confirm whether `--amend` (or a prior `manifest rm`)
# is needed to make repeat pushes re-runnable.
image-push: image-multiarch ## Push multi-arch images and create manifest
	@for arch in $(ARCHS); do \
		$(CONTAINER) push $(IMAGE):$(TAG)-$$arch ; \
	done
	$(CONTAINER) manifest create $(IMAGE):$(TAG) \
		$(foreach arch,$(ARCHS),$(IMAGE):$(TAG)-$(arch))
	$(CONTAINER) manifest push $(IMAGE):$(TAG)
	@echo ""
	@echo "Pushed: $(IMAGE):$(TAG)"

## ── Demo ───────────────────────────────────────────────────────────────────

demo-seed: ## Seed demo fixtures (2 ACS scans + 3 SSBs, 1 conflicting)
	./hack/demo-seed.sh up

demo-seed-down: ## Remove demo fixtures
	./hack/demo-seed.sh down

demo-seed-status: ## Show demo fixture state
	./hack/demo-seed.sh status

demo: build ## Run interactive demo
	./hack/demo.sh

## ── Misc ───────────────────────────────────────────────────────────────────

clean: ## Remove build artifacts
	rm -f $(BINARY)

# `help` scrapes the `target: ## description` annotations above; separator
# lines have no colon and are therefore skipped by the grep.
help: ## Show this help
	@grep -E '^[a-zA-Z_-]+:.*##' $(MAKEFILE_LIST) | \
		awk 'BEGIN {FS = ":.*## "}; {printf " \033[36m%-18s\033[0m %s\n", $$1, $$2}'
+- Set `ENTRYPOINT ["/compliance-operator-importer"]`. + +### IMP-IMG-002: Static binary + +The Go binary MUST be compiled with: +- `CGO_ENABLED=0` +- `GOOS=linux` +- `GOARCH` set to the target architecture (`amd64` or `arm64`) + +### IMP-IMG-003: Multi-architecture support + +The build MUST: +- Build the Go binary once per target architecture. +- Build a container image per architecture, tagged with an `-$ARCH` suffix + (e.g. `$IMAGE:$TAG-amd64`, `$IMAGE:$TAG-arm64`). +- Create a multi-arch manifest list under the plain tag + (`$IMAGE:$TAG`) combining all architecture-specific images. +- Support at least `linux/amd64` and `linux/arm64`. + +### IMP-IMG-004: Build targets + +The Makefile MUST provide: +- `make image` — build container image for the host architecture. +- `make image-push` — build and push multi-arch images + manifest. +- Image name configurable via `IMAGE` env var with a placeholder default. +- Tag configurable via `TAG` env var (default: `latest`). + +### IMP-IMG-005: Image metadata + +The image MUST include OCI labels: +- `org.opencontainers.image.title=co-acs-importer` +- `org.opencontainers.image.description=Compliance Operator to ACS scan configuration importer` +- `org.opencontainers.image.source=https://github.com/stackrox/stackrox` + +## Non-goals + +- CI/CD pipeline integration (future work). +- Helm chart or operator packaging. +- Signing or SBOM generation (deferred). From 026e1846bb9f192fa3264096357c6b5982b0c498 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 13:54:46 +0100 Subject: [PATCH 20/24] feat(co-importer): add demo seed script for quick cluster setup hack/demo-seed.sh creates ScanSetting + ScanSettingBindings on the current cluster for demo/testing purposes. Tracks the last seed ID in hack/.demo-seed-id to enable cleanup on re-run. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../hack/.demo-seed-id | 1 + .../hack/demo-seed.sh | 340 ++++++++++++++++++ 2 files changed, 341 insertions(+) create mode 100644 scripts/compliance-operator-importer/hack/.demo-seed-id create mode 100755 scripts/compliance-operator-importer/hack/demo-seed.sh diff --git a/scripts/compliance-operator-importer/hack/.demo-seed-id b/scripts/compliance-operator-importer/hack/.demo-seed-id new file mode 100644 index 0000000000000..2c6374ac9b228 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/.demo-seed-id @@ -0,0 +1 @@ +64005cb1 diff --git a/scripts/compliance-operator-importer/hack/demo-seed.sh b/scripts/compliance-operator-importer/hack/demo-seed.sh new file mode 100755 index 0000000000000..deb2e831850fd --- /dev/null +++ b/scripts/compliance-operator-importer/hack/demo-seed.sh @@ -0,0 +1,340 @@ +#!/usr/bin/env bash +# demo-seed.sh — Seed demo fixtures for the CO → ACS importer. +# +# Creates 2 scan configs in ACS and 3 SSBs in Kubernetes. One SSB +# intentionally shares a name with an ACS scan config to demonstrate +# conflict handling. +# +# All resources are tagged with a short unique ID (e.g. "d7f2") so +# they can be identified and cleaned up reliably. +# +# Usage: +# ./demo-seed.sh up # create fixtures +# ./demo-seed.sh down # tear down fixtures +# ./demo-seed.sh status # show what exists +# +# Prerequisites: +# ROX_ENDPOINT, ROX_ADMIN_PASSWORD (or ROX_API_TOKEN), kubectl access. 
+ +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +STATE_FILE="${SCRIPT_DIR}/.demo-seed-id" +CO_NS="openshift-compliance" + +# ── ACS connection ─────────────────────────────────────────────────────────── + +ACS_ENDPOINT="${ROX_ENDPOINT:?ROX_ENDPOINT must be set}" +ACS_URL="${ACS_ENDPOINT#http://}" +ACS_URL="${ACS_URL#https://}" +ACS_URL="https://${ACS_URL}" + +if [[ -n "${ROX_ADMIN_PASSWORD:-}" ]]; then + CURL_AUTH=(-u "admin:${ROX_ADMIN_PASSWORD}") +elif [[ -n "${ROX_API_TOKEN:-}" ]]; then + CURL_AUTH=(-H "Authorization: Bearer ${ROX_API_TOKEN}") +else + echo "ERROR: set ROX_ADMIN_PASSWORD or ROX_API_TOKEN" >&2 + exit 1 +fi + +# ── Helpers ────────────────────────────────────────────────────────────────── + +BOLD='\033[1m' DIM='\033[2m' GREEN='\033[32m' RED='\033[31m' +YELLOW='\033[33m' CYAN='\033[36m' RESET='\033[0m' + +ok() { echo -e " ${GREEN}✓${RESET} $1"; } +fail() { echo -e " ${RED}✗${RESET} $1"; } +info() { echo -e " ${DIM}$1${RESET}"; } +hdr() { echo -e "\n${CYAN}${BOLD}── $1 ──${RESET}\n"; } + +acs_api() { + local method="$1" path="$2" + shift 2 + curl -sk "${CURL_AUTH[@]}" -X "$method" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + "${ACS_URL}${path}" "$@" +} + +# Get the ACS cluster ID for the current context. +get_acs_cluster_id() { + local clusters + clusters=$(acs_api GET "/v1/clusters" 2> /dev/null) + # Match by provider metadata (OpenShift cluster ID). 
+ local ocp_id + ocp_id=$(kubectl get clusterversion version -o jsonpath='{.spec.clusterID}' 2> /dev/null || true) + if [[ -n "$ocp_id" ]]; then + local matched + matched=$(echo "$clusters" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('clusters', []): + pid = c.get('status',{}).get('providerMetadata',{}).get('cluster',{}).get('id','') + if pid == '${ocp_id}': + print(c['id']); break +" 2> /dev/null || true) + if [[ -n "$matched" ]]; then + echo "$matched" + return + fi + fi + # Fallback: first cluster. + echo "$clusters" | python3 -c " +import sys, json +data = json.load(sys.stdin) +cs = data.get('clusters', []) +if cs: print(cs[0]['id']) +" 2> /dev/null +} + +generate_id() { + # Use od to avoid SIGPIPE from tr|head under pipefail. + od -An -tx1 -N4 /dev/urandom | tr -d ' \n' +} + +load_id() { + if [[ ! -f "$STATE_FILE" ]]; then + echo "ERROR: no active seed found (${STATE_FILE} missing). Run '$0 up' first." >&2 + exit 1 + fi + cat "$STATE_FILE" +} + +# Resource names derived from seed ID. +# ACS-only scans: demo-{id}-stig-weekly, demo-{id}-cis-audit +# K8s SSBs: demo-{id}-cis-audit (CONFLICT!), demo-{id}-moderate-daily, demo-{id}-pci-scan +# K8s ScanSetting: demo-{id}-setting +names_for() { + local id="$1" + ACS_SCAN_1="demo-${id}-stig-weekly" + ACS_SCAN_2="demo-${id}-cis-audit" + SSB_1="demo-${id}-cis-audit" # same as ACS_SCAN_2 → conflict + SSB_2="demo-${id}-moderate-daily" + SSB_3="demo-${id}-pci-scan" + SCAN_SETTING="demo-${id}-setting" +} + +# ── UP ─────────────────────────────────────────────────────────────────────── + +cmd_up() { + if [[ -f "$STATE_FILE" ]]; then + local old_id + old_id=$(cat "$STATE_FILE") + echo -e "${YELLOW}WARNING: seed '${old_id}' already exists. 
Run '$0 down' first or '$0 up --force'.${RESET}" + if [[ "${1:-}" != "--force" ]]; then exit 1; fi + cmd_down + fi + + local id + id=$(generate_id) + names_for "$id" + + echo -e "${BOLD}Seeding demo fixtures [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + # ── K8s: ScanSetting ───────────────────────────────────────────────── + hdr "Kubernetes: ScanSetting" + kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSetting +metadata: + name: ${SCAN_SETTING} + namespace: ${CO_NS} + labels: + demo-seed: "${id}" +schedule: "0 3 * * *" +roles: [worker, master] +rawResultStorage: + rotation: 3 + size: 1Gi +EOF + ok "${SCAN_SETTING} (daily 03:00)" + + # ── K8s: SSBs ──────────────────────────────────────────────────────── + hdr "Kubernetes: ScanSettingBindings" + for pair in \ + "${SSB_1}:ocp4-cis" \ + "${SSB_2}:ocp4-moderate" \ + "${SSB_3}:ocp4-pci-dss"; do + local name="${pair%%:*}" profile="${pair#*:}" + kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${name} + namespace: ${CO_NS} + labels: + demo-seed: "${id}" +profiles: + - name: ${profile} + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${SCAN_SETTING} + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + local note="" + [[ "$name" == "$SSB_1" ]] && note=" ← will conflict with ACS scan" + ok "${name} (${profile})${note}" + done + + # ── ACS: scan configs ──────────────────────────────────────────────── + hdr "ACS: Scan Configurations" + local cluster_id + cluster_id=$(get_acs_cluster_id) + if [[ -z "$cluster_id" ]]; then + fail "Could not determine ACS cluster ID" + exit 1 + fi + info "Using ACS cluster ID: ${cluster_id}" + + # Scan 1: STIG weekly (no conflict with any SSB). 
+ acs_api POST "/v2/compliance/scan/configurations" -d "{ + \"scanName\": \"${ACS_SCAN_1}\", + \"scanConfig\": { + \"oneTimeScan\": false, + \"profiles\": [\"ocp4-stig\"], + \"scanSchedule\": { + \"intervalType\": \"WEEKLY\", + \"hour\": 4, \"minute\": 0, + \"daysOfWeek\": { \"days\": [1] } + }, + \"description\": \"Demo seed ${id}: STIG weekly scan (no conflict)\" + }, + \"clusters\": [\"${cluster_id}\"] + }" > /dev/null 2>&1 + ok "${ACS_SCAN_1} (ocp4-stig, weekly Mon 04:00)" + + # Scan 2: CIS audit — same name as SSB_1 → deliberate conflict. + acs_api POST "/v2/compliance/scan/configurations" -d "{ + \"scanName\": \"${ACS_SCAN_2}\", + \"scanConfig\": { + \"oneTimeScan\": false, + \"profiles\": [\"ocp4-cis\"], + \"scanSchedule\": { + \"intervalType\": \"WEEKLY\", + \"hour\": 6, \"minute\": 30, + \"daysOfWeek\": { \"days\": [5] } + }, + \"description\": \"Demo seed ${id}: CIS audit — pre-existing, will conflict with SSB\" + }, + \"clusters\": [\"${cluster_id}\"] + }" > /dev/null 2>&1 + ok "${ACS_SCAN_2} (ocp4-cis, weekly Fri 06:30) ← conflicts with SSB" + + # ── Save state ─────────────────────────────────────────────────────── + echo "$id" > "$STATE_FILE" + + hdr "Summary" + echo -e " ${BOLD}Seed ID:${RESET} ${CYAN}${id}${RESET}" + echo -e " ${BOLD}K8s SSBs:${RESET} ${SSB_1}, ${SSB_2}, ${SSB_3}" + echo -e " ${BOLD}ACS scans:${RESET} ${ACS_SCAN_1}, ${ACS_SCAN_2}" + echo -e " ${BOLD}Conflict:${RESET} ${RED}${SSB_1}${RESET} (SSB) vs ${RED}${ACS_SCAN_2}${RESET} (ACS)" + echo "" + echo -e " ${DIM}Run the importer to see conflict handling:${RESET}" + echo -e " ${DIM} ./compliance-operator-importer --endpoint \$ROX_ENDPOINT --insecure-skip-verify${RESET}" + echo -e " ${DIM} ./compliance-operator-importer --endpoint \$ROX_ENDPOINT --insecure-skip-verify --overwrite-existing${RESET}" + echo "" + echo -e " ${DIM}Tear down: $0 down${RESET}" + echo "" +} + +# ── DOWN ───────────────────────────────────────────────────────────────────── + +cmd_down() { + local id + 
id=$(load_id) + names_for "$id" + + echo -e "${BOLD}Removing demo fixtures [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + # ── K8s ────────────────────────────────────────────────────────────── + hdr "Kubernetes" + for name in "$SSB_1" "$SSB_2" "$SSB_3"; do + if kubectl delete scansettingbinding "$name" -n "$CO_NS" --ignore-not-found 2> /dev/null; then + ok "Deleted SSB ${name}" + fi + done + if kubectl delete scansetting "$SCAN_SETTING" -n "$CO_NS" --ignore-not-found 2> /dev/null; then + ok "Deleted ScanSetting ${SCAN_SETTING}" + fi + + # ── ACS ────────────────────────────────────────────────────────────── + hdr "ACS" + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + + # Delete any scan config whose name starts with "demo-{id}-". + echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +prefix = 'demo-${id}-' +for c in data.get('configurations', []): + if c['scanName'].startswith(prefix): + print(c['id'] + ' ' + c['scanName']) +" 2> /dev/null | while read -r cfg_id cfg_name; do + acs_api DELETE "/v2/compliance/scan/configurations/${cfg_id}" > /dev/null 2>&1 + ok "Deleted ACS scan config ${cfg_name} (${cfg_id})" + done + + rm -f "$STATE_FILE" + echo "" + ok "All demo-${id} fixtures removed." 
+ echo "" +} + +# ── STATUS ─────────────────────────────────────────────────────────────────── + +cmd_status() { + local id + id=$(load_id) + names_for "$id" + + echo -e "${BOLD}Demo fixtures status [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + hdr "Kubernetes (namespace: ${CO_NS})" + kubectl get scansettingbindings.compliance.openshift.io,scansettings.compliance.openshift.io \ + -n "$CO_NS" -l "demo-seed=${id}" \ + -o custom-columns='KIND:.kind,NAME:.metadata.name' --no-headers 2> /dev/null \ + | while read -r kind name; do + info "${kind}: ${name}" + done + + hdr "ACS" + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +prefix = 'demo-${id}-' +for c in data.get('configurations', []): + if c['scanName'].startswith(prefix): + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + profiles = c.get('scanConfig', {}).get('profiles', []) + interval = sched.get('intervalType', '?') + hour = sched.get('hour', '?') + minute = sched.get('minute', 0) + print(f\" {c['scanName']} ({', '.join(profiles)}, {interval} {hour}:{minute:02d}) id={c['id']}\") +" 2> /dev/null + echo "" +} + +# ── Main ───────────────────────────────────────────────────────────────────── +function help { + echo "Usage: $0 {up|down|status}" + echo "" + echo " up Create 2 ACS scan configs + 3 K8s SSBs (1 conflicting)" + echo " down Remove all fixtures created by 'up'" + echo " status Show current fixture state" +} + +case "${1:-}" in + up) cmd_up "${2:-}" ;; + down) cmd_down ;; + status) cmd_status ;; + help) help ;; + -h) help ;; + --help) help ;; + *) cmd_up "${2:-}" ;; +esac From 167729a0b207815dc5b5e7ed92d9e944da887f0a Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 18:44:31 +0100 Subject: [PATCH 21/24] fix(co-importer): show error details and mapping warnings in console output Three issues fixed: 1. Non-transient HTTP errors (400, 401, etc.) 
only showed the status code in the Reason field but not the response body detail. Now Reason includes the full error message from the ACS API. 2. Multi-cluster fail path printed action.Reason instead of action.Err, losing the response body. Now checks action.Err first, matching the single-cluster code path. 3. Multi-cluster mapping/ScanSetting errors were collected into the problem collector but never printed to the console, causing SSBs to be silently dropped. Now emits Warnf for both ScanSetting fetch failures and mapping errors (e.g. unsupported cron step notation). Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- .../internal/reconcile/create_only.go | 4 ++-- .../internal/run/multi_cluster.go | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only.go b/scripts/compliance-operator-importer/internal/reconcile/create_only.go index 90c6394e83c38..e572b537e53eb 100644 --- a/scripts/compliance-operator-importer/internal/reconcile/create_only.go +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only.go @@ -142,7 +142,7 @@ func (r *Reconciler) Apply( if !transientStatusCodes[code] { // Non-transient: fail immediately, no more attempts action.ActionType = "fail" - action.Reason = fmt.Sprintf("non-transient HTTP %d error updating scan configuration", code) + action.Reason = fmt.Sprintf("non-transient HTTP %d error updating scan configuration: %v", code, lastErr) action.Err = lastErr return action } @@ -205,7 +205,7 @@ func (r *Reconciler) Apply( if !transientStatusCodes[code] { // Non-transient: fail immediately, no more attempts action.ActionType = "fail" - action.Reason = fmt.Sprintf("non-transient HTTP %d error creating scan configuration", code) + action.Reason = fmt.Sprintf("non-transient HTTP %d error creating scan configuration: %v", code, lastErr) action.Err = lastErr return action } diff --git 
a/scripts/compliance-operator-importer/internal/run/multi_cluster.go b/scripts/compliance-operator-importer/internal/run/multi_cluster.go index 8c5e6b8e6a099..ace1790519380 100644 --- a/scripts/compliance-operator-importer/internal/run/multi_cluster.go +++ b/scripts/compliance-operator-importer/internal/run/multi_cluster.go @@ -79,6 +79,7 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i // Fetch the ScanSetting. ss, err := source.COClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) if err != nil { + r.status.Warnf("%s → ScanSetting %q not found on cluster %s: %v", binding.Name, binding.ScanSettingName, source.Label, err) collector.Add(models.Problem{ Severity: models.SeverityError, Category: models.CategoryInput, @@ -97,6 +98,7 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i result := mapping.MapBinding(binding, ss, &tempCfg) if result.Problem != nil { + r.status.Warnf("%s → mapping error: %s", binding.Name, result.Problem.Description) collector.Add(*result.Problem) continue } @@ -157,7 +159,11 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i case "skip": r.status.Detailf("%s → skipped (already exists)", merged.Name) case "fail": - r.status.Failf("%s → %s", merged.Name, action.Reason) + if action.Err != nil { + r.status.Failf("%s → %s", merged.Name, action.Err) + } else { + r.status.Failf("%s → %s", merged.Name, action.Reason) + } } item := models.ReportItem{ From 199c0a6c8416321ac74dba11596e92f1bf6b5400 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 18:45:01 +0100 Subject: [PATCH 22/24] feat(co-importer): add run.sh wrapper for easy container usage Wrapper script that auto-mounts kubeconfig files and forwards ACS auth env vars so running via container is a one-liner: ROX_API_TOKEN=... 
./run.sh --endpoint central.example.com --dry-run Handles KUBECONFIG with multiple colon-separated paths, forwards ROX_ENDPOINT, ROX_API_TOKEN, ROX_ADMIN_PASSWORD, ROX_ADMIN_USER. Supports IMAGE and CONTAINER_RT overrides. Also reverts Makefile IMAGE default to localhost/ (local build target). Partially generated by AI. Co-Authored-By: Claude Opus 4.6 --- scripts/compliance-operator-importer/run.sh | 71 +++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100755 scripts/compliance-operator-importer/run.sh diff --git a/scripts/compliance-operator-importer/run.sh b/scripts/compliance-operator-importer/run.sh new file mode 100755 index 0000000000000..46e322ed5b9cc --- /dev/null +++ b/scripts/compliance-operator-importer/run.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# run.sh — Run the compliance-operator-importer via container. +# +# Automatically mounts kubeconfig files and forwards ACS auth env vars +# so you don't have to spell out docker/podman flags manually. +# +# USAGE: +# ./run.sh --endpoint central.example.com --dry-run +# ./run.sh --endpoint central.example.com --context my-cluster +# +# ENVIRONMENT (read from host, forwarded to container): +# KUBECONFIG Colon-separated kubeconfig paths (default: ~/.kube/config) +# ROX_ENDPOINT ACS Central URL (alternative to --endpoint) +# ROX_API_TOKEN API token auth +# ROX_ADMIN_PASSWORD Basic auth password +# ROX_ADMIN_USER Basic auth username (default: admin) +# +# IMAGE override: +# IMAGE=my-registry/co-importer:v1 ./run.sh --endpoint ... + +set -euo pipefail + +IMAGE="${IMAGE:-localhost/compliance-operator-importer:latest}" +CONTAINER_RT="${CONTAINER_RT:-$(command -v podman 2>/dev/null || echo docker)}" + +# ── Kubeconfig mounts ──────────────────────────────────────────────────────── + +kubeconfig_paths="${KUBECONFIG:-$HOME/.kube/config}" + +mount_args=() +container_paths=() +i=0 + +IFS=':' read -ra kc_files <<< "$kubeconfig_paths" +for f in "${kc_files[@]}"; do + f="${f/#\~/$HOME}" + if [[ ! 
-f "$f" ]]; then + echo "WARNING: kubeconfig not found, skipping: $f" >&2 + continue + fi + target="/kubeconfig/config-${i}" + mount_args+=(-v "$f:$target:ro") + container_paths+=("$target") + ((++i)) +done + +if [[ ${#container_paths[@]} -eq 0 ]]; then + echo "ERROR: no kubeconfig files found" >&2 + exit 1 +fi + +# Join container paths with ':' for the in-container KUBECONFIG. +joined=$(IFS=':'; echo "${container_paths[*]}") + +# ── Auth env vars ──────────────────────────────────────────────────────────── + +env_args=(-e "KUBECONFIG=$joined") + +for var in ROX_ENDPOINT ROX_API_TOKEN ROX_ADMIN_PASSWORD ROX_ADMIN_USER; do + if [[ -n "${!var:-}" ]]; then + env_args+=(-e "$var=${!var}") + fi +done + +# ── Run ────────────────────────────────────────────────────────────────────── + +exec "$CONTAINER_RT" run --rm \ + "${mount_args[@]}" \ + "${env_args[@]}" \ + "$IMAGE" \ + "$@" From e0dea1ea962dd18d26cb67aed867247f6097c891 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 19:15:02 +0100 Subject: [PATCH 23/24] fix(co-importer): skip adoption when target ScanSetting pre-exists on cluster When the importer creates a scan config in ACS, ACS pushes a ScanSetting to the cluster. The adoption step then patches the SSB's settingsRef to point to it. But if a ScanSetting with that name already existed before reconciliation, the adoption poll would find it immediately and patch the SSB onto a resource ACS doesn't control. Fix: snapshot which ScanSettings (named after SSBs) exist on each cluster before reconciliation. During adoption, if the target name was already in the snapshot, skip with a warning instead of patching. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- .../internal/adopt/adopt.go | 20 +++++++++++ .../internal/adopt/adopt_test.go | 33 +++++++++++++++++++ .../internal/run/multi_cluster.go | 22 ++++++++++--- .../internal/run/run.go | 21 +++++++++--- .../internal/run/run_test.go | 21 +++++++++--- 5 files changed, 103 insertions(+), 14 deletions(-) diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt.go b/scripts/compliance-operator-importer/internal/adopt/adopt.go index 58f88a4a761c1..85794511a4e74 100644 --- a/scripts/compliance-operator-importer/internal/adopt/adopt.go +++ b/scripts/compliance-operator-importer/internal/adopt/adopt.go @@ -26,6 +26,12 @@ type Request struct { OldSettingRef string // current settingsRef.name on the SSB ClusterLabel string // kubeconfig context name, for logging COClient cofetch.COClient // k8s client scoped to this cluster + + // PreExistingScanSettings is the set of ScanSetting names that existed + // on this cluster before reconciliation. If the target name is in this + // set, adoption is skipped to avoid patching the SSB onto a resource + // that ACS doesn't control. + PreExistingScanSettings map[string]bool } // Result records the outcome for one adoption request. @@ -76,6 +82,20 @@ func (a *Adopter) adoptOne(ctx context.Context, req Request) Result { } } + // IMP-ADOPT-007: if a ScanSetting with the target name already existed + // on the cluster before reconciliation, it's a pre-existing resource + // that would conflict with the ACS-managed one. Skip adoption to + // avoid patching the SSB onto a ScanSetting that ACS doesn't control. 
+ if req.PreExistingScanSettings[newSettingName] { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Skipped: true, + Message: fmt.Sprintf("ScanSetting %q already exists on cluster %s but SSB %s/%s references %q; skipping adoption to avoid conflict with pre-existing resource", + newSettingName, req.ClusterLabel, req.SSBNamespace, req.SSBName, req.OldSettingRef), + } + } + // Poll for the ACS-created ScanSetting to appear on the cluster. if err := a.waitForScanSetting(ctx, req.COClient, req.SSBNamespace, newSettingName); err != nil { // IMP-ADOPT-004, IMP-ADOPT-005, IMP-ADOPT-006: timeout is a warning, not an error. diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt_test.go b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go index 081b9ea6a9c3d..be086c66bccd5 100644 --- a/scripts/compliance-operator-importer/internal/adopt/adopt_test.go +++ b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go @@ -328,6 +328,39 @@ func TestIMP_ADOPT_DelayedScanSetting(t *testing.T) { } } +// TestIMP_ADOPT_PreExistingScanSettingSkip verifies that adoption is skipped +// when a ScanSetting with the target name already existed before reconciliation. +func TestIMP_ADOPT_PreExistingScanSettingSkip(t *testing.T) { + client := newMockCOClient() + // The ScanSetting exists on the cluster (pre-existing). + client.addScanSetting("openshift-compliance", "cis-weekly") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + // Mark the ScanSetting as pre-existing. 
+ PreExistingScanSettings: map[string]bool{"cis-weekly": true}, + }}) + + r := results[0] + if !r.Skipped { + t.Errorf("expected Skipped=true for pre-existing ScanSetting; message: %s", r.Message) + } + if r.Adopted { + t.Error("expected Adopted=false when pre-existing ScanSetting conflicts") + } + if len(client.patches) != 0 { + t.Errorf("expected 0 patch calls, got %d", len(client.patches)) + } + if !containsStr(r.Message, "pre-existing") { + t.Errorf("message should mention pre-existing, got %q", r.Message) + } +} + func containsStr(s, substr string) bool { return len(s) >= len(substr) && searchStr(s, substr) } diff --git a/scripts/compliance-operator-importer/internal/run/multi_cluster.go b/scripts/compliance-operator-importer/internal/run/multi_cluster.go index ace1790519380..f3bc0455f6bd6 100644 --- a/scripts/compliance-operator-importer/internal/run/multi_cluster.go +++ b/scripts/compliance-operator-importer/internal/run/multi_cluster.go @@ -49,6 +49,7 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i oldSettingRef string clusterLabel string coClient cofetch.COClient + preExistingSS map[string]bool } // Key: SSB name, value: list of cluster infos (one per cluster that has the SSB). ssbAdoptionMap := make(map[string][]ssbClusterInfo) @@ -75,6 +76,15 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i r.status.OKf("found %d ScanSettingBindings", len(bindings)) + // Snapshot which ScanSettings (named after SSBs) already exist on + // this cluster before reconciliation, for the adoption pre-existence check. + preExistingSS := make(map[string]bool) + for _, b := range bindings { + if _, err := source.COClient.GetScanSetting(ctx, b.Namespace, b.Name); err == nil { + preExistingSS[b.Name] = true + } + } + for _, binding := range bindings { // Fetch the ScanSetting. 
ss, err := source.COClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) @@ -109,6 +119,7 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i oldSettingRef: binding.ScanSettingName, clusterLabel: source.Label, coClient: source.COClient, + preExistingSS: preExistingSS, }) // Add to the cluster's SSB list for merging. @@ -186,11 +197,12 @@ func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) i if action.ActionType == "create" && !r.cfg.DryRun { for _, info := range ssbAdoptionMap[merged.Name] { adoptRequests = append(adoptRequests, adopt.Request{ - SSBName: merged.Name, - SSBNamespace: info.namespace, - OldSettingRef: info.oldSettingRef, - ClusterLabel: info.clusterLabel, - COClient: info.coClient, + SSBName: merged.Name, + SSBNamespace: info.namespace, + OldSettingRef: info.oldSettingRef, + ClusterLabel: info.clusterLabel, + COClient: info.coClient, + PreExistingScanSettings: info.preExistingSS, }) } } diff --git a/scripts/compliance-operator-importer/internal/run/run.go b/scripts/compliance-operator-importer/internal/run/run.go index 4260a74eeb633..a32182007b630 100644 --- a/scripts/compliance-operator-importer/internal/run/run.go +++ b/scripts/compliance-operator-importer/internal/run/run.go @@ -103,6 +103,16 @@ func (r *Runner) Run(ctx context.Context) int { } rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun, r.cfg.OverwriteExisting) + // Snapshot which ScanSettings (named after SSBs) already exist on the + // cluster before reconciliation. Used during adoption to avoid patching + // an SSB onto a pre-existing ScanSetting that ACS doesn't control. + preExistingSS := make(map[string]bool) + for _, binding := range bindings { + if _, err := r.coClient.GetScanSetting(ctx, binding.Namespace, binding.Name); err == nil { + preExistingSS[binding.Name] = true + } + } + // Step 3: process each binding independently. 
r.status.Stage("Reconcile", "applying scan configurations to ACS") var adoptRequests []adopt.Request @@ -110,11 +120,12 @@ func (r *Runner) Run(ctx context.Context) int { action := r.processBinding(ctx, binding, existingNames, rec, collector, builder) if action == "create" && !r.cfg.DryRun { adoptRequests = append(adoptRequests, adopt.Request{ - SSBName: binding.Name, - SSBNamespace: binding.Namespace, - OldSettingRef: binding.ScanSettingName, - ClusterLabel: "default", - COClient: r.coClient, + SSBName: binding.Name, + SSBNamespace: binding.Namespace, + OldSettingRef: binding.ScanSettingName, + ClusterLabel: "default", + COClient: r.coClient, + PreExistingScanSettings: preExistingSS, }) } } diff --git a/scripts/compliance-operator-importer/internal/run/run_test.go b/scripts/compliance-operator-importer/internal/run/run_test.go index e42b750332d11..92f96bffe24a2 100644 --- a/scripts/compliance-operator-importer/internal/run/run_test.go +++ b/scripts/compliance-operator-importer/internal/run/run_test.go @@ -67,8 +67,13 @@ var _ models.ACSClient = (*mockACSClient)(nil) type mockCOClient struct { bindings []cofetch.ScanSettingBinding listErr error - scanSetting *cofetch.ScanSetting + scanSetting *cofetch.ScanSetting // returned for any name not in scanSettingsByName settingErr error + // scanSettingsByName provides name-aware lookups. When set, only names + // present in this map (plus the primary scanSetting's own name) return + // a result. When nil, the primary scanSetting is returned for any name + // (legacy behaviour). 
+ scanSettingsByName map[string]*cofetch.ScanSetting } func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.ScanSettingBinding, error) { @@ -78,10 +83,17 @@ func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.Sca return m.bindings, nil } -func (m *mockCOClient) GetScanSetting(_ context.Context, _, _ string) (*cofetch.ScanSetting, error) { +func (m *mockCOClient) GetScanSetting(_ context.Context, _, name string) (*cofetch.ScanSetting, error) { if m.settingErr != nil { return nil, m.settingErr } + if m.scanSettingsByName != nil { + if ss, ok := m.scanSettingsByName[name]; ok { + return ss, nil + } + return nil, fmt.Errorf("ScanSetting %q not found", name) + } + // Legacy: return the primary scan setting for any name. return m.scanSetting, nil } @@ -449,10 +461,11 @@ func TestIMP_ERR_004_MissingScanSettingRecordedAsProblem(t *testing.T) { scanSetting: goodScanSetting(), } - // Fail GetScanSetting on the first call (for "broken"), succeed on the second ("ok"). + // Fail GetScanSetting on the third call (for "broken" binding's ScanSetting lookup). + // The first 2 calls are pre-existence snapshot probes (one per binding). coClient2 := &selectiveCOClientByOrder{ base: coClient, - failAtCall: 1, + failAtCall: 3, failErr: errors.New("ScanSetting not found"), } From cb8cc85eac64b43b3049c380acd5739342360278 Mon Sep 17 00:00:00 2001 From: Guzman Date: Thu, 26 Mar 2026 22:30:06 +0100 Subject: [PATCH 24/24] fix(co-importer): clean stale manifest before creating new one podman manifest create fails if a manifest with the same name already exists from a previous push. Remove it first to make image-push idempotent. Partially generated by AI. 
Co-Authored-By: Claude Opus 4.6 --- scripts/compliance-operator-importer/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/compliance-operator-importer/Makefile b/scripts/compliance-operator-importer/Makefile index cdef54c9eed74..52678ea31f499 100644 --- a/scripts/compliance-operator-importer/Makefile +++ b/scripts/compliance-operator-importer/Makefile @@ -39,6 +39,7 @@ image-push: image-multiarch ## Push multi-arch images and create manifest @for arch in $(ARCHS); do \ $(CONTAINER) push $(IMAGE):$(TAG)-$$arch ; \ done + $(CONTAINER) manifest rm $(IMAGE):$(TAG) 2>/dev/null || true $(CONTAINER) manifest create $(IMAGE):$(TAG) \ $(foreach arch,$(ARCHS),$(IMAGE):$(TAG)-$(arch)) $(CONTAINER) manifest push $(IMAGE):$(TAG)